/*
 *  linux/drivers/net/ehea/ehea_qmr.h
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#ifndef __EHEA_QMR_H__
#define __EHEA_QMR_H__

#include "ehea.h"
#include "ehea_hw.h"

/*
 * page size of ehea hardware queues
 */

#define EHEA_PAGESHIFT         12
#define EHEA_PAGESIZE          (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE          (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)

#if (1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
#endif
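
/*
 * For reference: with EHEA_PAGESHIFT = 12 the hardware queue page size is
 * 4 KiB, a section is 1UL << 24 = 16 MiB, and EHEA_PAGES_PER_SECTION
 * therefore works out to 16 MiB / 4 KiB = 4096 queue pages per section.
 */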

/* Some abbreviations used here:
 *
 * WQE  - Work Queue Entry
 * SWQE - Send Work Queue Entry
 * RWQE - Receive Work Queue Entry
 * CQE  - Completion Queue Entry
 * EQE  - Event Queue Entry
 * MR   - Memory Region
 */

/* Use of WR_ID field for EHEA */
#define EHEA_WR_ID_COUNT   EHEA_BMASK_IBM(0, 19)
#define EHEA_WR_ID_TYPE    EHEA_BMASK_IBM(20, 23)
#define EHEA_SWQE2_TYPE    0x1
#define EHEA_SWQE3_TYPE    0x2
#define EHEA_RWQE2_TYPE    0x3
#define EHEA_RWQE3_TYPE    0x4
#define EHEA_WR_ID_INDEX   EHEA_BMASK_IBM(24, 47)
#define EHEA_WR_ID_REFILL  EHEA_BMASK_IBM(48, 63)
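
/*
 * Illustrative sketch only (not part of this header): assuming the
 * EHEA_BMASK_SET()/EHEA_BMASK_GET() helpers provided alongside
 * EHEA_BMASK_IBM() in ehea.h, a sender could pack a work request ID for a
 * format-2 send WQE and later recover the index from the completion:
 *
 *   u64 wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 *             | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
 *   ...
 *   int index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
 */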

struct ehea_vsgentry {
        u64 vaddr;
        u32 l_key;
        u32 len;
};

/* maximum number of sg entries allowed in a WQE */
#define EHEA_MAX_WQE_SG_ENTRIES         252
#define SWQE2_MAX_IMM                   (0xD0 - 0x30)
#define SWQE3_MAX_IMM                   224
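
/*
 * For reference: SWQE2_MAX_IMM is spelled (0xD0 - 0x30) because in the
 * format-2 send WQE below the immediate data starts at offset 0x30 (after
 * the 32-byte header and one sg entry) and ends at 0xd0, i.e. 160 bytes.
 */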

/* tx control flags for swqe */
#define EHEA_SWQE_CRC                   0x8000
#define EHEA_SWQE_IP_CHECKSUM           0x4000
#define EHEA_SWQE_TCP_CHECKSUM          0x2000
#define EHEA_SWQE_TSO                   0x1000
#define EHEA_SWQE_SIGNALLED_COMPLETION  0x0800
#define EHEA_SWQE_VLAN_INSERT           0x0400
#define EHEA_SWQE_IMM_DATA_PRESENT      0x0200
#define EHEA_SWQE_DESCRIPTORS_PRESENT   0x0100
#define EHEA_SWQE_WRAP_CTL_REC          0x0080
#define EHEA_SWQE_WRAP_CTL_FORCE        0x0040
#define EHEA_SWQE_BIND                  0x0020
#define EHEA_SWQE_PURGE                 0x0010

/* sizeof(struct ehea_swqe) less the union */
#define SWQE_HEADER_SIZE                32

struct ehea_swqe {
        u64 wr_id;
        u16 tx_control;
        u16 vlan_tag;
        u8 reserved1;
        u8 ip_start;
        u8 ip_end;
        u8 immediate_data_length;
        u8 tcp_offset;
        u8 reserved2;
        u16 tcp_end;
        u8 wrap_tag;
        u8 descriptors;         /* number of valid descriptors in WQE */
        u16 reserved3;
        u16 reserved4;
        u16 mss;
        u32 reserved5;
        union {
                /*  Send WQE Format 1 */
                struct {
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
                } no_immediate_data;

                /*  Send WQE Format 2 */
                struct {
                        struct ehea_vsgentry sg_entry;
                        /* 0x30 */
                        u8 immediate_data[SWQE2_MAX_IMM];
                        /* 0xd0 */
                        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES-1];
                } immdata_desc __attribute__ ((packed));

                /*  Send WQE Format 3 */
                struct {
                        u8 immediate_data[SWQE3_MAX_IMM];
                } immdata_nodesc;
        } u;
};
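
/*
 * Illustrative sketch only (names such as qp, skb and hdr_len are
 * hypothetical): a transmit path might fill a format-2 send WQE by copying
 * the packet headers as immediate data and requesting a signalled
 * completion, roughly like this:
 *
 *   int index;
 *   struct ehea_swqe *swqe = ehea_get_swqe(qp, &index);
 *
 *   swqe->tx_control = EHEA_SWQE_IMM_DATA_PRESENT
 *                    | EHEA_SWQE_SIGNALLED_COMPLETION;
 *   swqe->immediate_data_length = hdr_len;
 *   memcpy(swqe->u.immdata_desc.immediate_data, skb->data, hdr_len);
 *   ehea_post_swqe(qp, swqe);
 */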

struct ehea_rwqe {
        u64 wr_id;              /* work request ID */
        u8 reserved1[5];
        u8 data_segments;
        u16 reserved2;
        u64 reserved3;
        u64 reserved4;
        struct ehea_vsgentry sg_list[EHEA_MAX_WQE_SG_ENTRIES];
};

#define EHEA_CQE_VLAN_TAG_XTRACT   0x0400

#define EHEA_CQE_TYPE_RQ           0x60
#define EHEA_CQE_STAT_ERR_MASK     0x700F
#define EHEA_CQE_STAT_FAT_ERR_MASK 0xF
#define EHEA_CQE_STAT_ERR_TCP      0x4000
#define EHEA_CQE_STAT_ERR_IP       0x2000
#define EHEA_CQE_STAT_ERR_CRC      0x1000

struct ehea_cqe {
        u64 wr_id;              /* work request ID from WQE */
        u8 type;
        u8 valid;
        u16 status;
        u16 reserved1;
        u16 num_bytes_transfered;
        u16 vlan_tag;
        u16 inet_checksum_value;
        u8 reserved2;
        u8 header_length;
        u16 reserved3;
        u16 page_offset;
        u16 wqe_count;
        u32 qp_token;
        u32 timestamp;
        u32 reserved4;
        u64 reserved5[3];
};
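
/*
 * Illustrative sketch only: a receive completion handler could classify
 * errors with the status masks above, e.g.:
 *
 *   if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
 *           if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 *                   ...count a CRC error...
 *           ...drop the frame instead of passing it up the stack...
 *   }
 */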

#define EHEA_EQE_VALID           EHEA_BMASK_IBM(0, 0)
#define EHEA_EQE_IS_CQE          EHEA_BMASK_IBM(1, 1)
#define EHEA_EQE_IDENTIFIER      EHEA_BMASK_IBM(2, 7)
#define EHEA_EQE_QP_CQ_NUMBER    EHEA_BMASK_IBM(8, 31)
#define EHEA_EQE_QP_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_CQ_TOKEN        EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_KEY             EHEA_BMASK_IBM(32, 63)
#define EHEA_EQE_PORT_NUMBER     EHEA_BMASK_IBM(56, 63)
#define EHEA_EQE_EQ_NUMBER       EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_ID           EHEA_BMASK_IBM(48, 63)
#define EHEA_EQE_SM_MECH_NUMBER  EHEA_BMASK_IBM(48, 55)
#define EHEA_EQE_SM_PORT_NUMBER  EHEA_BMASK_IBM(56, 63)

struct ehea_eqe {
        u64 entry;
};
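
/*
 * An EQE is a single u64 whose fields are numbered in IBM bit order
 * (bit 0 is the most significant bit, as with all EHEA_BMASK_IBM masks).
 * Illustrative sketch only, assuming EHEA_BMASK_GET() from ehea.h:
 *
 *   struct ehea_eqe *eqe = ehea_poll_eq(eq);
 *   if (eqe && EHEA_BMASK_GET(EHEA_EQE_VALID, eqe->entry)) {
 *           u32 token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
 *           ...
 *   }
 */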

#define ERROR_DATA_LENGTH  EHEA_BMASK_IBM(52, 63)
#define ERROR_DATA_TYPE    EHEA_BMASK_IBM(0, 7)

/* translate a queue offset into a pointer within the corresponding queue page */
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
        struct ehea_page *current_page;

        if (q_offset >= queue->queue_length)
                q_offset -= queue->queue_length;
        current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];
        return &current_page->entries[q_offset & (EHEA_PAGESIZE - 1)];
}

static inline void *hw_qeit_get(struct hw_queue *queue)
{
        return hw_qeit_calc(queue, queue->current_q_offset);
}

static inline void hw_qeit_inc(struct hw_queue *queue)
{
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset >= queue->queue_length) {
                queue->current_q_offset = 0;
                /* toggle the valid flag */
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
}
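
/*
 * Note on the toggle bit: each queue entry carries a valid bit that the
 * hardware flips on every pass over the ring. queue->toggle_state holds
 * the value that marks an entry as valid for the current pass, so flipping
 * it on wrap-around (as in hw_qeit_inc() above) automatically invalidates
 * the stale entries left over from the previous pass.
 */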

static inline void *hw_qeit_get_inc(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        hw_qeit_inc(queue);
        return retvalue;
}

static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        u8 valid = retvalue->valid;
        void *pref;

        if ((valid >> 7) == (queue->toggle_state & 1)) {
                /* this is a good one */
                hw_qeit_inc(queue);
                pref = hw_qeit_calc(queue, queue->current_q_offset);
                prefetch(pref);
                prefetch(pref + 128);
        } else
                retvalue = NULL;
        return retvalue;
}

static inline void *hw_qeit_get_valid(struct hw_queue *queue)
{
        struct ehea_cqe *retvalue = hw_qeit_get(queue);
        void *pref;
        u8 valid;

        pref = hw_qeit_calc(queue, queue->current_q_offset);
        prefetch(pref);
        prefetch(pref + 128);
        prefetch(pref + 256);
        valid = retvalue->valid;
        if ((valid >> 7) != (queue->toggle_state & 1))
                retvalue = NULL;
        return retvalue;
}

static inline void *hw_qeit_reset(struct hw_queue *queue)
{
        queue->current_q_offset = 0;
        return hw_qeit_get(queue);
}

static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue)
{
        u64 last_entry_in_q = queue->queue_length - queue->qe_size;
        void *retvalue;

        retvalue = hw_qeit_get(queue);
        queue->current_q_offset += queue->qe_size;
        if (queue->current_q_offset > last_entry_in_q) {
                queue->current_q_offset = 0;
                queue->toggle_state = (~queue->toggle_state) & 1;
        }
        return retvalue;
}

static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue)
{
        void *retvalue = hw_qeit_get(queue);
        u32 qe = *(u8 *)retvalue;
        if ((qe >> 7) == (queue->toggle_state & 1))
                hw_qeit_eq_get_inc(queue);
        else
                retvalue = NULL;
        return retvalue;
}

static inline struct ehea_rwqe *ehea_get_next_rwqe(struct ehea_qp *qp,
                                                   int rq_nr)
{
        struct hw_queue *queue;

        if (rq_nr == 1)
                queue = &qp->hw_rqueue1;
        else if (rq_nr == 2)
                queue = &qp->hw_rqueue2;
        else
                queue = &qp->hw_rqueue3;

        return hw_qeit_get_inc(queue);
}

static inline struct ehea_swqe *ehea_get_swqe(struct ehea_qp *my_qp,
                                              int *wqe_index)
{
        struct hw_queue *queue = &my_qp->hw_squeue;
        struct ehea_swqe *wqe_p;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ);
        wqe_p = hw_qeit_get_inc(&my_qp->hw_squeue);

        return wqe_p;
}

/* make the finished WQE visible to the device, then notify the hardware
 * that one more SWQE has been posted */
static inline void ehea_post_swqe(struct ehea_qp *my_qp, struct ehea_swqe *swqe)
{
        iosync();
        ehea_update_sqa(my_qp, 1);
}

static inline struct ehea_cqe *ehea_poll_rq1(struct ehea_qp *qp, int *wqe_index)
{
        struct hw_queue *queue = &qp->hw_rqueue1;

        *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1);
        return hw_qeit_get_valid(queue);
}

static inline void ehea_inc_cq(struct ehea_cq *cq)
{
        hw_qeit_inc(&cq->hw_queue);
}

static inline void ehea_inc_rq1(struct ehea_qp *qp)
{
        hw_qeit_inc(&qp->hw_rqueue1);
}

static inline struct ehea_cqe *ehea_poll_cq(struct ehea_cq *my_cq)
{
        return hw_qeit_get_valid(&my_cq->hw_queue);
}
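
/*
 * Illustrative sketch only (send_cq and budget are hypothetical): a poll
 * routine would typically combine ehea_poll_cq() and ehea_inc_cq() like
 * this:
 *
 *   struct ehea_cqe *cqe = ehea_poll_cq(send_cq);
 *   while (cqe && budget--) {
 *           ehea_inc_cq(send_cq);
 *           ...process the completion...
 *           cqe = ehea_poll_cq(send_cq);
 *   }
 */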

#define EHEA_CQ_REGISTER_ORIG 0
#define EHEA_EQ_REGISTER_ORIG 0

enum ehea_eq_type {
        EHEA_EQ = 0,            /* event queue              */
        EHEA_NEQ                /* notification event queue */
};
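
/*
 * Illustrative sketch only (eq_entries, cq_entries, eq_handle and cq_token
 * are hypothetical placeholders): during port setup the queues declared
 * below might be created in dependency order, event queue first:
 *
 *   struct ehea_eq *eq = ehea_create_eq(adapter, EHEA_EQ, eq_entries, 1);
 *   struct ehea_cq *cq = ehea_create_cq(adapter, cq_entries, eq_handle,
 *                                       cq_token);
 */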

struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
                               enum ehea_eq_type type,
                               const u32 length, const u8 eqe_gen);

int ehea_destroy_eq(struct ehea_eq *eq);

struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq);

struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter, int cqe,
                               u64 eq_handle, u32 cq_token);

int ehea_destroy_cq(struct ehea_cq *cq);

struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter, u32 pd,
                               struct ehea_qp_init_attr *init_attr);

int ehea_destroy_qp(struct ehea_qp *qp);

int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr);

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
                 struct ehea_mr *shared_mr);

int ehea_rem_mr(struct ehea_mr *mr);

void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);

int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);

#endif  /* __EHEA_QMR_H__ */
