/*
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
 */

#if !defined(IB_VERBS_H)
#define IB_VERBS_H

#include <linux/types.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rwsem.h>
#include <linux/scatterlist.h>

#include <asm/atomic.h>
#include <asm/uaccess.h>

union ib_gid {
	u8 raw[16];
	struct {
		__be64 subnet_prefix;
		__be64 interface_id;
	} global;
};

enum rdma_node_type {
	/* IB values map to NodeInfo:NodeType. */
	RDMA_NODE_IB_CA = 1,
	RDMA_NODE_IB_SWITCH,
	RDMA_NODE_IB_ROUTER,
	RDMA_NODE_RNIC
};

enum rdma_transport_type {
	RDMA_TRANSPORT_IB,
	RDMA_TRANSPORT_IWARP
};

enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type) __attribute_const__;

enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR = 1,
	IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
	IB_DEVICE_RAW_MULTI = (1<<3),
	IB_DEVICE_AUTO_PATH_MIG = (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
	IB_DEVICE_SHUTDOWN_PORT = (1<<8),
	IB_DEVICE_INIT_TYPE = (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
	IB_DEVICE_SRQ_RESIZE = (1<<13),
	IB_DEVICE_N_NOTIFY_CQ = (1<<14),
	IB_DEVICE_ZERO_STAG = (1<<15),
	IB_DEVICE_SEND_W_INV = (1<<16),
	IB_DEVICE_MEM_WINDOW = (1<<17)
};

enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};

struct ib_device_attr {
	u64 fw_ver;
	__be64 sys_image_guid;
	u64 max_mr_size;
	u64 page_size_cap;
	u32 vendor_id;
	u32 vendor_part_id;
	u32 hw_ver;
	int max_qp;
	int max_qp_wr;
	int device_cap_flags;
	int max_sge;
	int max_sge_rd;
	int max_cq;
	int max_cqe;
	int max_mr;
	int max_pd;
	int max_qp_rd_atom;
	int max_ee_rd_atom;
	int max_res_rd_atom;
	int max_qp_init_rd_atom;
	int max_ee_init_rd_atom;
	enum ib_atomic_cap atomic_cap;
	int max_ee;
	int max_rdd;
	int max_mw;
	int max_raw_ipv6_qp;
	int max_raw_ethy_qp;
	int max_mcast_grp;
	int max_mcast_qp_attach;
	int max_total_mcast_qp_attach;
	int max_ah;
	int max_fmr;
	int max_map_per_fmr;
	int max_srq;
	int max_srq_wr;
	int max_srq_sge;
	u16 max_pkeys;
	u8 local_ca_ack_delay;
};

enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	switch (mtu) {
	case IB_MTU_256: return 256;
	case IB_MTU_512: return 512;
	case IB_MTU_1024: return 1024;
	case IB_MTU_2048: return 2048;
	case IB_MTU_4096: return 4096;
	default: return -1;
	}
}

enum ib_port_state {
	IB_PORT_NOP = 0,
	IB_PORT_DOWN = 1,
	IB_PORT_INIT = 2,
	IB_PORT_ARMED = 3,
	IB_PORT_ACTIVE = 4,
	IB_PORT_ACTIVE_DEFER = 5
};

enum ib_port_cap_flags {
	IB_PORT_SM = 1 << 1,
	IB_PORT_NOTICE_SUP = 1 << 2,
	IB_PORT_TRAP_SUP = 1 << 3,
	IB_PORT_OPT_IPD_SUP = 1 << 4,
	IB_PORT_AUTO_MIGR_SUP = 1 << 5,
	IB_PORT_SL_MAP_SUP = 1 << 6,
	IB_PORT_MKEY_NVRAM = 1 << 7,
	IB_PORT_PKEY_NVRAM = 1 << 8,
	IB_PORT_LED_INFO_SUP = 1 << 9,
	IB_PORT_SM_DISABLED = 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
	IB_PORT_CM_SUP = 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
	IB_PORT_REINIT_SUP = 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
	IB_PORT_DR_NOTICE_SUP = 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
	IB_PORT_BOOT_MGMT_SUP = 1 << 23,
	IB_PORT_LINK_LATENCY_SUP = 1 << 24,
	IB_PORT_CLIENT_REG_SUP = 1 << 25
};

enum ib_port_width {
	IB_WIDTH_1X = 1,
	IB_WIDTH_4X = 2,
	IB_WIDTH_8X = 4,
	IB_WIDTH_12X = 8
};

static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	switch (width) {
	case IB_WIDTH_1X: return 1;
	case IB_WIDTH_4X: return 4;
	case IB_WIDTH_8X: return 8;
	case IB_WIDTH_12X: return 12;
	default: return -1;
	}
}

struct ib_port_attr {
	enum ib_port_state state;
	enum ib_mtu max_mtu;
	enum ib_mtu active_mtu;
	int gid_tbl_len;
	u32 port_cap_flags;
	u32 max_msg_sz;
	u32 bad_pkey_cntr;
	u32 qkey_viol_cntr;
	u16 pkey_tbl_len;
	u16 lid;
	u16 sm_lid;
	u8 lmc;
	u8 max_vl_num;
	u8 sm_sl;
	u8 subnet_timeout;
	u8 init_type_reply;
	u8 active_width;
	u8 active_speed;
	u8 phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 << 0,
	IB_DEVICE_MODIFY_NODE_DESC = 1 << 1
};

struct ib_device_modify {
	u64 sys_image_guid;
	char node_desc[64];
};

enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN = 1,
	IB_PORT_INIT_TYPE = (1<<2),
	IB_PORT_RESET_QKEY_CNTR = (1<<3)
};

struct ib_port_modify {
	u32 set_port_cap_mask;
	u32 clr_port_cap_mask;
	u8 init_type;
};

enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED,
	IB_EVENT_CLIENT_REREGISTER
};

struct ib_event {
	struct ib_device *device;
	union {
		struct ib_cq *cq;
		struct ib_qp *qp;
		struct ib_srq *srq;
		u8 port_num;
	} element;
	enum ib_event_type event;
};

struct ib_event_handler {
	struct ib_device *device;
	void (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head list;
};

#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
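
/*
 * Example (illustrative sketch, not part of this header): a consumer can
 * initialize an ib_event_handler with INIT_IB_EVENT_HANDLER() and register
 * it with ib_register_event_handler(), declared later in this file.  The
 * names my_handler and my_eh below are hypothetical.
 *
 *	static void my_handler(struct ib_event_handler *eh,
 *			       struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			printk(KERN_INFO "%s: port %d is active\n",
 *			       event->device->name,
 *			       event->element.port_num);
 *	}
 *
 *	static struct ib_event_handler my_eh;
 *
 *	INIT_IB_EVENT_HANDLER(&my_eh, device, my_handler);
 *	ib_register_event_handler(&my_eh);
 */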

struct ib_global_route {
	union ib_gid dgid;
	u32 flow_label;
	u8 sgid_index;
	u8 hop_limit;
	u8 traffic_class;
};

struct ib_grh {
	__be32 version_tclass_flow;
	__be16 paylen;
	u8 next_hdr;
	u8 hop_limit;
	union ib_gid sgid;
	union ib_gid dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH = 1
};

enum ib_rate {
	IB_RATE_PORT_CURRENT = 0,
	IB_RATE_2_5_GBPS = 2,
	IB_RATE_5_GBPS = 5,
	IB_RATE_10_GBPS = 3,
	IB_RATE_20_GBPS = 6,
	IB_RATE_30_GBPS = 4,
	IB_RATE_40_GBPS = 7,
	IB_RATE_60_GBPS = 8,
	IB_RATE_80_GBPS = 9,
	IB_RATE_120_GBPS = 10
};

/**
 * ib_rate_to_mult - Convert the IB rate enum to a multiple of the
 * base rate of 2.5 Gbit/sec. For example, IB_RATE_5_GBPS will be
 * converted to 2, since 5 Gbit/sec is 2 * 2.5 Gbit/sec.
 * @rate: rate to convert.
 */
int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;

/**
 * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
 * enum.
 * @mult: multiple to convert.
 */
enum ib_rate mult_to_ib_rate(int mult) __attribute_const__;

struct ib_ah_attr {
	struct ib_global_route grh;
	u16 dlid;
	u8 sl;
	u8 src_path_bits;
	u8 static_rate;
	u8 ah_flags;
	u8 port_num;
};

enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
	/*
	 * Set value of IB_WC_RECV so consumers can test if a completion is a
	 * receive by testing (opcode & IB_WC_RECV).
	 */
	IB_WC_RECV = 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

enum ib_wc_flags {
	IB_WC_GRH = 1,
	IB_WC_WITH_IMM = (1<<1)
};

struct ib_wc {
	u64 wr_id;
	enum ib_wc_status status;
	enum ib_wc_opcode opcode;
	u32 vendor_err;
	u32 byte_len;
	struct ib_qp *qp;
	__be32 imm_data;
	u32 src_qp;
	int wc_flags;
	u16 pkey_index;
	u16 slid;
	u8 sl;
	u8 dlid_path_bits;
	u8 port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify_flags {
	IB_CQ_SOLICITED = 1 << 0,
	IB_CQ_NEXT_COMP = 1 << 1,
	IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
	IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
};

enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR = 1 << 0,
	IB_SRQ_LIMIT = 1 << 1,
};

struct ib_srq_attr {
	u32 max_wr;
	u32 max_sge;
	u32 srq_limit;
};

struct ib_srq_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	struct ib_srq_attr attr;
};

struct ib_qp_cap {
	u32 max_send_wr;
	u32 max_recv_wr;
	u32 max_send_sge;
	u32 max_recv_sge;
	u32 max_inline_data;
};

enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_qp_cap cap;
	enum ib_sig_type sq_sig_type;
	enum ib_qp_type qp_type;
	u8 port_num;	/* special QP types only */
};

enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 = 0,
	IB_RNR_TIMER_000_01 = 1,
	IB_RNR_TIMER_000_02 = 2,
	IB_RNR_TIMER_000_03 = 3,
	IB_RNR_TIMER_000_04 = 4,
	IB_RNR_TIMER_000_06 = 5,
	IB_RNR_TIMER_000_08 = 6,
	IB_RNR_TIMER_000_12 = 7,
	IB_RNR_TIMER_000_16 = 8,
	IB_RNR_TIMER_000_24 = 9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};

enum ib_qp_attr_mask {
	IB_QP_STATE = 1,
	IB_QP_CUR_STATE = (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
	IB_QP_ACCESS_FLAGS = (1<<3),
	IB_QP_PKEY_INDEX = (1<<4),
	IB_QP_PORT = (1<<5),
	IB_QP_QKEY = (1<<6),
	IB_QP_AV = (1<<7),
	IB_QP_PATH_MTU = (1<<8),
	IB_QP_TIMEOUT = (1<<9),
	IB_QP_RETRY_CNT = (1<<10),
	IB_QP_RNR_RETRY = (1<<11),
	IB_QP_RQ_PSN = (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
	IB_QP_ALT_PATH = (1<<14),
	IB_QP_MIN_RNR_TIMER = (1<<15),
	IB_QP_SQ_PSN = (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
	IB_QP_PATH_MIG_STATE = (1<<18),
	IB_QP_CAP = (1<<19),
	IB_QP_DEST_QPN = (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

struct ib_qp_attr {
	enum ib_qp_state qp_state;
	enum ib_qp_state cur_qp_state;
	enum ib_mtu path_mtu;
	enum ib_mig_state path_mig_state;
	u32 qkey;
	u32 rq_psn;
	u32 sq_psn;
	u32 dest_qp_num;
	int qp_access_flags;
	struct ib_qp_cap cap;
	struct ib_ah_attr ah_attr;
	struct ib_ah_attr alt_ah_attr;
	u16 pkey_index;
	u16 alt_pkey_index;
	u8 en_sqd_async_notify;
	u8 sq_draining;
	u8 max_rd_atomic;
	u8 max_dest_rd_atomic;
	u8 min_rnr_timer;
	u8 port_num;
	u8 timeout;
	u8 retry_cnt;
	u8 rnr_retry;
	u8 alt_port_num;
	u8 alt_timeout;
};

enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

enum ib_send_flags {
	IB_SEND_FENCE = 1,
	IB_SEND_SIGNALED = (1<<1),
	IB_SEND_SOLICITED = (1<<2),
	IB_SEND_INLINE = (1<<3)
};

struct ib_sge {
	u64 addr;
	u32 length;
	u32 lkey;
};

struct ib_send_wr {
	struct ib_send_wr *next;
	u64 wr_id;
	struct ib_sge *sg_list;
	int num_sge;
	enum ib_wr_opcode opcode;
	int send_flags;
	__be32 imm_data;
	union {
		struct {
			u64 remote_addr;
			u32 rkey;
		} rdma;
		struct {
			u64 remote_addr;
			u64 compare_add;
			u64 swap;
			u32 rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			u32 remote_qpn;
			u32 remote_qkey;
			u16 pkey_index;	/* valid for GSI only */
			u8 port_num;	/* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

struct ib_recv_wr {
	struct ib_recv_wr *next;
	u64 wr_id;
	struct ib_sge *sg_list;
	int num_sge;
};

enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE = 1,
	IB_ACCESS_REMOTE_WRITE = (1<<1),
	IB_ACCESS_REMOTE_READ = (1<<2),
	IB_ACCESS_REMOTE_ATOMIC = (1<<3),
	IB_ACCESS_MW_BIND = (1<<4)
};

struct ib_phys_buf {
	u64 addr;
	u64 size;
};

struct ib_mr_attr {
	struct ib_pd *pd;
	u64 device_virt_addr;
	u64 size;
	int mr_access_flags;
	u32 lkey;
	u32 rkey;
};

enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS = 1,
	IB_MR_REREG_PD = (1<<1),
	IB_MR_REREG_ACCESS = (1<<2)
};

struct ib_mw_bind {
	struct ib_mr *mr;
	u64 wr_id;
	u64 addr;
	u32 length;
	int send_flags;
	int mw_access_flags;
};

struct ib_fmr_attr {
	int max_pages;
	int max_maps;
	u8 page_shift;
};

struct ib_ucontext {
	struct ib_device *device;
	struct list_head pd_list;
	struct list_head mr_list;
	struct list_head mw_list;
	struct list_head cq_list;
	struct list_head qp_list;
	struct list_head srq_list;
	struct list_head ah_list;
	int closing;
};

struct ib_uobject {
	u64 user_handle;		/* handle given to us by userspace */
	struct ib_ucontext *context;	/* associated user context */
	void *object;			/* containing object */
	struct list_head list;		/* link to context's list */
	u32 id;				/* index into kernel idr */
	struct kref ref;
	struct rw_semaphore mutex;	/* protects .live */
	int live;
};

struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t inlen;
	size_t outlen;
};

struct ib_pd {
	struct ib_device *device;
	struct ib_uobject *uobject;
	atomic_t usecnt;		/* count all resources */
};

struct ib_ah {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

struct ib_cq {
	struct ib_device *device;
	struct ib_uobject *uobject;
	ib_comp_handler comp_handler;
	void (*event_handler)(struct ib_event *, void *);
	void *cq_context;
	int cqe;
	atomic_t usecnt;		/* count number of work queues */
};

struct ib_srq {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *srq_context;
	atomic_t usecnt;
};

struct ib_qp {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_cq *send_cq;
	struct ib_cq *recv_cq;
	struct ib_srq *srq;
	struct ib_uobject *uobject;
	void (*event_handler)(struct ib_event *, void *);
	void *qp_context;
	u32 qp_num;
	enum ib_qp_type qp_type;
};

struct ib_mr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 lkey;
	u32 rkey;
	atomic_t usecnt;		/* count number of MWs */
};

struct ib_mw {
	struct ib_device *device;
	struct ib_pd *pd;
	struct ib_uobject *uobject;
	u32 rkey;
};

struct ib_fmr {
	struct ib_device *device;
	struct ib_pd *pd;
	struct list_head list;
	u32 lkey;
	u32 rkey;
};

struct ib_mad;
struct ib_grh;

enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY = 1,
	IB_MAD_IGNORE_BKEY = 2,
	IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

enum ib_mad_result {
	IB_MAD_RESULT_FAILURE = 0,	/* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS = 1 << 0,	/* MAD was successfully processed */
	IB_MAD_RESULT_REPLY = 1 << 1,	/* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2	/* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

struct ib_cache {
	rwlock_t lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache **pkey_cache;
	struct ib_gid_cache **gid_cache;
	u8 *lmc_cache;
};

struct ib_dma_mapping_ops {
	int (*mapping_error)(struct ib_device *dev,
			u64 dma_addr);
	u64 (*map_single)(struct ib_device *dev,
			void *ptr, size_t size,
			enum dma_data_direction direction);
	void (*unmap_single)(struct ib_device *dev,
			u64 addr, size_t size,
			enum dma_data_direction direction);
	u64 (*map_page)(struct ib_device *dev,
			struct page *page, unsigned long offset,
			size_t size,
			enum dma_data_direction direction);
	void (*unmap_page)(struct ib_device *dev,
			u64 addr, size_t size,
			enum dma_data_direction direction);
	int (*map_sg)(struct ib_device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction direction);
	void (*unmap_sg)(struct ib_device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction direction);
	u64 (*dma_address)(struct ib_device *dev,
			struct scatterlist *sg);
	unsigned int (*dma_len)(struct ib_device *dev,
			struct scatterlist *sg);
	void (*sync_single_for_cpu)(struct ib_device *dev,
			u64 dma_handle,
			size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct ib_device *dev,
			u64 dma_handle,
			size_t size,
			enum dma_data_direction dir);
	void *(*alloc_coherent)(struct ib_device *dev,
			size_t size,
			u64 *dma_handle,
			gfp_t flag);
	void (*free_coherent)(struct ib_device *dev,
			size_t size, void *cpu_addr,
			u64 dma_handle);
};

struct iw_cm_verbs;

struct ib_device {
	struct device *dma_device;

	char name[IB_DEVICE_NAME_MAX];

	struct list_head event_handler_list;
	spinlock_t event_handler_lock;

	struct list_head core_list;
	struct list_head client_data_list;
	spinlock_t client_data_lock;

	struct ib_cache cache;
	int *pkey_tbl_len;
	int *gid_tbl_len;

	u32 flags;

	int num_comp_vectors;

	struct iw_cm_verbs *iwcm;

	int (*query_device)(struct ib_device *device,
			struct ib_device_attr *device_attr);
	int (*query_port)(struct ib_device *device,
			u8 port_num,
			struct ib_port_attr *port_attr);
	int (*query_gid)(struct ib_device *device,
			u8 port_num, int index,
			union ib_gid *gid);
	int (*query_pkey)(struct ib_device *device,
			u8 port_num, u16 index, u16 *pkey);
	int (*modify_device)(struct ib_device *device,
			int device_modify_mask,
			struct ib_device_modify *device_modify);
	int (*modify_port)(struct ib_device *device,
			u8 port_num, int port_modify_mask,
			struct ib_port_modify *port_modify);
	struct ib_ucontext *(*alloc_ucontext)(struct ib_device *device,
			struct ib_udata *udata);
	int (*dealloc_ucontext)(struct ib_ucontext *context);
	int (*mmap)(struct ib_ucontext *context,
			struct vm_area_struct *vma);
	struct ib_pd *(*alloc_pd)(struct ib_device *device,
			struct ib_ucontext *context,
			struct ib_udata *udata);
	int (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *(*create_ah)(struct ib_pd *pd,
			struct ib_ah_attr *ah_attr);
	int (*modify_ah)(struct ib_ah *ah,
			struct ib_ah_attr *ah_attr);
	int (*query_ah)(struct ib_ah *ah,
			struct ib_ah_attr *ah_attr);
	int (*destroy_ah)(struct ib_ah *ah);
	struct ib_srq *(*create_srq)(struct ib_pd *pd,
			struct ib_srq_init_attr *srq_init_attr,
			struct ib_udata *udata);
	int (*modify_srq)(struct ib_srq *srq,
			struct ib_srq_attr *srq_attr,
			enum ib_srq_attr_mask srq_attr_mask,
			struct ib_udata *udata);
	int (*query_srq)(struct ib_srq *srq,
			struct ib_srq_attr *srq_attr);
	int (*destroy_srq)(struct ib_srq *srq);
	int (*post_srq_recv)(struct ib_srq *srq,
			struct ib_recv_wr *recv_wr,
			struct ib_recv_wr **bad_recv_wr);
	struct ib_qp *(*create_qp)(struct ib_pd *pd,
			struct ib_qp_init_attr *qp_init_attr,
			struct ib_udata *udata);
	int (*modify_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_udata *udata);
	int (*query_qp)(struct ib_qp *qp,
			struct ib_qp_attr *qp_attr,
			int qp_attr_mask,
			struct ib_qp_init_attr *qp_init_attr);
	int (*destroy_qp)(struct ib_qp *qp);
	int (*post_send)(struct ib_qp *qp,
			struct ib_send_wr *send_wr,
			struct ib_send_wr **bad_send_wr);
	int (*post_recv)(struct ib_qp *qp,
			struct ib_recv_wr *recv_wr,
			struct ib_recv_wr **bad_recv_wr);
	struct ib_cq *(*create_cq)(struct ib_device *device, int cqe,
			int comp_vector,
			struct ib_ucontext *context,
			struct ib_udata *udata);
	int (*destroy_cq)(struct ib_cq *cq);
	int (*resize_cq)(struct ib_cq *cq, int cqe,
			struct ib_udata *udata);
	int (*poll_cq)(struct ib_cq *cq, int num_entries,
			struct ib_wc *wc);
	int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int (*req_notify_cq)(struct ib_cq *cq,
			enum ib_cq_notify_flags flags);
	int (*req_ncomp_notif)(struct ib_cq *cq,
			int wc_cnt);
	struct ib_mr *(*get_dma_mr)(struct ib_pd *pd,
			int mr_access_flags);
	struct ib_mr *(*reg_phys_mr)(struct ib_pd *pd,
			struct ib_phys_buf *phys_buf_array,
			int num_phys_buf,
			int mr_access_flags,
			u64 *iova_start);
	struct ib_mr *(*reg_user_mr)(struct ib_pd *pd,
			u64 start, u64 length,
			u64 virt_addr,
			int mr_access_flags,
			struct ib_udata *udata);
	int (*query_mr)(struct ib_mr *mr,
			struct ib_mr_attr *mr_attr);
	int (*dereg_mr)(struct ib_mr *mr);
	int (*rereg_phys_mr)(struct ib_mr *mr,
			int mr_rereg_mask,
			struct ib_pd *pd,
			struct ib_phys_buf *phys_buf_array,
			int num_phys_buf,
			int mr_access_flags,
			u64 *iova_start);
	struct ib_mw *(*alloc_mw)(struct ib_pd *pd);
	int (*bind_mw)(struct ib_qp *qp,
			struct ib_mw *mw,
			struct ib_mw_bind *mw_bind);
	int (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd,
			int mr_access_flags,
			struct ib_fmr_attr *fmr_attr);
	int (*map_phys_fmr)(struct ib_fmr *fmr,
			u64 *page_list, int list_len,
			u64 iova);
	int (*unmap_fmr)(struct list_head *fmr_list);
	int (*dealloc_fmr)(struct ib_fmr *fmr);
	int (*attach_mcast)(struct ib_qp *qp,
			union ib_gid *gid,
			u16 lid);
	int (*detach_mcast)(struct ib_qp *qp,
			union ib_gid *gid,
			u16 lid);
	int (*process_mad)(struct ib_device *device,
			int process_mad_flags,
			u8 port_num,
			struct ib_wc *in_wc,
			struct ib_grh *in_grh,
			struct ib_mad *in_mad,
			struct ib_mad *out_mad);

	struct ib_dma_mapping_ops *dma_ops;

	struct module *owner;
	struct class_device class_dev;
	struct kobject ports_parent;
	struct list_head port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	} reg_state;

	u64 uverbs_cmd_mask;
	int uverbs_abi_ver;

	char node_desc[64];
	__be64 node_guid;
	u8 node_type;
	u8 phys_port_cnt;
};

struct ib_client {
	char *name;
	void (*add)(struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;
};

struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

int ib_register_device(struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

int ib_register_client(struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);

static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
	return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
}

static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
{
	return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
}

/**
 * ib_modify_qp_is_ok - Check that the supplied attribute mask
 * contains all required attributes and no attributes not allowed for
 * the given QP state transition.
 * @cur_state: Current QP state
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input. It
 * checks that cur_state and next_state are valid QP states, that a
 * transition from cur_state to next_state is allowed by the IB spec,
 * and that the attribute mask supplied is allowed for the transition.
 */
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask);

int ib_register_event_handler(struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);

int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index);

int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index);

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 */
struct ib_pd *ib_alloc_pd(struct ib_device *device);

/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 */
int ib_dealloc_pd(struct ib_pd *pd);

/**
 * ib_create_ah - Creates an address handle for the given address vector.
 * @pd: The protection domain associated with the address handle.
 * @ah_attr: The attributes of the address vector.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);

/**
 * ib_init_ah_from_wc - Initializes address handle attributes from a
 *   work completion.
 * @device: Device on which the received message arrived.
 * @port_num: Port on which the received message arrived.
 * @wc: Work completion associated with the received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @ah_attr: Returned attributes that can be used when creating an address
 *   handle for replying to the message.
 */
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr);

/**
 * ib_create_ah_from_wc - Creates an address handle associated with the
 *   sender of the specified work completion.
 * @pd: The protection domain associated with the address handle.
 * @wc: Work completion information associated with a received message.
 * @grh: References the received global route header. This parameter is
 *   ignored unless the work completion indicates that the GRH is valid.
 * @port_num: The outbound port number to associate with the address.
 *
 * The address handle is used to reference a local or global destination
 * in all UD QP post sends.
 */
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num);

/**
 * ib_modify_ah - Modifies the address vector associated with an address
 *   handle.
 * @ah: The address handle to modify.
 * @ah_attr: The new address vector attributes to associate with the
 *   address handle.
 */
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_query_ah - Queries the address vector associated with an address
 *   handle.
 * @ah: The address handle to query.
 * @ah_attr: The address vector attributes associated with the address
 *   handle.
 */
int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);

/**
 * ib_destroy_ah - Destroys an address handle.
 * @ah: The address handle to destroy.
 */
int ib_destroy_ah(struct ib_ah *ah);

/**
 * ib_create_srq - Creates a SRQ associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the SRQ.
 * @srq_init_attr: A list of initial attributes required to create the
 *   SRQ. If SRQ creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created SRQ.
 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
 * requested size of the SRQ, and set to the actual values allocated
 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
 * will always be at least as large as the requested values.
 */
struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr);

/**
 * ib_modify_srq - Modifies the attributes for the specified SRQ.
 * @srq: The SRQ to modify.
 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
 *   the current values of selected SRQ attributes are returned.
 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
 *   are being modified.
 *
 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
 * the number of receives queued drops below the limit.
 */
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask);

/**
 * ib_query_srq - Returns the attribute list and current values for the
 *   specified SRQ.
 * @srq: The SRQ to query.
 * @srq_attr: The attributes of the specified SRQ.
 */
int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr);

/**
 * ib_destroy_srq - Destroys the specified SRQ.
 * @srq: The SRQ to destroy.
 */
int ib_destroy_srq(struct ib_srq *srq);

/**
 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
 * @srq: The SRQ to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the SRQ.
 */
static inline int ib_post_srq_recv(struct ib_srq *srq,
				   struct ib_recv_wr *recv_wr,
				   struct ib_recv_wr **bad_recv_wr)
{
	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
}

/**
 * ib_create_qp - Creates a QP associated with the specified protection
 *   domain.
 * @pd: The protection domain associated with the QP.
 * @qp_init_attr: A list of initial attributes required to create the
 *   QP. If QP creation succeeds, then the attributes are updated to
 *   the actual capabilities of the created QP.
 */
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_modify_qp - Modifies the attributes for the specified QP and then
 *   transitions the QP to the given state.
 * @qp: The QP to modify.
 * @qp_attr: On input, specifies the QP attributes to modify. On output,
 *   the current values of selected QP attributes are returned.
 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
 *   are being modified.
 */
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask);
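
/*
 * Example (illustrative sketch, not part of this header): moving a newly
 * created RC QP from the RESET to the INIT state with ib_modify_qp().
 * The pkey index and port number shown are hypothetical values; the
 * attribute mask follows the usual RC QP RESET->INIT transition.
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state        = IB_QPS_INIT,
 *		.pkey_index      = 0,
 *		.port_num        = 1,
 *		.qp_access_flags = IB_ACCESS_REMOTE_READ |
 *				   IB_ACCESS_REMOTE_WRITE,
 *	};
 *	int ret = ib_modify_qp(qp, &attr,
 *			       IB_QP_STATE | IB_QP_PKEY_INDEX |
 *			       IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */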

/**
 * ib_query_qp - Returns the attribute list and current values for the
 *   specified QP.
 * @qp: The QP to query.
 * @qp_attr: The attributes of the specified QP.
 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
 * @qp_init_attr: Additional attributes of the selected QP.
 *
 * The qp_attr_mask may be used to limit the query to gathering only the
 * selected attributes.
 */
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr);

/**
 * ib_destroy_qp - Destroys the specified QP.
 * @qp: The QP to destroy.
 */
int ib_destroy_qp(struct ib_qp *qp);

/**
 * ib_post_send - Posts a list of work requests to the send queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @send_wr: A list of work requests to post on the send queue.
 * @bad_send_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_send(struct ib_qp *qp,
			       struct ib_send_wr *send_wr,
			       struct ib_send_wr **bad_send_wr)
{
	return qp->device->post_send(qp, send_wr, bad_send_wr);
}
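
/*
 * Example (illustrative sketch, not part of this header): posting a single
 * signaled SEND of a buffer that has already been registered.  The values
 * buf_dma, len, lkey and MY_SEND_WRID are hypothetical.
 *
 *	struct ib_sge sge = {
 *		.addr   = buf_dma,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = MY_SEND_WRID,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */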

/**
 * ib_post_recv - Posts a list of work requests to the receive queue of
 *   the specified QP.
 * @qp: The QP to post the work request on.
 * @recv_wr: A list of work requests to post on the receive queue.
 * @bad_recv_wr: On an immediate failure, this parameter will reference
 *   the work request that failed to be posted on the QP.
 */
static inline int ib_post_recv(struct ib_qp *qp,
			       struct ib_recv_wr *recv_wr,
			       struct ib_recv_wr **bad_recv_wr)
{
	return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
}

/**
 * ib_create_cq - Creates a CQ on the specified device.
 * @device: The device on which to create the CQ.
 * @comp_handler: A user-specified callback that is invoked when a
 *   completion event occurs on the CQ.
 * @event_handler: A user-specified callback that is invoked when an
 *   asynchronous event not associated with a completion occurs on the CQ.
 * @cq_context: Context associated with the CQ returned to the user via
 *   the associated completion and event handlers.
 * @cqe: The minimum size of the CQ.
 * @comp_vector: Completion vector used to signal completion events.
 *   Must be >= 0 and < context->num_comp_vectors.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector);

/**
 * ib_resize_cq - Modifies the capacity of the CQ.
 * @cq: The CQ to resize.
 * @cqe: The minimum size of the CQ.
 *
 * Users can examine the cq structure to determine the actual CQ size.
 */
int ib_resize_cq(struct ib_cq *cq, int cqe);

/**
 * ib_destroy_cq - Destroys the specified CQ.
 * @cq: The CQ to destroy.
 */
int ib_destroy_cq(struct ib_cq *cq);

/**
 * ib_poll_cq - poll a CQ for completion(s)
 * @cq: the CQ being polled
 * @num_entries: maximum number of completions to return
 * @wc: array of at least @num_entries &struct ib_wc where completions
 *   will be returned
 *
 * Poll a CQ for (possibly multiple) completions. If the return value
 * is < 0, an error occurred. If the return value is >= 0, it is the
 * number of completions returned. If the return value is
 * non-negative and < num_entries, then the CQ was emptied.
 */
static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
			     struct ib_wc *wc)
{
	return cq->device->poll_cq(cq, num_entries, wc);
}

/**
 * ib_peek_cq - Returns the number of unreaped completions currently
 *   on the specified CQ.
 * @cq: The CQ to peek.
 * @wc_cnt: A minimum number of unreaped completions to check for.
 *
 * If the number of unreaped completions is greater than or equal to wc_cnt,
 * this function returns wc_cnt, otherwise, it returns the actual number of
 * unreaped completions.
 */
int ib_peek_cq(struct ib_cq *cq, int wc_cnt);

/**
 * ib_req_notify_cq - Request completion notification on a CQ.
 * @cq: The CQ to generate an event for.
 * @flags:
 *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
 *   to request an event on the next solicited event or next work
 *   completion of any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
 *   may also be |ed in to request a hint about missed events, as
 *   described below.
 *
 * Return Value:
 *    < 0 means an error occurred while requesting notification
 *   == 0 means notification was requested successfully, and if
 *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
 *        were missed and it is safe to wait for another event. In
 *        this case it is guaranteed that any work completions added
 *        to the CQ since the last CQ poll will trigger a completion
 *        notification event.
 *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
 *        in. It means that the consumer must poll the CQ again to
 *        make sure it is empty to avoid missing an event because of a
 *        race between requesting notification and an entry being
 *        added to the CQ. This return value means it is possible
 *        (but not guaranteed) that a work completion has been added
 *        to the CQ since the last poll without triggering a
 *        completion notification event.
 */
static inline int ib_req_notify_cq(struct ib_cq *cq,
				   enum ib_cq_notify_flags flags)
{
	return cq->device->req_notify_cq(cq, flags);
}
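
/*
 * Example (illustrative sketch, not part of this header): the drain/re-arm
 * pattern implied by the return values documented above.  The CQ is polled
 * until empty, re-armed, and polled again whenever ib_req_notify_cq()
 * reports possibly-missed events.  process_wc() is a hypothetical consumer
 * function.
 *
 *	struct ib_wc wc;
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process_wc(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */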

/**
 * ib_req_ncomp_notif - Request completion notification when there are
 *   at least the specified number of unreaped completions on the CQ.
 * @cq: The CQ to generate an event for.
 * @wc_cnt: The number of unreaped completions that should be on the
 *   CQ before an event is generated.
 */
static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
{
	return cq->device->req_ncomp_notif ?
		cq->device->req_ncomp_notif(cq, wc_cnt) :
		-ENOSYS;
}

/**
 * ib_get_dma_mr - Returns a memory region for system memory that is
 *   usable for DMA.
 * @pd: The protection domain associated with the memory region.
 * @mr_access_flags: Specifies the memory access rights.
 *
 * Note that the ib_dma_*() functions defined below must be used
 * to create/destroy addresses used with the Lkey or Rkey returned
 * by ib_get_dma_mr().
 */
struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);

/**
 * ib_dma_mapping_error - check a DMA addr for error
 * @dev: The device for which the dma_addr was created
 * @dma_addr: The DMA address to check
 */
static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	if (dev->dma_ops)
		return dev->dma_ops->mapping_error(dev, dma_addr);
	return dma_mapping_error(dma_addr);
}

/**
 * ib_dma_map_single - Map a kernel virtual address to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @cpu_addr: The kernel virtual address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

/**
 * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}
|
1509 |
|
|
|
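/*
 * Illustrative sketch (not part of the original header): mapping a
 * kernel buffer for a device read, checking the mapping, and tearing
 * it down after the transfer completes.  buf and len are assumptions.
 *
 *	u64 dma_addr;
 *
 *	dma_addr = ib_dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (ib_dma_mapping_error(dev, dma_addr))
 *		return -ENOMEM;
 *	...
 *	ib_dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
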
/**
 * ib_dma_map_page - Map a physical page to DMA address
 * @dev: The device for which the dma_addr is to be created
 * @page: The page to be mapped
 * @offset: The offset within the page
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline u64 ib_dma_map_page(struct ib_device *dev,
				  struct page *page,
				  unsigned long offset,
				  size_t size,
				  enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_page(dev, page, offset, size, direction);
	return dma_map_page(dev->dma_device, page, offset, size, direction);
}

/**
 * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_page(struct ib_device *dev,
				     u64 addr, size_t size,
				     enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_page(dev, addr, size, direction);
	else
		dma_unmap_page(dev->dma_device, addr, size, direction);
}

/**
 * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
 * @dev: The device for which the DMA addresses are to be created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	if (dev->dma_ops)
		return dev->dma_ops->map_sg(dev, sg, nents, direction);
	return dma_map_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
 * @dev: The device for which the DMA addresses were created
 * @sg: The array of scatter/gather entries
 * @nents: The number of scatter/gather entries
 * @direction: The direction of the DMA
 */
static inline void ib_dma_unmap_sg(struct ib_device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
	else
		dma_unmap_sg(dev->dma_device, sg, nents, direction);
}

/**
 * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline u64 ib_sg_dma_address(struct ib_device *dev,
				    struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_address(dev, sg);
	return sg_dma_address(sg);
}

/**
 * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
 * @dev: The device for which the DMA addresses were created
 * @sg: The scatter/gather entry
 */
static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
					 struct scatterlist *sg)
{
	if (dev->dma_ops)
		return dev->dma_ops->dma_len(dev, sg);
	return sg_dma_len(sg);
}

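/*
 * Illustrative sketch (not part of the original header): mapping an
 * array-based scatterlist and walking the mapped entries with the
 * ib_sg_dma_*() accessors.  sg and nents are assumed to have been set
 * up by the caller.
 *
 *	int i, mapped;
 *
 *	mapped = ib_dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	for (i = 0; i < mapped; i++) {
 *		u64          addr = ib_sg_dma_address(dev, &sg[i]);
 *		unsigned int len  = ib_sg_dma_len(dev, &sg[i]);
 *
 *		... build a work request SGE from addr/len ...
 *	}
 *	ib_dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
 */
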
/**
 * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
					      u64 addr,
					      size_t size,
					      enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
	else
		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
}

/**
 * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
 * @dev: The device for which the DMA address was created
 * @addr: The DMA address
 * @size: The size of the region in bytes
 * @dir: The direction of the DMA
 */
static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
						 u64 addr,
						 size_t size,
						 enum dma_data_direction dir)
{
	if (dev->dma_ops)
		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
	else
		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
}

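/*
 * Illustrative sketch (not part of the original header): bracketing CPU
 * access to a streaming mapping that the device may also touch.
 * dma_addr and len are assumptions.
 *
 *	ib_dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	ib_dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
 */
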
/**
 * ib_dma_alloc_coherent - Allocate memory and map it for DMA
 * @dev: The device for which the DMA address is requested
 * @size: The size of the region to allocate in bytes
 * @dma_handle: A pointer for returning the DMA address of the region
 * @flag: memory allocator flags
 */
static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
					  size_t size,
					  u64 *dma_handle,
					  gfp_t flag)
{
	if (dev->dma_ops)
		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
	else {
		dma_addr_t handle;
		void *ret;

		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
		*dma_handle = handle;
		return ret;
	}
}

/**
 * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
 * @dev: The device for which the DMA addresses were allocated
 * @size: The size of the region
 * @cpu_addr: the address returned by ib_dma_alloc_coherent()
 * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
 */
static inline void ib_dma_free_coherent(struct ib_device *dev,
					size_t size, void *cpu_addr,
					u64 dma_handle)
{
	if (dev->dma_ops)
		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
	else
		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}

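/*
 * Illustrative sketch (not part of the original header): a coherent
 * buffer shared with the HCA for the lifetime of an object.  size is
 * an assumption.
 *
 *	u64 dma_handle;
 *	void *cpu_addr;
 *
 *	cpu_addr = ib_dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	ib_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */
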
/**
 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
 *   by an HCA.
 * @pd: The protection domain assigned to the registered region.
 * @phys_buf_array: Specifies a list of physical buffers to use in the
 *   memory region.
 * @num_phys_buf: Specifies the size of the phys_buf_array.
 * @mr_access_flags: Specifies the memory access rights.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start);

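/*
 * Illustrative sketch (not part of the original header): registering a
 * single physically contiguous buffer.  buf_dma_addr and buf_size are
 * assumptions; pd is a previously allocated protection domain.
 *
 *	struct ib_phys_buf pbuf = {
 *		.addr = buf_dma_addr,
 *		.size = buf_size,
 *	};
 *	u64 iova = buf_dma_addr;
 *	struct ib_mr *mr;
 *
 *	mr = ib_reg_phys_mr(pd, &pbuf, 1,
 *			    IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *			    &iova);
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 */
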
/**
 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * Conceptually, this call performs the functions of deregistering the
 * memory region followed by registering a physical memory region.  Where
 * possible, resources are reused instead of deallocated and reallocated.
 * @mr: The memory region to modify.
 * @mr_rereg_mask: A bit-mask used to indicate which of the following
 *   properties of the memory region are being modified.
 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
 *   the new protection domain to associate with the memory region,
 *   otherwise, this parameter is ignored.
 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies a list of physical buffers to use in the new
 *   translation, otherwise, this parameter is ignored.
 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
 *   field specifies the size of the phys_buf_array, otherwise, this
 *   parameter is ignored.
 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
 *   field specifies the new memory access rights, otherwise, this
 *   parameter is ignored.
 * @iova_start: The offset of the region's starting I/O virtual address.
 */
int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start);

/**
 * ib_query_mr - Retrieves information about a specific memory region.
 * @mr: The memory region to retrieve information about.
 * @mr_attr: The attributes of the specified memory region.
 */
int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);

/**
 * ib_dereg_mr - Deregisters a memory region and removes it from the
 *   HCA translation table.
 * @mr: The memory region to deregister.
 */
int ib_dereg_mr(struct ib_mr *mr);

/**
 * ib_alloc_mw - Allocates a memory window.
 * @pd: The protection domain associated with the memory window.
 */
struct ib_mw *ib_alloc_mw(struct ib_pd *pd);

/**
 * ib_bind_mw - Posts a work request to the send queue of the specified
 *   QP, which binds the memory window to the given address range and
 *   remote access attributes.
 * @qp: QP to post the bind work request on.
 * @mw: The memory window to bind.
 * @mw_bind: Specifies information about the memory window, including
 *   its address range, remote access rights, and associated memory region.
 */
static inline int ib_bind_mw(struct ib_qp *qp,
			     struct ib_mw *mw,
			     struct ib_mw_bind *mw_bind)
{
	/* XXX reference counting in corresponding MR? */
	return mw->device->bind_mw ?
		mw->device->bind_mw(qp, mw, mw_bind) :
		-ENOSYS;
}

/**
 * ib_dealloc_mw - Deallocates a memory window.
 * @mw: The memory window to deallocate.
 */
int ib_dealloc_mw(struct ib_mw *mw);

/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
 * @pd: The protection domain associated with the unmapped region.
 * @mr_access_flags: Specifies the memory access rights.
 * @fmr_attr: Attributes of the unmapped region.
 *
 * A fast memory region must be mapped before it can be used as part of
 * a work request.
 */
struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr);

/**
 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
 * @fmr: The fast memory region to associate with the pages.
 * @page_list: An array of physical pages to map to the fast memory region.
 * @list_len: The number of pages in page_list.
 * @iova: The I/O virtual address to use with the mapped region.
 */
static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
				  u64 *page_list, int list_len,
				  u64 iova)
{
	return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
}

/**
 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
 * @fmr_list: A linked list of fast memory regions to unmap.
 */
int ib_unmap_fmr(struct list_head *fmr_list);

/**
 * ib_dealloc_fmr - Deallocates a fast memory region.
 * @fmr: The fast memory region to deallocate.
 */
int ib_dealloc_fmr(struct ib_fmr *fmr);

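/*
 * Illustrative sketch (not part of the original header): the FMR
 * lifecycle of allocate, map a page list, use, unmap, and free.  The
 * attribute values and page_list/npages/io_virt are assumptions.
 *
 *	struct ib_fmr_attr attr = {
 *		.max_pages  = 64,
 *		.max_maps   = 32,
 *		.page_shift = PAGE_SHIFT,
 *	};
 *	struct ib_fmr *fmr;
 *	LIST_HEAD(fmr_list);
 *	int ret;
 *
 *	fmr = ib_alloc_fmr(pd, IB_ACCESS_LOCAL_WRITE, &attr);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *	ret = ib_map_phys_fmr(fmr, page_list, npages, io_virt);
 *	...
 *	list_add_tail(&fmr->list, &fmr_list);
 *	ib_unmap_fmr(&fmr_list);
 *	ib_dealloc_fmr(fmr);
 */
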
/**
 * ib_attach_mcast - Attaches the specified QP to a multicast group.
 * @qp: QP to attach to the multicast group.  The QP must be type
 *   IB_QPT_UD.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 *
 * In order to send and receive multicast packets, subnet
 * administration must have created the multicast group and configured
 * the fabric appropriately.  The port associated with the specified
 * QP must also be a member of the multicast group.
 */
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

/**
 * ib_detach_mcast - Detaches the specified QP from a multicast group.
 * @qp: QP to detach from the multicast group.
 * @gid: Multicast group GID.
 * @lid: Multicast group LID in host byte order.
 */
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);

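/*
 * Illustrative sketch (not part of the original header): joining and
 * leaving a multicast group on a UD QP.  mgid and mlid are assumed to
 * have been obtained from the subnet administrator.
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	if (ret)
 *		return ret;
 *	...
 *	ib_detach_mcast(qp, &mgid, mlid);
 */
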
#endif /* IB_VERBS_H */