1 |
62 |
marcus.erl |
/*
|
2 |
|
|
* Copyright (c) 2004 Topspin Communications. All rights reserved.
|
3 |
|
|
* Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
|
4 |
|
|
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
|
5 |
|
|
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
|
6 |
|
|
*
|
7 |
|
|
* This software is available to you under a choice of one of two
|
8 |
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
9 |
|
|
* General Public License (GPL) Version 2, available from the file
|
10 |
|
|
* COPYING in the main directory of this source tree, or the
|
11 |
|
|
* OpenIB.org BSD license below:
|
12 |
|
|
*
|
13 |
|
|
* Redistribution and use in source and binary forms, with or
|
14 |
|
|
* without modification, are permitted provided that the following
|
15 |
|
|
* conditions are met:
|
16 |
|
|
*
|
17 |
|
|
* - Redistributions of source code must retain the above
|
18 |
|
|
* copyright notice, this list of conditions and the following
|
19 |
|
|
* disclaimer.
|
20 |
|
|
*
|
21 |
|
|
* - Redistributions in binary form must reproduce the above
|
22 |
|
|
* copyright notice, this list of conditions and the following
|
23 |
|
|
* disclaimer in the documentation and/or other materials
|
24 |
|
|
* provided with the distribution.
|
25 |
|
|
*
|
26 |
|
|
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
27 |
|
|
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
28 |
|
|
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
29 |
|
|
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
|
30 |
|
|
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
|
31 |
|
|
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
32 |
|
|
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
33 |
|
|
* SOFTWARE.
|
34 |
|
|
*/
|
35 |
|
|
|
36 |
|
|
#include <linux/init.h>
|
37 |
|
|
|
38 |
|
|
#include <linux/mlx4/cmd.h>
|
39 |
|
|
#include <linux/mlx4/qp.h>
|
40 |
|
|
|
41 |
|
|
#include "mlx4.h"
|
42 |
|
|
#include "icm.h"
|
43 |
|
|
|
44 |
|
|
/*
 * Dispatch an asynchronous hardware event to the QP it targets.
 *
 * The QP is looked up by number under the table lock and a reference
 * is taken so it cannot be freed while its event handler runs; the
 * handler itself is invoked outside the lock.  Dropping the last
 * reference completes qp->free, which mlx4_qp_free() waits on.
 */
void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
{
	struct mlx4_qp_table *table = &mlx4_priv(dev)->qp_table;
	struct mlx4_qp *found;

	spin_lock(&table->lock);

	found = __mlx4_qp_lookup(dev, qpn);
	if (found)
		atomic_inc(&found->refcount);

	spin_unlock(&table->lock);

	if (!found) {
		mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	found->event(found, event_type);

	if (atomic_dec_and_test(&found->refcount))
		complete(&found->free);
}
|
67 |
|
|
|
68 |
|
|
/*
 * Post a QP state-transition command to the firmware.
 *
 * @mtt:       MTT describing the QP's buffer; only consulted for the
 *             RST->INIT transition, where its address and page size
 *             are programmed into @context.
 * @cur_state: state the QP is currently in.
 * @new_state: state to move the QP to.
 * @context:   QP context to hand to the firmware (modified in place
 *             for RST->INIT; its local_qpn is always overwritten in
 *             the mailbox copy).
 * @optpar:    optional-parameter mask, written ahead of the context.
 * @sqd_event: if non-zero, request an event on entering SQ drained
 *             state (encoded in bit 31 of the input modifier).
 *
 * Returns 0 on success, -EINVAL for an unsupported transition, or a
 * negative errno from the command interface.
 */
int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		   enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
		   struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
		   int sqd_event, struct mlx4_qp *qp)
{
	/*
	 * Firmware opcode for each legal (current, next) state pair; a
	 * zero entry marks a transition the hardware does not support.
	 * Note every row's [MLX4_QP_STATE_RST] entry is MLX4_CMD_2RST_QP,
	 * which is special-cased below.
	 */
	static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
		[MLX4_QP_STATE_RST] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_RST2INIT_QP,
		},
		[MLX4_QP_STATE_INIT]  = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_INIT]	= MLX4_CMD_INIT2INIT_QP,
			[MLX4_QP_STATE_RTR]	= MLX4_CMD_INIT2RTR_QP,
		},
		[MLX4_QP_STATE_RTR]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTR2RTS_QP,
		},
		[MLX4_QP_STATE_RTS]   = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_RTS2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_RTS2SQD_QP,
		},
		[MLX4_QP_STATE_SQD] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQD2RTS_QP,
			[MLX4_QP_STATE_SQD]	= MLX4_CMD_SQD2SQD_QP,
		},
		[MLX4_QP_STATE_SQER] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
			[MLX4_QP_STATE_RTS]	= MLX4_CMD_SQERR2RTS_QP,
		},
		[MLX4_QP_STATE_ERR] = {
			[MLX4_QP_STATE_RST]	= MLX4_CMD_2RST_QP,
			[MLX4_QP_STATE_ERR]	= MLX4_CMD_2ERR_QP,
		}
	};

	struct mlx4_cmd_mailbox *mailbox;
	int ret = 0;

	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
	    !op[cur_state][new_state])
		return -EINVAL;

	/* Moving to RST takes no context, so no mailbox is needed. */
	if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
		return mlx4_cmd(dev, 0, qp->qpn, 2,
				MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* RST->INIT is where the QP's buffer translation gets programmed. */
	if (cur_state == MLX4_QP_STATE_RST && new_state == MLX4_QP_STATE_INIT) {
		u64 mtt_addr = mlx4_mtt_addr(dev, mtt);
		context->mtt_base_addr_h = mtt_addr >> 32;
		context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
		context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
	}

	/* Mailbox layout: optional-parameter mask at offset 0, context at 8. */
	*(__be32 *) mailbox->buf = cpu_to_be32(optpar);
	memcpy(mailbox->buf + 8, context, sizeof *context);

	((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
		cpu_to_be32(qp->qpn);

	/*
	 * Input modifier bit 31 requests an SQ-drained event.  The
	 * op_modifier ternary is 0 here in practice: every transition
	 * to RST uses MLX4_CMD_2RST_QP and was early-returned above.
	 */
	ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
		       new_state == MLX4_QP_STATE_RST ? 2 : 0,
		       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
|
149 |
|
|
|
150 |
|
|
/*
 * Allocate a QP number and all per-QP ICM resources, and make the QP
 * visible to the event dispatcher.
 *
 * @sqpn: if non-zero, use this fixed QP number (special QP); if zero,
 *        take a number from the QP bitmap allocator.
 *
 * On success the QP has refcount 1, qp->free initialized, and is
 * reachable through the radix tree; returns 0.  On failure every
 * resource acquired so far is unwound (the bitmap entry only if we
 * allocated it ourselves) and a negative errno is returned.
 */
int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_qp_table *qp_table = &priv->qp_table;
	int err;

	if (sqpn)
		qp->qpn = sqpn;
	else {
		qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
		if (qp->qpn == -1)
			return -ENOMEM;
	}

	/* Pin the ICM pages backing each per-QP firmware structure. */
	err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
	if (err)
		goto err_put_qp;

	err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
	if (err)
		goto err_put_auxc;

	err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
	if (err)
		goto err_put_altc;

	err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
	if (err)
		goto err_put_rdmarc;

	/* Publish the QP so mlx4_qp_event() can look it up by number. */
	spin_lock_irq(&qp_table->lock);
	err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
	spin_unlock_irq(&qp_table->lock);
	if (err)
		goto err_put_cmpt;

	atomic_set(&qp->refcount, 1);
	init_completion(&qp->free);

	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_put_cmpt:
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);

err_put_rdmarc:
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);

err_put_altc:
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);

err_put_auxc:
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);

err_put_qp:
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

err_out:
	/* Only return the number if it came from the bitmap allocator. */
	if (!sqpn)
		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
|
217 |
|
|
|
218 |
|
|
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
|
219 |
|
|
{
|
220 |
|
|
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
|
221 |
|
|
unsigned long flags;
|
222 |
|
|
|
223 |
|
|
spin_lock_irqsave(&qp_table->lock, flags);
|
224 |
|
|
radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));
|
225 |
|
|
spin_unlock_irqrestore(&qp_table->lock, flags);
|
226 |
|
|
}
|
227 |
|
|
EXPORT_SYMBOL_GPL(mlx4_qp_remove);
|
228 |
|
|
|
229 |
|
|
/*
 * Release all resources held by @qp.
 *
 * Drops the reference taken at allocation time and waits until every
 * in-flight event handler (each holds its own reference, see
 * mlx4_qp_event()) has completed qp->free.  Only then are the ICM
 * table references returned and the QP number released.
 *
 * NOTE(review): callers presumably must run mlx4_qp_remove() first so
 * no new references can be taken -- confirm against call sites.
 */
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
	struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;

	/* Drop our reference, then wait for any event handlers to finish. */
	if (atomic_dec_and_test(&qp->refcount))
		complete(&qp->free);
	wait_for_completion(&qp->free);

	/* Return the ICM references taken in mlx4_qp_alloc(), in reverse. */
	mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
	mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);

	/* Special QPs (below sqp_start + 8) were not bitmap-allocated. */
	if (qp->qpn >= dev->caps.sqp_start + 8)
		mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
|
247 |
|
|
|
248 |
|
|
/*
 * Tell the firmware where the block of special QPs begins.
 * A @base_qpn of 0 is used at cleanup time to disable the block.
 */
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
	return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
			MLX4_CMD_TIME_CLASS_B);
}
|
253 |
|
|
|
254 |
|
|
int mlx4_init_qp_table(struct mlx4_dev *dev)
|
255 |
|
|
{
|
256 |
|
|
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
|
257 |
|
|
int err;
|
258 |
|
|
|
259 |
|
|
spin_lock_init(&qp_table->lock);
|
260 |
|
|
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
|
261 |
|
|
|
262 |
|
|
/*
|
263 |
|
|
* We reserve 2 extra QPs per port for the special QPs. The
|
264 |
|
|
* block of special QPs must be aligned to a multiple of 8, so
|
265 |
|
|
* round up.
|
266 |
|
|
*/
|
267 |
|
|
dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
|
268 |
|
|
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
|
269 |
|
|
(1 << 24) - 1, dev->caps.sqp_start + 8);
|
270 |
|
|
if (err)
|
271 |
|
|
return err;
|
272 |
|
|
|
273 |
|
|
return mlx4_CONF_SPECIAL_QP(dev, dev->caps.sqp_start);
|
274 |
|
|
}
|
275 |
|
|
|
276 |
|
|
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
|
277 |
|
|
{
|
278 |
|
|
mlx4_CONF_SPECIAL_QP(dev, 0);
|
279 |
|
|
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
|
280 |
|
|
}
|
281 |
|
|
|
282 |
|
|
int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
|
283 |
|
|
struct mlx4_qp_context *context)
|
284 |
|
|
{
|
285 |
|
|
struct mlx4_cmd_mailbox *mailbox;
|
286 |
|
|
int err;
|
287 |
|
|
|
288 |
|
|
mailbox = mlx4_alloc_cmd_mailbox(dev);
|
289 |
|
|
if (IS_ERR(mailbox))
|
290 |
|
|
return PTR_ERR(mailbox);
|
291 |
|
|
|
292 |
|
|
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
|
293 |
|
|
MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
|
294 |
|
|
if (!err)
|
295 |
|
|
memcpy(context, mailbox->buf + 8, sizeof *context);
|
296 |
|
|
|
297 |
|
|
mlx4_free_cmd_mailbox(dev, mailbox);
|
298 |
|
|
return err;
|
299 |
|
|
}
|
300 |
|
|
EXPORT_SYMBOL_GPL(mlx4_qp_query);
|
301 |
|
|
|