/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "fw.h"

enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
        MLX4_EQ_ENTRY_SIZE      = 0x20
};

/*
 * Must be packed because start is 64 bits but only aligned to 32 bits.
 */
struct mlx4_eq_context {
        __be32                  flags;
        u16                     reserved1[3];
        __be16                  page_offset;
        u8                      log_eq_size;
        u8                      reserved2[4];
        u8                      eq_period;
        u8                      reserved3;
        u8                      eq_max_count;
        u8                      reserved4[3];
        u8                      intr;
        u8                      log_page_size;
        u8                      reserved5[2];
        u8                      mtt_base_addr_h;
        __be32                  mtt_base_addr_l;
        u32                     reserved6[2];
        __be32                  consumer_index;
        __be32                  producer_index;
        u32                     reserved7[4];
};

#define MLX4_EQ_STATUS_OK          ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
#define MLX4_EQ_OWNER_SW           ( 0 << 24)
#define MLX4_EQ_OWNER_HW           ( 1 << 24)
#define MLX4_EQ_FLAG_EC            ( 1 << 18)
#define MLX4_EQ_FLAG_OI            ( 1 << 17)
#define MLX4_EQ_STATE_ARMED        ( 9 <<  8)
#define MLX4_EQ_STATE_FIRED        (10 <<  8)
#define MLX4_EQ_STATE_ALWAYS_ARMED (11 <<  8)

#define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG)           | \
                               (1ull << MLX4_EVENT_TYPE_COMM_EST)           | \
                               (1ull << MLX4_EVENT_TYPE_SQ_DRAINED)         | \
                               (1ull << MLX4_EVENT_TYPE_CQ_ERROR)           | \
                               (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR)     | \
                               (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED)    | \
                               (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
                               (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_PORT_CHANGE)        | \
                               (1ull << MLX4_EVENT_TYPE_ECC_DETECT)         | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
                               (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)          | \
                               (1ull << MLX4_EVENT_TYPE_CMD))

struct mlx4_eqe {
        u8                      reserved1;
        u8                      type;
        u8                      reserved2;
        u8                      subtype;
        union {
                u32             raw[6];
                struct {
                        __be32  cqn;
                } __attribute__((packed)) comp;
                struct {
                        u16     reserved1;
                        __be16  token;
                        u32     reserved2;
                        u8      reserved3[3];
                        u8      status;
                        __be64  out_param;
                } __attribute__((packed)) cmd;
                struct {
                        __be32  qpn;
                } __attribute__((packed)) qp;
                struct {
                        __be32  srqn;
                } __attribute__((packed)) srq;
                struct {
                        __be32  cqn;
                        u32     reserved1;
                        u8      reserved2[3];
                        u8      syndrome;
                } __attribute__((packed)) cq_err;
                struct {
                        u32     reserved1[2];
                        __be32  port;
                } __attribute__((packed)) port_change;
        }                       event;
        u8                      reserved3[3];
        u8                      owner;
} __attribute__((packed));

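/*
 * Ring the EQ doorbell: publish the low 24 bits of the consumer index
 * and, when req_not is set, use bit 31 to request another event
 * notification from the HCA.
 */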
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
        __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
                                               req_not << 31),
                     eq->doorbell);
        /* We still want ordering, just not swabbing, so add a barrier */
        mb();
}

static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
{
        unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
        return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
}

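/*
 * The HCA flips the EQE ownership bit each time it wraps around the
 * queue, and the (cons_index & nent) bit tracks the polarity software
 * expects on the current pass.  A mismatch means hardware has not yet
 * written this entry, so there is nothing new to poll.
 */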
static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
        return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}

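/*
 * Drain every software-owned EQE from the queue, dispatching each event
 * to the CQ/QP/SRQ/command handlers, and update the consumer index often
 * enough that the HCA never sees the queue as overflowing.
 */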
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_eqe *eqe;
        int cqn;
        int eqes_found = 0;
        int set_ci = 0;

        while ((eqe = next_eqe_sw(eq))) {
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
                 */
                rmb();

                switch (eqe->type) {
                case MLX4_EVENT_TYPE_COMP:
                        cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
                        mlx4_cq_completion(dev, cqn);
                        break;

                case MLX4_EVENT_TYPE_PATH_MIG:
                case MLX4_EVENT_TYPE_COMM_EST:
                case MLX4_EVENT_TYPE_SQ_DRAINED:
                case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
                case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
                case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
                case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
                case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
                        mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_SRQ_LIMIT:
                case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
                        mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_CMD:
                        mlx4_cmd_event(dev,
                                       be16_to_cpu(eqe->event.cmd.token),
                                       eqe->event.cmd.status,
                                       be64_to_cpu(eqe->event.cmd.out_param));
                        break;

                case MLX4_EVENT_TYPE_PORT_CHANGE:
                        mlx4_dispatch_event(dev, eqe->type, eqe->subtype,
                                            be32_to_cpu(eqe->event.port_change.port) >> 28);
                        break;

                case MLX4_EVENT_TYPE_CQ_ERROR:
                        mlx4_warn(dev, "CQ %s on CQN %06x\n",
                                  eqe->event.cq_err.syndrome == 1 ?
                                  "overrun" : "access violation",
                                  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
                        mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
                                      eqe->type);
                        break;

                case MLX4_EVENT_TYPE_EQ_OVERFLOW:
                        mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
                        break;

                case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
                case MLX4_EVENT_TYPE_ECC_DETECT:
                default:
                        mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
                                  eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
                        break;
                };

                ++eq->cons_index;
                eqes_found = 1;
                ++set_ci;

                /*
                 * The HCA will think the queue has overflowed if we
                 * don't tell it we've been processing events.  We
                 * create our EQs with MLX4_NUM_SPARE_EQE extra
                 * entries, so we must update our consumer index at
                 * least that often.
                 */
                if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
                         */
                        eq_set_ci(eq, 0);
                        set_ci = 0;
                }
        }

        eq_set_ci(eq, 1);

        return eqes_found;
}

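/*
 * Legacy (INTx) interrupt handler: clear the interrupt and poll every
 * EQ, since a single interrupt line is shared by all of them.
 */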
static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
{
        struct mlx4_dev *dev = dev_ptr;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int work = 0;
        int i;

        writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);

        return IRQ_RETVAL(work);
}

static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
{
        struct mlx4_eq  *eq  = eq_ptr;
        struct mlx4_dev *dev = eq->dev;

        mlx4_eq_int(dev, eq);

        /* MSI-X vectors always belong to us */
        return IRQ_HANDLED;
}

static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
                        int eq_num)
{
        return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
                        0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
}

static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
                        MLX4_CMD_TIME_CLASS_A);
}

static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
                            MLX4_CMD_TIME_CLASS_A);
}

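/*
 * Each UAR page holds the doorbells for four EQs (eq->eqn / 4 selects
 * the page); map the page on first use and return a pointer to this
 * EQ's doorbell register within it.
 */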
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int index;

        index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;

        if (!priv->eq_table.uar_map[index]) {
                priv->eq_table.uar_map[index] =
                        ioremap(pci_resource_start(dev->pdev, 2) +
                                ((eq->eqn / 4) << PAGE_SHIFT),
                                PAGE_SIZE);
                if (!priv->eq_table.uar_map[index]) {
                        mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
                                 eq->eqn);
                        return NULL;
                }
        }

        return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
}

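/*
 * Create an EQ: round the requested size up to a power of two, allocate
 * the EQE buffer one page at a time, write the MTT entries describing
 * it, and hand the EQ context to the HCA with SW2HW_EQ.
 */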
static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
                          u8 intr, struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_eq_context *eq_context;
        int npages;
        u64 *dma_list = NULL;
        dma_addr_t t;
        u64 mtt_addr;
        int err = -ENOMEM;
        int i;

        eq->dev   = dev;
        eq->nent  = roundup_pow_of_two(max(nent, 2));
        npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;

        eq->page_list = kmalloc(npages * sizeof *eq->page_list,
                                GFP_KERNEL);
        if (!eq->page_list)
                goto err_out;

        for (i = 0; i < npages; ++i)
                eq->page_list[i].buf = NULL;

        dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
        if (!dma_list)
                goto err_out_free;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                goto err_out_free;
        eq_context = mailbox->buf;

        for (i = 0; i < npages; ++i) {
                eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
                                                          PAGE_SIZE, &t, GFP_KERNEL);
                if (!eq->page_list[i].buf)
                        goto err_out_free_pages;

                dma_list[i] = t;
                eq->page_list[i].map = t;

                memset(eq->page_list[i].buf, 0, PAGE_SIZE);
        }

        eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
        if (eq->eqn == -1)
                goto err_out_free_pages;

        eq->doorbell = mlx4_get_eq_uar(dev, eq);
        if (!eq->doorbell) {
                err = -ENOMEM;
                goto err_out_free_eq;
        }

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
        if (err)
                goto err_out_free_eq;

        err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
        if (err)
                goto err_out_free_mtt;

        memset(eq_context, 0, sizeof *eq_context);
        eq_context->flags         = cpu_to_be32(MLX4_EQ_STATUS_OK   |
                                                MLX4_EQ_STATE_ARMED);
        eq_context->log_eq_size   = ilog2(eq->nent);
        eq_context->intr          = intr;
        eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;

        mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
        eq_context->mtt_base_addr_h = mtt_addr >> 32;
        eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);

        err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
        if (err) {
                mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
                goto err_out_free_mtt;
        }

        kfree(dma_list);
        mlx4_free_cmd_mailbox(dev, mailbox);

        eq->cons_index = 0;

        return err;

err_out_free_mtt:
        mlx4_mtt_cleanup(dev, &eq->mtt);

err_out_free_eq:
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);

err_out_free_pages:
        for (i = 0; i < npages; ++i)
                if (eq->page_list[i].buf)
                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
                                          eq->page_list[i].buf,
                                          eq->page_list[i].map);

        mlx4_free_cmd_mailbox(dev, mailbox);

err_out_free:
        kfree(eq->page_list);
        kfree(dma_list);

err_out:
        return err;
}

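/*
 * Tear down an EQ: move it back to software ownership with HW2SW_EQ,
 * then release its MTT entries, DMA pages and EQ number.
 */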
static void mlx4_free_eq(struct mlx4_dev *dev,
                         struct mlx4_eq *eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
        int i;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return;

        err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
        if (err)
                mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);

        if (0) {
                mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
                for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpup(mailbox->buf + i * 4));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        mlx4_mtt_cleanup(dev, &eq->mtt);
        for (i = 0; i < npages; ++i)
                pci_free_consistent(dev->pdev, PAGE_SIZE,
                                    eq->page_list[i].buf,
                                    eq->page_list[i].map);

        kfree(eq->page_list);
        mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
        mlx4_free_cmd_mailbox(dev, mailbox);
}

static void mlx4_free_irqs(struct mlx4_dev *dev)
{
        struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
        int i;

        if (eq_table->have_irq)
                free_irq(dev->pdev->irq, dev);
        for (i = 0; i < MLX4_NUM_EQ; ++i)
                if (eq_table->eq[i].have_irq)
                        free_irq(eq_table->eq[i].irq, eq_table->eq + i);
}

static int mlx4_map_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
                                 priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
        if (!priv->clr_base) {
                mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
                return -ENOMEM;
        }

        return 0;
}

static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        iounmap(priv->clr_base);
}

int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int ret;

        /*
         * We assume that mapping one page is enough for the whole EQ
         * context table.  This is fine with all current HCAs, because
         * we only use 32 EQs and each EQ uses 64 bytes of context
         * memory, or 1 KB total.
         */
        priv->eq_table.icm_virt = icm_virt;
        priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
        if (!priv->eq_table.icm_page)
                return -ENOMEM;
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }

        ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
        if (ret) {
                pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                               PCI_DMA_BIDIRECTIONAL);
                __free_page(priv->eq_table.icm_page);
        }

        return ret;
}

void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
        pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
                       PCI_DMA_BIDIRECTIONAL);
        __free_page(priv->eq_table.icm_page);
}

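/*
 * Set up the EQ table: create one completion EQ and one async EQ, hook
 * up interrupts (one MSI-X vector per EQ, or a single shared INTx line),
 * subscribe the async EQ to the async event mask and arm all EQs.
 */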
int mlx4_init_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;
        int i;

        err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
                               dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
        if (err)
                return err;

        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
                priv->eq_table.uar_map[i] = NULL;

        err = mlx4_map_clr_int(dev);
        if (err)
                goto err_out_free;

        priv->eq_table.clr_mask =
                swab32(1 << (priv->eq_table.inta_pin & 31));
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);

        err = mlx4_create_eq(dev, dev->caps.num_cqs + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_COMP : 0,
                             &priv->eq_table.eq[MLX4_EQ_COMP]);
        if (err)
                goto err_out_unmap;

        err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
                             (dev->flags & MLX4_FLAG_MSI_X) ? MLX4_EQ_ASYNC : 0,
                             &priv->eq_table.eq[MLX4_EQ_ASYNC]);
        if (err)
                goto err_out_comp;

        if (dev->flags & MLX4_FLAG_MSI_X) {
                static const char *eq_name[] = {
                        [MLX4_EQ_COMP]  = DRV_NAME " (comp)",
                        [MLX4_EQ_ASYNC] = DRV_NAME " (async)"
                };

                for (i = 0; i < MLX4_NUM_EQ; ++i) {
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt,
                                          0, eq_name[i], priv->eq_table.eq + i);
                        if (err)
                                goto err_out_async;

                        priv->eq_table.eq[i].have_irq = 1;
                }

        } else {
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
                                  IRQF_SHARED, DRV_NAME, dev);
                if (err)
                        goto err_out_async;

                priv->eq_table.have_irq = 1;
        }

        err = mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 0,
                          priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
        if (err)
                mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
                           priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                eq_set_ci(&priv->eq_table.eq[i], 1);

        return 0;

err_out_async:
        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_ASYNC]);

err_out_comp:
        mlx4_free_eq(dev, &priv->eq_table.eq[MLX4_EQ_COMP]);

err_out_unmap:
        mlx4_unmap_clr_int(dev);
        mlx4_free_irqs(dev);

err_out_free:
        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
        return err;
}

void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        mlx4_MAP_EQ(dev, MLX4_ASYNC_EVENT_MASK, 1,
                    priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);

        mlx4_free_irqs(dev);

        for (i = 0; i < MLX4_NUM_EQ; ++i)
                mlx4_free_eq(dev, &priv->eq_table.eq[i]);

        mlx4_unmap_clr_int(dev);

        for (i = 0; i < ARRAY_SIZE(priv->eq_table.uar_map); ++i)
                if (priv->eq_table.uar_map[i])
                        iounmap(priv->eq_table.uar_map[i]);

        mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
}