OpenCores
URL https://opencores.org/ocsvn/test_project/test_project/trunk

Subversion Repositories test_project

test_project/trunk/linux_sd_driver/drivers/atm/nicstar.c - Blame information for rev 62


Line No. Rev Author Line
1 62 marcus.erl
/******************************************************************************
2
 *
3
 * nicstar.c
4
 *
5
 * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards.
6
 *
7
 * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME.
8
 *            It was taken from the frle-0.22 device driver.
9
 *            As the file doesn't have a copyright notice, in the file
10
 *            nicstarmac.copyright I put the copyright notice from the
11
 *            frle-0.22 device driver.
12
 *            Some code is based on the nicstar driver by M. Welsh.
13
 *
14
 * Author: Rui Prior (rprior@inescn.pt)
15
 * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999
16
 *
17
 *
18
 * (C) INESC 1999
19
 *
20
 *
21
 ******************************************************************************/
22
 
23
 
24
/**** IMPORTANT INFORMATION ***************************************************
25
 *
26
 * There are currently three types of spinlocks:
27
 *
28
 * 1 - Per card interrupt spinlock (to protect structures and such)
29
 * 2 - Per SCQ scq spinlock
30
 * 3 - Per card resource spinlock (to access registers, etc.)
31
 *
32
 * These must NEVER be grabbed in reverse order.
33
 *
34
 ******************************************************************************/
35
 
36
/* Header files ***************************************************************/
37
 
38
#include <linux/module.h>
39
#include <linux/kernel.h>
40
#include <linux/skbuff.h>
41
#include <linux/atmdev.h>
42
#include <linux/atm.h>
43
#include <linux/pci.h>
44
#include <linux/types.h>
45
#include <linux/string.h>
46
#include <linux/delay.h>
47
#include <linux/init.h>
48
#include <linux/sched.h>
49
#include <linux/timer.h>
50
#include <linux/interrupt.h>
51
#include <linux/bitops.h>
52
#include <asm/io.h>
53
#include <asm/uaccess.h>
54
#include <asm/atomic.h>
55
#include "nicstar.h"
56
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
57
#include "suni.h"
58
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
59
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
60
#include "idt77105.h"
61
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
62
 
63
#if BITS_PER_LONG != 32
64
#  error FIXME: this driver requires a 32-bit platform
65
#endif
66
 
67
/* Additional code ************************************************************/
68
 
69
#include "nicstarmac.c"
70
 
71
 
72
/* Configurable parameters ****************************************************/
73
 
74
#undef PHY_LOOPBACK
75
#undef TX_DEBUG
76
#undef RX_DEBUG
77
#undef GENERAL_DEBUG
78
#undef EXTRA_DEBUG
79
 
80
#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
81
                             you're going to use only raw ATM */
82
 
83
 
84
/* Do not touch these *********************************************************/
85
 
86
#ifdef TX_DEBUG
87
#define TXPRINTK(args...) printk(args)
88
#else
89
#define TXPRINTK(args...)
90
#endif /* TX_DEBUG */
91
 
92
#ifdef RX_DEBUG
93
#define RXPRINTK(args...) printk(args)
94
#else
95
#define RXPRINTK(args...)
96
#endif /* RX_DEBUG */
97
 
98
#ifdef GENERAL_DEBUG
99
#define PRINTK(args...) printk(args)
100
#else
101
#define PRINTK(args...)
102
#endif /* GENERAL_DEBUG */
103
 
104
#ifdef EXTRA_DEBUG
105
#define XPRINTK(args...) printk(args)
106
#else
107
#define XPRINTK(args...)
108
#endif /* EXTRA_DEBUG */
109
 
110
 
111
/* Macros *********************************************************************/
112
 
113
#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)
114
 
115
#define NS_DELAY mdelay(1)
116
 
117
#define ALIGN_BUS_ADDR(addr, alignment) \
118
        ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
119
#define ALIGN_ADDRESS(addr, alignment) \
120
        bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))
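/* Worked example (illustrative, not part of the original source): with an
 * alignment of 0x1000 the macro rounds a bus address up to the next 4 KB
 * boundary:
 *
 *    ALIGN_BUS_ADDR(0x12345, 0x1000)
 *       = (0x12345 + 0x0FFF) & ~0x0FFF
 *       = 0x13344 & 0xFFFFF000
 *       = 0x13000
 *
 * ALIGN_ADDRESS() applies the same rounding to virt_to_bus(addr) and maps
 * the result back with bus_to_virt().  This is why the TSQ, RSQ and SCQ
 * allocations below request "size + alignment" (or twice the size) and then
 * align the base pointer inside the over-allocated region. */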
121
 
122
#undef CEIL
123
 
124
#ifndef ATM_SKB
125
#define ATM_SKB(s) (&(s)->atm)
126
#endif
127
 
128
   /* Spinlock debugging stuff */
129
#ifdef NS_DEBUG_SPINLOCKS /* See nicstar.h */
130
#define ns_grab_int_lock(card,flags) \
131
 do { \
132
    unsigned long nsdsf, nsdsf2; \
133
    local_irq_save(flags); \
134
    save_flags(nsdsf); cli();\
135
    if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
136
                                (flags)&(1<<9)?"en":"dis"); \
137
    if (spin_is_locked(&(card)->int_lock) && \
138
        (card)->cpu_int == smp_processor_id()) { \
139
       printk("nicstar.c: line %d (cpu %d) int_lock already locked at line %d (cpu %d)\n", \
140
              __LINE__, smp_processor_id(), (card)->has_int_lock, \
141
              (card)->cpu_int); \
142
       printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
143
    } \
144
    if (spin_is_locked(&(card)->res_lock) && \
145
        (card)->cpu_res == smp_processor_id()) { \
146
       printk("nicstar.c: line %d (cpu %d) res_lock locked at line %d (cpu %d)(trying int)\n", \
147
              __LINE__, smp_processor_id(), (card)->has_res_lock, \
148
              (card)->cpu_res); \
149
       printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
150
    } \
151
    spin_lock_irq(&(card)->int_lock); \
152
    (card)->has_int_lock = __LINE__; \
153
    (card)->cpu_int = smp_processor_id(); \
154
    restore_flags(nsdsf); } while (0)
155
#define ns_grab_res_lock(card,flags) \
156
 do { \
157
    unsigned long nsdsf, nsdsf2; \
158
    local_irq_save(flags); \
159
    save_flags(nsdsf); cli();\
160
    if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
161
                                (flags)&(1<<9)?"en":"dis"); \
162
    if (spin_is_locked(&(card)->res_lock) && \
163
        (card)->cpu_res == smp_processor_id()) { \
164
       printk("nicstar.c: line %d (cpu %d) res_lock already locked at line %d (cpu %d)\n", \
165
              __LINE__, smp_processor_id(), (card)->has_res_lock, \
166
              (card)->cpu_res); \
167
       printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
168
    } \
169
    spin_lock_irq(&(card)->res_lock); \
170
    (card)->has_res_lock = __LINE__; \
171
    (card)->cpu_res = smp_processor_id(); \
172
    restore_flags(nsdsf); } while (0)
173
#define ns_grab_scq_lock(card,scq,flags) \
174
 do { \
175
    unsigned long nsdsf, nsdsf2; \
176
    local_irq_save(flags); \
177
    save_flags(nsdsf); cli();\
178
    if (nsdsf & (1<<9)) printk ("nicstar.c: ints %sabled -> enabled.\n", \
179
                                (flags)&(1<<9)?"en":"dis"); \
180
    if (spin_is_locked(&(scq)->lock) && \
181
        (scq)->cpu_lock == smp_processor_id()) { \
182
       printk("nicstar.c: line %d (cpu %d) this scq_lock already locked at line %d (cpu %d)\n", \
183
              __LINE__, smp_processor_id(), (scq)->has_lock, \
184
              (scq)->cpu_lock); \
185
       printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
186
    } \
187
    if (spin_is_locked(&(card)->res_lock) && \
188
        (card)->cpu_res == smp_processor_id()) { \
189
       printk("nicstar.c: line %d (cpu %d) res_lock locked at line %d (cpu %d)(trying scq)\n", \
190
              __LINE__, smp_processor_id(), (card)->has_res_lock, \
191
              (card)->cpu_res); \
192
       printk("nicstar.c: ints were %sabled.\n", ((flags)&(1<<9)?"en":"dis")); \
193
    } \
194
    spin_lock_irq(&(scq)->lock); \
195
    (scq)->has_lock = __LINE__; \
196
    (scq)->cpu_lock = smp_processor_id(); \
197
    restore_flags(nsdsf); } while (0)
198
#else /* !NS_DEBUG_SPINLOCKS */
199
#define ns_grab_int_lock(card,flags) \
200
        spin_lock_irqsave(&(card)->int_lock,(flags))
201
#define ns_grab_res_lock(card,flags) \
202
        spin_lock_irqsave(&(card)->res_lock,(flags))
203
#define ns_grab_scq_lock(card,scq,flags) \
204
        spin_lock_irqsave(&(scq)->lock,flags)
205
#endif /* NS_DEBUG_SPINLOCKS */
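/* Illustrative sketch (not taken from the driver): the three lock classes
 * nest in the order given in the IMPORTANT INFORMATION block above, i.e.
 * interrupt lock, then SCQ lock, then resource lock, released in reverse.
 * A hypothetical path needing all three would therefore look like:
 *
 *    unsigned long iflags, sflags, rflags;
 *
 *    ns_grab_int_lock(card, iflags);        // 1 - per card interrupt lock
 *    ns_grab_scq_lock(card, scq, sflags);   // 2 - per SCQ lock
 *    ns_grab_res_lock(card, rflags);        // 3 - per card resource lock
 *    ...touch registers and shared state...
 *    spin_unlock_irqrestore(&card->res_lock, rflags);
 *    spin_unlock_irqrestore(&scq->lock, sflags);
 *    spin_unlock_irqrestore(&card->int_lock, iflags);
 *
 * Grabbing them in any other order risks the deadlock the debug macros
 * above try to detect. */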
206
 
207
 
208
/* Function declarations ******************************************************/
209
 
210
static u32 ns_read_sram(ns_dev *card, u32 sram_address);
211
static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
212
static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
213
static void __devinit ns_init_card_error(ns_dev *card, int error);
214
static scq_info *get_scq(int size, u32 scd);
215
static void free_scq(scq_info *scq, struct atm_vcc *vcc);
216
static void push_rxbufs(ns_dev *, struct sk_buff *);
217
static irqreturn_t ns_irq_handler(int irq, void *dev_id);
218
static int ns_open(struct atm_vcc *vcc);
219
static void ns_close(struct atm_vcc *vcc);
220
static void fill_tst(ns_dev *card, int n, vc_map *vc);
221
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
222
static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
223
                     struct sk_buff *skb);
224
static void process_tsq(ns_dev *card);
225
static void drain_scq(ns_dev *card, scq_info *scq, int pos);
226
static void process_rsq(ns_dev *card);
227
static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
228
#ifdef NS_USE_DESTRUCTORS
229
static void ns_sb_destructor(struct sk_buff *sb);
230
static void ns_lb_destructor(struct sk_buff *lb);
231
static void ns_hb_destructor(struct sk_buff *hb);
232
#endif /* NS_USE_DESTRUCTORS */
233
static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
234
static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
235
static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
236
static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb);
237
static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
238
static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
239
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
240
static void which_list(ns_dev *card, struct sk_buff *skb);
241
static void ns_poll(unsigned long arg);
242
static int ns_parse_mac(char *mac, unsigned char *esi);
243
static short ns_h2i(char c);
244
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
245
                       unsigned long addr);
246
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
247
 
248
 
249
 
250
/* Global variables ***********************************************************/
251
 
252
static struct ns_dev *cards[NS_MAX_CARDS];
253
static unsigned num_cards;
254
static struct atmdev_ops atm_ops =
255
{
256
   .open        = ns_open,
257
   .close       = ns_close,
258
   .ioctl       = ns_ioctl,
259
   .send        = ns_send,
260
   .phy_put     = ns_phy_put,
261
   .phy_get     = ns_phy_get,
262
   .proc_read   = ns_proc_read,
263
   .owner       = THIS_MODULE,
264
};
265
static struct timer_list ns_timer;
266
static char *mac[NS_MAX_CARDS];
267
module_param_array(mac, charp, NULL, 0);
268
MODULE_LICENSE("GPL");
269
 
270
 
271
/* Functions*******************************************************************/
272
 
273
static int __devinit nicstar_init_one(struct pci_dev *pcidev,
274
                                      const struct pci_device_id *ent)
275
{
276
   static int index = -1;
277
   unsigned int error;
278
 
279
   index++;
280
   cards[index] = NULL;
281
 
282
   error = ns_init_card(index, pcidev);
283
   if (error) {
284
      cards[index--] = NULL;    /* don't increment index */
285
      goto err_out;
286
   }
287
 
288
   return 0;
289
err_out:
290
   return -ENODEV;
291
}
292
 
293
 
294
 
295
static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
296
{
297
   int i, j;
298
   ns_dev *card = pci_get_drvdata(pcidev);
299
   struct sk_buff *hb;
300
   struct sk_buff *iovb;
301
   struct sk_buff *lb;
302
   struct sk_buff *sb;
303
 
304
   i = card->index;
305
 
306
   if (cards[i] == NULL)
307
      return;
308
 
309
   if (card->atmdev->phy && card->atmdev->phy->stop)
310
      card->atmdev->phy->stop(card->atmdev);
311
 
312
   /* Stop everything */
313
   writel(0x00000000, card->membase + CFG);
314
 
315
   /* De-register device */
316
   atm_dev_deregister(card->atmdev);
317
 
318
   /* Disable PCI device */
319
   pci_disable_device(pcidev);
320
 
321
   /* Free up resources */
322
   j = 0;
323
   PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
324
   while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
325
   {
326
      dev_kfree_skb_any(hb);
327
      j++;
328
   }
329
   PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
330
   j = 0;
331
   PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
332
   while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
333
   {
334
      dev_kfree_skb_any(iovb);
335
      j++;
336
   }
337
   PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
338
   while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
339
      dev_kfree_skb_any(lb);
340
   while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
341
      dev_kfree_skb_any(sb);
342
   free_scq(card->scq0, NULL);
343
   for (j = 0; j < NS_FRSCD_NUM; j++)
344
   {
345
      if (card->scd2vc[j] != NULL)
346
         free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
347
   }
348
   kfree(card->rsq.org);
349
   kfree(card->tsq.org);
350
   free_irq(card->pcidev->irq, card);
351
   iounmap(card->membase);
352
   kfree(card);
353
}
354
 
355
 
356
 
357
static struct pci_device_id nicstar_pci_tbl[] __devinitdata =
358
{
359
        {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201,
360
         PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
361
        {0,}                     /* terminate list */
362
};
363
MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);
364
 
365
 
366
 
367
static struct pci_driver nicstar_driver = {
368
        .name           = "nicstar",
369
        .id_table       = nicstar_pci_tbl,
370
        .probe          = nicstar_init_one,
371
        .remove         = __devexit_p(nicstar_remove_one),
372
};
373
 
374
 
375
 
376
static int __init nicstar_init(void)
377
{
378
   unsigned error = 0;   /* Initialized to remove compile warning */
379
 
380
   XPRINTK("nicstar: nicstar_init() called.\n");
381
 
382
   error = pci_register_driver(&nicstar_driver);
383
 
384
   TXPRINTK("nicstar: TX debug enabled.\n");
385
   RXPRINTK("nicstar: RX debug enabled.\n");
386
   PRINTK("nicstar: General debug enabled.\n");
387
#ifdef PHY_LOOPBACK
388
   printk("nicstar: using PHY loopback.\n");
389
#endif /* PHY_LOOPBACK */
390
   XPRINTK("nicstar: nicstar_init() returned.\n");
391
 
392
   if (!error) {
393
      init_timer(&ns_timer);
394
      ns_timer.expires = jiffies + NS_POLL_PERIOD;
395
      ns_timer.data = 0UL;
396
      ns_timer.function = ns_poll;
397
      add_timer(&ns_timer);
398
   }
399
 
400
   return error;
401
}
402
 
403
 
404
 
405
static void __exit nicstar_cleanup(void)
406
{
407
   XPRINTK("nicstar: nicstar_cleanup() called.\n");
408
 
409
   del_timer(&ns_timer);
410
 
411
   pci_unregister_driver(&nicstar_driver);
412
 
413
   XPRINTK("nicstar: nicstar_cleanup() returned.\n");
414
}
415
 
416
 
417
 
418
static u32 ns_read_sram(ns_dev *card, u32 sram_address)
419
{
420
   unsigned long flags;
421
   u32 data;
422
   sram_address <<= 2;
423
   sram_address &= 0x0007FFFC;  /* address must be dword aligned */
424
   sram_address |= 0x50000000;  /* SRAM read command */
425
   ns_grab_res_lock(card, flags);
426
   while (CMD_BUSY(card));
427
   writel(sram_address, card->membase + CMD);
428
   while (CMD_BUSY(card));
429
   data = readl(card->membase + DR0);
430
   spin_unlock_irqrestore(&card->res_lock, flags);
431
   return data;
432
}
433
 
434
 
435
 
436
static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
437
{
438
   unsigned long flags;
439
   int i, c;
440
   count--;     /* count range now is 0..3 instead of 1..4 */
441
   c = count;
442
   c <<= 2;     /* to use increments of 4 */
443
   ns_grab_res_lock(card, flags);
444
   while (CMD_BUSY(card));
445
   for (i = 0; i <= c; i += 4)
446
      writel(*(value++), card->membase + i);
447
   /* Note: DR# registers are the first 4 dwords in nicstar's memspace,
448
            so card->membase + DR0 == card->membase */
449
   sram_address <<= 2;
450
   sram_address &= 0x0007FFFC;
451
   sram_address |= (0x40000000 | count);
452
   writel(sram_address, card->membase + CMD);
453
   spin_unlock_irqrestore(&card->res_lock, flags);
454
}
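/* Illustrative note (not from the original source): both SRAM helpers above
 * build a command word from the SRAM word address.  The address is shifted
 * left by two and masked with 0x0007FFFC so it stays dword aligned, and the
 * top bits select the operation: 0x5 for a read, 0x4 for a write, with the
 * low bits of a write holding count - 1.  A single-word write plus readback
 * check, as done by the SRAM size probe in ns_init_card(), reduces to:
 *
 *    u32 pattern = 0xDEADBEEF;
 *
 *    ns_write_sram(card, 0x1C003, &pattern, 1);
 *    if (ns_read_sram(card, 0x1C003) != pattern)
 *       printk("nicstar%d: SRAM readback mismatch.\n", card->index);
 */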
455
 
456
 
457
static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
458
{
459
   int j;
460
   struct ns_dev *card = NULL;
461
   unsigned char pci_latency;
462
   unsigned error;
463
   u32 data;
464
   u32 u32d[4];
465
   u32 ns_cfg_rctsize;
466
   int bcount;
467
   unsigned long membase;
468
 
469
   error = 0;
470
 
471
   if (pci_enable_device(pcidev))
472
   {
473
      printk("nicstar%d: can't enable PCI device\n", i);
474
      error = 2;
475
      ns_init_card_error(card, error);
476
      return error;
477
   }
478
 
479
   if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
480
   {
481
      printk("nicstar%d: can't allocate memory for device structure.\n", i);
482
      error = 2;
483
      ns_init_card_error(card, error);
484
      return error;
485
   }
486
   cards[i] = card;
487
   spin_lock_init(&card->int_lock);
488
   spin_lock_init(&card->res_lock);
489
 
490
   pci_set_drvdata(pcidev, card);
491
 
492
   card->index = i;
493
   card->atmdev = NULL;
494
   card->pcidev = pcidev;
495
   membase = pci_resource_start(pcidev, 1);
496
   card->membase = ioremap(membase, NS_IOREMAP_SIZE);
497
   if (card->membase == 0)
498
   {
499
      printk("nicstar%d: can't ioremap() membase.\n",i);
500
      error = 3;
501
      ns_init_card_error(card, error);
502
      return error;
503
   }
504
   PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase);
505
 
506
   pci_set_master(pcidev);
507
 
508
   if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0)
509
   {
510
      printk("nicstar%d: can't read PCI latency timer.\n", i);
511
      error = 6;
512
      ns_init_card_error(card, error);
513
      return error;
514
   }
515
#ifdef NS_PCI_LATENCY
516
   if (pci_latency < NS_PCI_LATENCY)
517
   {
518
      PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
519
      for (j = 1; j < 4; j++)
520
      {
521
         if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0)
522
            break;
523
      }
524
      if (j == 4)
525
      {
526
         printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY);
527
         error = 7;
528
         ns_init_card_error(card, error);
529
         return error;
530
      }
531
   }
532
#endif /* NS_PCI_LATENCY */
533
 
534
   /* Clear timer overflow */
535
   data = readl(card->membase + STAT);
536
   if (data & NS_STAT_TMROF)
537
      writel(NS_STAT_TMROF, card->membase + STAT);
538
 
539
   /* Software reset */
540
   writel(NS_CFG_SWRST, card->membase + CFG);
541
   NS_DELAY;
542
   writel(0x00000000, card->membase + CFG);
543
 
544
   /* PHY reset */
545
   writel(0x00000008, card->membase + GP);
546
   NS_DELAY;
547
   writel(0x00000001, card->membase + GP);
548
   NS_DELAY;
549
   while (CMD_BUSY(card));
550
   writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD);      /* Sync UTOPIA with SAR clock */
551
   NS_DELAY;
552
 
553
   /* Detect PHY type */
554
   while (CMD_BUSY(card));
555
   writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD);
556
   while (CMD_BUSY(card));
557
   data = readl(card->membase + DR0);
558
   switch(data) {
559
      case 0x00000009:
560
         printk("nicstar%d: PHY seems to be 25 Mbps.\n", i);
561
         card->max_pcr = ATM_25_PCR;
562
         while(CMD_BUSY(card));
563
         writel(0x00000008, card->membase + DR0);
564
         writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD);
565
         /* Clear any pending interrupt */
566
         writel(NS_STAT_SFBQF, card->membase + STAT);
567
#ifdef PHY_LOOPBACK
568
         while(CMD_BUSY(card));
569
         writel(0x00000022, card->membase + DR0);
570
         writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD);
571
#endif /* PHY_LOOPBACK */
572
         break;
573
      case 0x00000030:
574
      case 0x00000031:
575
         printk("nicstar%d: PHY seems to be 155 Mbps.\n", i);
576
         card->max_pcr = ATM_OC3_PCR;
577
#ifdef PHY_LOOPBACK
578
         while(CMD_BUSY(card));
579
         writel(0x00000002, card->membase + DR0);
580
         writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD);
581
#endif /* PHY_LOOPBACK */
582
         break;
583
      default:
584
         printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data);
585
         error = 8;
586
         ns_init_card_error(card, error);
587
         return error;
588
   }
589
   writel(0x00000000, card->membase + GP);
590
 
591
   /* Determine SRAM size */
592
   data = 0x76543210;
593
   ns_write_sram(card, 0x1C003, &data, 1);
594
   data = 0x89ABCDEF;
595
   ns_write_sram(card, 0x14003, &data, 1);
596
   if (ns_read_sram(card, 0x14003) == 0x89ABCDEF &&
597
       ns_read_sram(card, 0x1C003) == 0x76543210)
598
       card->sram_size = 128;
599
   else
600
      card->sram_size = 32;
601
   PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size);
602
 
603
   card->rct_size = NS_MAX_RCTSIZE;
604
 
605
#if (NS_MAX_RCTSIZE == 4096)
606
   if (card->sram_size == 128)
607
      printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i);
608
#elif (NS_MAX_RCTSIZE == 16384)
609
   if (card->sram_size == 32)
610
   {
611
      printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i);
612
      card->rct_size = 4096;
613
   }
614
#else
615
#error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c
616
#endif
617
 
618
   card->vpibits = NS_VPIBITS;
619
   if (card->rct_size == 4096)
620
      card->vcibits = 12 - NS_VPIBITS;
621
   else /* card->rct_size == 16384 */
622
      card->vcibits = 14 - NS_VPIBITS;
623
 
624
   /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */
625
   if (mac[i] == NULL)
626
      nicstar_init_eprom(card->membase);
627
 
628
   /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
629
   writel(0x00000000, card->membase + VPM);
630
 
631
   /* Initialize TSQ */
632
   card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL);
633
   if (card->tsq.org == NULL)
634
   {
635
      printk("nicstar%d: can't allocate TSQ.\n", i);
636
      error = 10;
637
      ns_init_card_error(card, error);
638
      return error;
639
   }
640
   card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT);
641
   card->tsq.next = card->tsq.base;
642
   card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1);
643
   for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++)
644
      ns_tsi_init(card->tsq.base + j);
645
   writel(0x00000000, card->membase + TSQH);
646
   writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB);
647
   PRINTK("nicstar%d: TSQ base at 0x%x  0x%x  0x%x.\n", i, (u32) card->tsq.base,
648
          (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB));
649
 
650
   /* Initialize RSQ */
651
   card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL);
652
   if (card->rsq.org == NULL)
653
   {
654
      printk("nicstar%d: can't allocate RSQ.\n", i);
655
      error = 11;
656
      ns_init_card_error(card, error);
657
      return error;
658
   }
659
   card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT);
660
   card->rsq.next = card->rsq.base;
661
   card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1);
662
   for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++)
663
      ns_rsqe_init(card->rsq.base + j);
664
   writel(0x00000000, card->membase + RSQH);
665
   writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB);
666
   PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base);
667
 
668
   /* Initialize SCQ0, the only VBR SCQ used */
669
   card->scq1 = NULL;
670
   card->scq2 = NULL;
671
   card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0);
672
   if (card->scq0 == NULL)
673
   {
674
      printk("nicstar%d: can't get SCQ0.\n", i);
675
      error = 12;
676
      ns_init_card_error(card, error);
677
      return error;
678
   }
679
   u32d[0] = (u32) virt_to_bus(card->scq0->base);
680
   u32d[1] = (u32) 0x00000000;
681
   u32d[2] = (u32) 0xffffffff;
682
   u32d[3] = (u32) 0x00000000;
683
   ns_write_sram(card, NS_VRSCD0, u32d, 4);
684
   ns_write_sram(card, NS_VRSCD1, u32d, 4);     /* These last two won't be used */
685
   ns_write_sram(card, NS_VRSCD2, u32d, 4);     /* but are initialized, just in case... */
686
   card->scq0->scd = NS_VRSCD0;
687
   PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base);
688
 
689
   /* Initialize TSTs */
690
   card->tst_addr = NS_TST0;
691
   card->tst_free_entries = NS_TST_NUM_ENTRIES;
692
   data = NS_TST_OPCODE_VARIABLE;
693
   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
694
      ns_write_sram(card, NS_TST0 + j, &data, 1);
695
   data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0);
696
   ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1);
697
   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
698
      ns_write_sram(card, NS_TST1 + j, &data, 1);
699
   data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1);
700
   ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1);
701
   for (j = 0; j < NS_TST_NUM_ENTRIES; j++)
702
      card->tste2vc[j] = NULL;
703
   writel(NS_TST0 << 2, card->membase + TSTB);
704
 
705
 
706
   /* Initialize RCT. AAL type is set on opening the VC. */
707
#ifdef RCQ_SUPPORT
708
   u32d[0] = NS_RCTE_RAWCELLINTEN;
709
#else
710
   u32d[0] = 0x00000000;
711
#endif /* RCQ_SUPPORT */
712
   u32d[1] = 0x00000000;
713
   u32d[2] = 0x00000000;
714
   u32d[3] = 0xFFFFFFFF;
715
   for (j = 0; j < card->rct_size; j++)
716
      ns_write_sram(card, j * 4, u32d, 4);
717
 
718
   memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map));
719
 
720
   for (j = 0; j < NS_FRSCD_NUM; j++)
721
      card->scd2vc[j] = NULL;
722
 
723
   /* Initialize buffer levels */
724
   card->sbnr.min = MIN_SB;
725
   card->sbnr.init = NUM_SB;
726
   card->sbnr.max = MAX_SB;
727
   card->lbnr.min = MIN_LB;
728
   card->lbnr.init = NUM_LB;
729
   card->lbnr.max = MAX_LB;
730
   card->iovnr.min = MIN_IOVB;
731
   card->iovnr.init = NUM_IOVB;
732
   card->iovnr.max = MAX_IOVB;
733
   card->hbnr.min = MIN_HB;
734
   card->hbnr.init = NUM_HB;
735
   card->hbnr.max = MAX_HB;
736
 
737
   card->sm_handle = 0x00000000;
738
   card->sm_addr = 0x00000000;
739
   card->lg_handle = 0x00000000;
740
   card->lg_addr = 0x00000000;
741
 
742
   card->efbie = 1;     /* To prevent push_rxbufs from enabling the interrupt */
743
 
744
   /* Pre-allocate some huge buffers */
745
   skb_queue_head_init(&card->hbpool.queue);
746
   card->hbpool.count = 0;
747
   for (j = 0; j < NUM_HB; j++)
748
   {
749
      struct sk_buff *hb;
750
      hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
751
      if (hb == NULL)
752
      {
753
         printk("nicstar%d: can't allocate %dth of %d huge buffers.\n",
754
                i, j, NUM_HB);
755
         error = 13;
756
         ns_init_card_error(card, error);
757
         return error;
758
      }
759
      NS_SKB_CB(hb)->buf_type = BUF_NONE;
760
      skb_queue_tail(&card->hbpool.queue, hb);
761
      card->hbpool.count++;
762
   }
763
 
764
 
765
   /* Allocate large buffers */
766
   skb_queue_head_init(&card->lbpool.queue);
767
   card->lbpool.count = 0;                       /* Not used */
768
   for (j = 0; j < NUM_LB; j++)
769
   {
770
      struct sk_buff *lb;
771
      lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
772
      if (lb == NULL)
773
      {
774
         printk("nicstar%d: can't allocate %dth of %d large buffers.\n",
775
                i, j, NUM_LB);
776
         error = 14;
777
         ns_init_card_error(card, error);
778
         return error;
779
      }
780
      NS_SKB_CB(lb)->buf_type = BUF_LG;
781
      skb_queue_tail(&card->lbpool.queue, lb);
782
      skb_reserve(lb, NS_SMBUFSIZE);
783
      push_rxbufs(card, lb);
784
      /* Due to the implementation of push_rxbufs() this is 1, not 0 */
785
      if (j == 1)
786
      {
787
         card->rcbuf = lb;
788
         card->rawch = (u32) virt_to_bus(lb->data);
789
      }
790
   }
791
   /* Test for strange behaviour which leads to crashes */
792
   if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min)
793
   {
794
      printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n",
795
             i, j, bcount);
796
      error = 14;
797
      ns_init_card_error(card, error);
798
      return error;
799
   }
800
 
801
 
802
   /* Allocate small buffers */
803
   skb_queue_head_init(&card->sbpool.queue);
804
   card->sbpool.count = 0;                       /* Not used */
805
   for (j = 0; j < NUM_SB; j++)
806
   {
807
      struct sk_buff *sb;
808
      sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
809
      if (sb == NULL)
810
      {
811
         printk("nicstar%d: can't allocate %dth of %d small buffers.\n",
812
                i, j, NUM_SB);
813
         error = 15;
814
         ns_init_card_error(card, error);
815
         return error;
816
      }
817
      NS_SKB_CB(sb)->buf_type = BUF_SM;
818
      skb_queue_tail(&card->sbpool.queue, sb);
819
      skb_reserve(sb, NS_AAL0_HEADER);
820
      push_rxbufs(card, sb);
821
   }
822
   /* Test for strange behaviour which leads to crashes */
823
   if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min)
824
   {
825
      printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n",
826
             i, j, bcount);
827
      error = 15;
828
      ns_init_card_error(card, error);
829
      return error;
830
   }
831
 
832
 
833
   /* Allocate iovec buffers */
834
   skb_queue_head_init(&card->iovpool.queue);
835
   card->iovpool.count = 0;
836
   for (j = 0; j < NUM_IOVB; j++)
837
   {
838
      struct sk_buff *iovb;
839
      iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
840
      if (iovb == NULL)
841
      {
842
         printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n",
843
                i, j, NUM_IOVB);
844
         error = 16;
845
         ns_init_card_error(card, error);
846
         return error;
847
      }
848
      NS_SKB_CB(iovb)->buf_type = BUF_NONE;
849
      skb_queue_tail(&card->iovpool.queue, iovb);
850
      card->iovpool.count++;
851
   }
852
 
853
   /* Configure NICStAR */
854
   if (card->rct_size == 4096)
855
      ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES;
856
   else /* (card->rct_size == 16384) */
857
      ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES;
858
 
859
   card->efbie = 1;
860
 
861
   card->intcnt = 0;
862
   if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0)
863
   {
864
      printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
865
      error = 9;
866
      ns_init_card_error(card, error);
867
      return error;
868
   }
869
 
870
   /* Register device */
871
   card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL);
872
   if (card->atmdev == NULL)
873
   {
874
      printk("nicstar%d: can't register device.\n", i);
875
      error = 17;
876
      ns_init_card_error(card, error);
877
      return error;
878
   }
879
 
880
   if (ns_parse_mac(mac[i], card->atmdev->esi)) {
881
      nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
882
                         card->atmdev->esi, 6);
883
      if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
884
         nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT,
885
                         card->atmdev->esi, 6);
886
      }
887
   }
888
 
889
   printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
890
          card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
891
          card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
892
 
893
   card->atmdev->dev_data = card;
894
   card->atmdev->ci_range.vpi_bits = card->vpibits;
895
   card->atmdev->ci_range.vci_bits = card->vcibits;
896
   card->atmdev->link_rate = card->max_pcr;
897
   card->atmdev->phy = NULL;
898
 
899
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
900
   if (card->max_pcr == ATM_OC3_PCR)
901
      suni_init(card->atmdev);
902
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
903
 
904
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
905
   if (card->max_pcr == ATM_25_PCR)
906
      idt77105_init(card->atmdev);
907
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */
908
 
909
   if (card->atmdev->phy && card->atmdev->phy->start)
910
      card->atmdev->phy->start(card->atmdev);
911
 
912
   writel(NS_CFG_RXPATH |
913
          NS_CFG_SMBUFSIZE |
914
          NS_CFG_LGBUFSIZE |
915
          NS_CFG_EFBIE |
916
          NS_CFG_RSQSIZE |
917
          NS_CFG_VPIBITS |
918
          ns_cfg_rctsize |
919
          NS_CFG_RXINT_NODELAY |
920
          NS_CFG_RAWIE |                /* Only enabled if RCQ_SUPPORT */
921
          NS_CFG_RSQAFIE |
922
          NS_CFG_TXEN |
923
          NS_CFG_TXIE |
924
          NS_CFG_TSQFIE_OPT |           /* Only enabled if ENABLE_TSQFIE */
925
          NS_CFG_PHYIE,
926
          card->membase + CFG);
927
 
928
   num_cards++;
929
 
930
   return error;
931
}
932
 
933
 
934
 
935
static void __devinit ns_init_card_error(ns_dev *card, int error)
936
{
937
   if (error >= 17)
938
   {
939
      writel(0x00000000, card->membase + CFG);
940
   }
941
   if (error >= 16)
942
   {
943
      struct sk_buff *iovb;
944
      while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
945
         dev_kfree_skb_any(iovb);
946
   }
947
   if (error >= 15)
948
   {
949
      struct sk_buff *sb;
950
      while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
951
         dev_kfree_skb_any(sb);
952
      free_scq(card->scq0, NULL);
953
   }
954
   if (error >= 14)
955
   {
956
      struct sk_buff *lb;
957
      while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
958
         dev_kfree_skb_any(lb);
959
   }
960
   if (error >= 13)
961
   {
962
      struct sk_buff *hb;
963
      while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
964
         dev_kfree_skb_any(hb);
965
   }
966
   if (error >= 12)
967
   {
968
      kfree(card->rsq.org);
969
   }
970
   if (error >= 11)
971
   {
972
      kfree(card->tsq.org);
973
   }
974
   if (error >= 10)
975
   {
976
      free_irq(card->pcidev->irq, card);
977
   }
978
   if (error >= 4)
979
   {
980
      iounmap(card->membase);
981
   }
982
   if (error >= 3)
983
   {
984
      pci_disable_device(card->pcidev);
985
      kfree(card);
986
   }
987
}
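/* Illustrative note (not from the original source): the numeric error code
 * used throughout ns_init_card() doubles as an unwind level; each threshold
 * in ns_init_card_error() above releases one more resource.  A failing step
 * simply does:
 *
 *    error = 12;                      // e.g. SCQ0 allocation failed
 *    ns_init_card_error(card, error);
 *    return error;
 *
 * Note that the thresholds assume resources are acquired in numeric order,
 * which the request_irq() call (error 9, issued only after the buffer pools
 * are filled) does not quite follow. */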
988
 
989
 
990
 
991
static scq_info *get_scq(int size, u32 scd)
992
{
993
   scq_info *scq;
994
   int i;
995
 
996
   if (size != VBR_SCQSIZE && size != CBR_SCQSIZE)
997
      return NULL;
998
 
999
   scq = kmalloc(sizeof(scq_info), GFP_KERNEL);
1000
   if (scq == NULL)
1001
      return NULL;
1002
   scq->org = kmalloc(2 * size, GFP_KERNEL);
1003
   if (scq->org == NULL)
1004
   {
1005
      kfree(scq);
1006
      return NULL;
1007
   }
1008
   scq->skb = kmalloc(sizeof(struct sk_buff *) *
1009
                                          (size / NS_SCQE_SIZE), GFP_KERNEL);
1010
   if (scq->skb == NULL)
1011
   {
1012
      kfree(scq->org);
1013
      kfree(scq);
1014
      return NULL;
1015
   }
1016
   scq->num_entries = size / NS_SCQE_SIZE;
1017
   scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size);
1018
   scq->next = scq->base;
1019
   scq->last = scq->base + (scq->num_entries - 1);
1020
   scq->tail = scq->last;
1021
   scq->scd = scd;
1022
   scq->num_entries = size / NS_SCQE_SIZE;
1023
   scq->tbd_count = 0;
1024
   init_waitqueue_head(&scq->scqfull_waitq);
1025
   scq->full = 0;
1026
   spin_lock_init(&scq->lock);
1027
 
1028
   for (i = 0; i < scq->num_entries; i++)
1029
      scq->skb[i] = NULL;
1030
 
1031
   return scq;
1032
}
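/* Illustrative note (not from the original source): get_scq() over-allocates
 * (2 * size) and then aligns the base with ALIGN_ADDRESS(scq->org, size), so
 * the queue base ends up aligned to the queue size somewhere inside the
 * kmalloc()ed region.  For the sole VBR queue this amounts to:
 *
 *    scq_info *scq = get_scq(VBR_SCQSIZE, NS_VRSCD0);
 *    // scq->num_entries == VBR_SCQSIZE / NS_SCQE_SIZE
 *    // scq->base is VBR_SCQSIZE-aligned within scq->org
 *
 * free_scq() later frees scq->org, the original unaligned pointer, never
 * scq->base. */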
1033
 
1034
 
1035
 
1036
/* For variable rate SCQ vcc must be NULL */
1037
static void free_scq(scq_info *scq, struct atm_vcc *vcc)
1038
{
1039
   int i;
1040
 
1041
   if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1042
      for (i = 0; i < scq->num_entries; i++)
1043
      {
1044
         if (scq->skb[i] != NULL)
1045
         {
1046
            vcc = ATM_SKB(scq->skb[i])->vcc;
1047
            if (vcc->pop != NULL)
1048
               vcc->pop(vcc, scq->skb[i]);
1049
            else
1050
               dev_kfree_skb_any(scq->skb[i]);
1051
         }
1052
      }
1053
   else /* vcc must be != NULL */
1054
   {
1055
      if (vcc == NULL)
1056
      {
1057
         printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq.");
1058
         for (i = 0; i < scq->num_entries; i++)
1059
            dev_kfree_skb_any(scq->skb[i]);
1060
      }
1061
      else
1062
         for (i = 0; i < scq->num_entries; i++)
1063
         {
1064
            if (scq->skb[i] != NULL)
1065
            {
1066
               if (vcc->pop != NULL)
1067
                  vcc->pop(vcc, scq->skb[i]);
1068
               else
1069
                  dev_kfree_skb_any(scq->skb[i]);
1070
            }
1071
         }
1072
   }
1073
   kfree(scq->skb);
1074
   kfree(scq->org);
1075
   kfree(scq);
1076
}
1077
 
1078
 
1079
 
1080
/* The handles passed must be pointers to the sk_buff containing the small
1081
   or large buffer(s) cast to u32. */
1082
static void push_rxbufs(ns_dev *card, struct sk_buff *skb)
1083
{
1084
   struct ns_skb_cb *cb = NS_SKB_CB(skb);
1085
   u32 handle1, addr1;
1086
   u32 handle2, addr2;
1087
   u32 stat;
1088
   unsigned long flags;
1089
 
1090
   /* *BARF* */
1091
   handle2 = addr2 = 0;
1092
   handle1 = (u32)skb;
1093
   addr1 = (u32)virt_to_bus(skb->data);
1094
 
1095
#ifdef GENERAL_DEBUG
1096
   if (!addr1)
1097
      printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index);
1098
#endif /* GENERAL_DEBUG */
1099
 
1100
   stat = readl(card->membase + STAT);
1101
   card->sbfqc = ns_stat_sfbqc_get(stat);
1102
   card->lbfqc = ns_stat_lfbqc_get(stat);
1103
   if (cb->buf_type == BUF_SM)
1104
   {
1105
      if (!addr2)
1106
      {
1107
         if (card->sm_addr)
1108
         {
1109
            addr2 = card->sm_addr;
1110
            handle2 = card->sm_handle;
1111
            card->sm_addr = 0x00000000;
1112
            card->sm_handle = 0x00000000;
1113
         }
1114
         else /* (!sm_addr) */
1115
         {
1116
            card->sm_addr = addr1;
1117
            card->sm_handle = handle1;
1118
         }
1119
      }
1120
   }
1121
   else /* buf_type == BUF_LG */
1122
   {
1123
      if (!addr2)
1124
      {
1125
         if (card->lg_addr)
1126
         {
1127
            addr2 = card->lg_addr;
1128
            handle2 = card->lg_handle;
1129
            card->lg_addr = 0x00000000;
1130
            card->lg_handle = 0x00000000;
1131
         }
1132
         else /* (!lg_addr) */
1133
         {
1134
            card->lg_addr = addr1;
1135
            card->lg_handle = handle1;
1136
         }
1137
      }
1138
   }
1139
 
1140
   if (addr2)
1141
   {
1142
      if (cb->buf_type == BUF_SM)
1143
      {
1144
         if (card->sbfqc >= card->sbnr.max)
1145
         {
1146
            skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue);
1147
            dev_kfree_skb_any((struct sk_buff *) handle1);
1148
            skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue);
1149
            dev_kfree_skb_any((struct sk_buff *) handle2);
1150
            return;
1151
         }
1152
         else
1153
            card->sbfqc += 2;
1154
      }
1155
      else /* (buf_type == BUF_LG) */
1156
      {
1157
         if (card->lbfqc >= card->lbnr.max)
1158
         {
1159
            skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue);
1160
            dev_kfree_skb_any((struct sk_buff *) handle1);
1161
            skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue);
1162
            dev_kfree_skb_any((struct sk_buff *) handle2);
1163
            return;
1164
         }
1165
         else
1166
            card->lbfqc += 2;
1167
      }
1168
 
1169
      ns_grab_res_lock(card, flags);
1170
 
1171
      while (CMD_BUSY(card));
1172
      writel(addr2, card->membase + DR3);
1173
      writel(handle2, card->membase + DR2);
1174
      writel(addr1, card->membase + DR1);
1175
      writel(handle1, card->membase + DR0);
1176
      writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD);
1177
 
1178
      spin_unlock_irqrestore(&card->res_lock, flags);
1179
 
1180
      XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index,
1181
              (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2);
1182
   }
1183
 
1184
   if (!card->efbie && card->sbfqc >= card->sbnr.min &&
1185
       card->lbfqc >= card->lbnr.min)
1186
   {
1187
      card->efbie = 1;
1188
      writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG);
1189
   }
1190
 
1191
   return;
1192
}
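/* Illustrative note (not from the original source): free buffers are handed
 * to the card in pairs.  push_rxbufs() parks the first small (or large)
 * buffer in card->sm_addr/sm_handle (resp. lg_addr/lg_handle) and only
 * issues NS_CMD_WRITE_FREEBUFQ once a second buffer of the same kind shows
 * up, writing both address/handle pairs to DR0..DR3.  The "handle" is just
 * the sk_buff pointer cast to u32, so the receive path recovers the buffer
 * with the opposite cast:
 *
 *    struct sk_buff *skb = (struct sk_buff *) handle;
 *
 * which only works on 32-bit platforms and is one reason for the
 * BITS_PER_LONG check at the top of this file. */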
1193
 
1194
 
1195
 
1196
static irqreturn_t ns_irq_handler(int irq, void *dev_id)
1197
{
1198
   u32 stat_r;
1199
   ns_dev *card;
1200
   struct atm_dev *dev;
1201
   unsigned long flags;
1202
 
1203
   card = (ns_dev *) dev_id;
1204
   dev = card->atmdev;
1205
   card->intcnt++;
1206
 
1207
   PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index);
1208
 
1209
   ns_grab_int_lock(card, flags);
1210
 
1211
   stat_r = readl(card->membase + STAT);
1212
 
1213
   /* Transmit Status Indicator has been written to T. S. Queue */
1214
   if (stat_r & NS_STAT_TSIF)
1215
   {
1216
      TXPRINTK("nicstar%d: TSI interrupt\n", card->index);
1217
      process_tsq(card);
1218
      writel(NS_STAT_TSIF, card->membase + STAT);
1219
   }
1220
 
1221
   /* Incomplete CS-PDU has been transmitted */
1222
   if (stat_r & NS_STAT_TXICP)
1223
   {
1224
      writel(NS_STAT_TXICP, card->membase + STAT);
1225
      TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n",
1226
               card->index);
1227
   }
1228
 
1229
   /* Transmit Status Queue 7/8 full */
1230
   if (stat_r & NS_STAT_TSQF)
1231
   {
1232
      writel(NS_STAT_TSQF, card->membase + STAT);
1233
      PRINTK("nicstar%d: TSQ full.\n", card->index);
1234
      process_tsq(card);
1235
   }
1236
 
1237
   /* Timer overflow */
1238
   if (stat_r & NS_STAT_TMROF)
1239
   {
1240
      writel(NS_STAT_TMROF, card->membase + STAT);
1241
      PRINTK("nicstar%d: Timer overflow.\n", card->index);
1242
   }
1243
 
1244
   /* PHY device interrupt signal active */
1245
   if (stat_r & NS_STAT_PHYI)
1246
   {
1247
      writel(NS_STAT_PHYI, card->membase + STAT);
1248
      PRINTK("nicstar%d: PHY interrupt.\n", card->index);
1249
      if (dev->phy && dev->phy->interrupt) {
1250
         dev->phy->interrupt(dev);
1251
      }
1252
   }
1253
 
1254
   /* Small Buffer Queue is full */
1255
   if (stat_r & NS_STAT_SFBQF)
1256
   {
1257
      writel(NS_STAT_SFBQF, card->membase + STAT);
1258
      printk("nicstar%d: Small free buffer queue is full.\n", card->index);
1259
   }
1260
 
1261
   /* Large Buffer Queue is full */
1262
   if (stat_r & NS_STAT_LFBQF)
1263
   {
1264
      writel(NS_STAT_LFBQF, card->membase + STAT);
1265
      printk("nicstar%d: Large free buffer queue is full.\n", card->index);
1266
   }
1267
 
1268
   /* Receive Status Queue is full */
1269
   if (stat_r & NS_STAT_RSQF)
1270
   {
1271
      writel(NS_STAT_RSQF, card->membase + STAT);
1272
      printk("nicstar%d: RSQ full.\n", card->index);
1273
      process_rsq(card);
1274
   }
1275
 
1276
   /* Complete CS-PDU received */
1277
   if (stat_r & NS_STAT_EOPDU)
1278
   {
1279
      RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index);
1280
      process_rsq(card);
1281
      writel(NS_STAT_EOPDU, card->membase + STAT);
1282
   }
1283
 
1284
   /* Raw cell received */
1285
   if (stat_r & NS_STAT_RAWCF)
1286
   {
1287
      writel(NS_STAT_RAWCF, card->membase + STAT);
1288
#ifndef RCQ_SUPPORT
1289
      printk("nicstar%d: Raw cell received and no support yet...\n",
1290
             card->index);
1291
#endif /* RCQ_SUPPORT */
1292
      /* NOTE: the following procedure may keep a raw cell pending until the
1293
               next interrupt. As this preliminary support is only meant to
1294
               avoid buffer leakage, this is not an issue. */
1295
      while (readl(card->membase + RAWCT) != card->rawch)
1296
      {
1297
         ns_rcqe *rawcell;
1298
 
1299
         rawcell = (ns_rcqe *) bus_to_virt(card->rawch);
1300
         if (ns_rcqe_islast(rawcell))
1301
         {
1302
            struct sk_buff *oldbuf;
1303
 
1304
            oldbuf = card->rcbuf;
1305
            card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell);
1306
            card->rawch = (u32) virt_to_bus(card->rcbuf->data);
1307
            recycle_rx_buf(card, oldbuf);
1308
         }
1309
         else
1310
            card->rawch += NS_RCQE_SIZE;
1311
      }
1312
   }
1313
 
1314
   /* Small buffer queue is empty */
1315
   if (stat_r & NS_STAT_SFBQE)
1316
   {
1317
      int i;
1318
      struct sk_buff *sb;
1319
 
1320
      writel(NS_STAT_SFBQE, card->membase + STAT);
1321
      printk("nicstar%d: Small free buffer queue empty.\n",
1322
             card->index);
1323
      for (i = 0; i < card->sbnr.min; i++)
1324
      {
1325
         sb = dev_alloc_skb(NS_SMSKBSIZE);
1326
         if (sb == NULL)
1327
         {
1328
            writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1329
            card->efbie = 0;
1330
            break;
1331
         }
1332
         NS_SKB_CB(sb)->buf_type = BUF_SM;
1333
         skb_queue_tail(&card->sbpool.queue, sb);
1334
         skb_reserve(sb, NS_AAL0_HEADER);
1335
         push_rxbufs(card, sb);
1336
      }
1337
      card->sbfqc = i;
1338
      process_rsq(card);
1339
   }
1340
 
1341
   /* Large buffer queue empty */
1342
   if (stat_r & NS_STAT_LFBQE)
1343
   {
1344
      int i;
1345
      struct sk_buff *lb;
1346
 
1347
      writel(NS_STAT_LFBQE, card->membase + STAT);
1348
      printk("nicstar%d: Large free buffer queue empty.\n",
1349
             card->index);
1350
      for (i = 0; i < card->lbnr.min; i++)
1351
      {
1352
         lb = dev_alloc_skb(NS_LGSKBSIZE);
1353
         if (lb == NULL)
1354
         {
1355
            writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG);
1356
            card->efbie = 0;
1357
            break;
1358
         }
1359
         NS_SKB_CB(lb)->buf_type = BUF_LG;
1360
         skb_queue_tail(&card->lbpool.queue, lb);
1361
         skb_reserve(lb, NS_SMBUFSIZE);
1362
         push_rxbufs(card, lb);
1363
      }
1364
      card->lbfqc = i;
1365
      process_rsq(card);
1366
   }
1367
 
1368
   /* Receive Status Queue is 7/8 full */
1369
   if (stat_r & NS_STAT_RSQAF)
1370
   {
1371
      writel(NS_STAT_RSQAF, card->membase + STAT);
1372
      RXPRINTK("nicstar%d: RSQ almost full.\n", card->index);
1373
      process_rsq(card);
1374
   }
1375
 
1376
   spin_unlock_irqrestore(&card->int_lock, flags);
1377
   PRINTK("nicstar%d: end of interrupt service\n", card->index);
1378
   return IRQ_HANDLED;
1379
}
1380
 
1381
 
1382
 
1383
static int ns_open(struct atm_vcc *vcc)
1384
{
1385
   ns_dev *card;
1386
   vc_map *vc;
1387
   unsigned long tmpl, modl;
1388
   int tcr, tcra;       /* target cell rate, and absolute value */
1389
   int n = 0;            /* Number of entries in the TST. Initialized to remove
1390
                           the compiler warning. */
1391
   u32 u32d[4];
1392
   int frscdi = 0;       /* Index of the SCD. Initialized to remove the compiler
1393
                           warning. How I wish compilers were clever enough to
1394
                           tell which variables can truly be used
1395
                           uninitialized... */
1396
   int inuse;           /* tx or rx vc already in use by another vcc */
1397
   short vpi = vcc->vpi;
1398
   int vci = vcc->vci;
1399
 
1400
   card = (ns_dev *) vcc->dev->dev_data;
1401
   PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci);
1402
   if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1403
   {
1404
      PRINTK("nicstar%d: unsupported AAL.\n", card->index);
1405
      return -EINVAL;
1406
   }
1407
 
1408
   vc = &(card->vcmap[vpi << card->vcibits | vci]);
1409
   vcc->dev_data = vc;
1410
 
1411
   inuse = 0;
1412
   if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx)
1413
      inuse = 1;
1414
   if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx)
1415
      inuse += 2;
1416
   if (inuse)
1417
   {
1418
      printk("nicstar%d: %s vci already in use.\n", card->index,
1419
             inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx");
1420
      return -EINVAL;
1421
   }
1422
 
1423
   set_bit(ATM_VF_ADDR,&vcc->flags);
1424
 
1425
   /* NOTE: You are not allowed to modify an open connection's QOS. To change
1426
      that, remove the ATM_VF_PARTIAL flag checking. There may be other changes
1427
      needed to do that. */
1428
   if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
1429
   {
1430
      scq_info *scq;
1431
 
1432
      set_bit(ATM_VF_PARTIAL,&vcc->flags);
1433
      if (vcc->qos.txtp.traffic_class == ATM_CBR)
1434
      {
1435
         /* Check requested cell rate and availability of SCD */
1436
         if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 &&
1437
             vcc->qos.txtp.min_pcr == 0)
1438
         {
1439
            PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n",
1440
                   card->index);
1441
            clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1442
            clear_bit(ATM_VF_ADDR,&vcc->flags);
1443
            return -EINVAL;
1444
         }
1445
 
1446
         tcr = atm_pcr_goal(&(vcc->qos.txtp));
1447
         tcra = tcr >= 0 ? tcr : -tcr;
1448
 
1449
         PRINTK("nicstar%d: target cell rate = %d.\n", card->index,
1450
                vcc->qos.txtp.max_pcr);
1451
 
1452
         tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES;
1453
         modl = tmpl % card->max_pcr;
1454
 
1455
         n = (int)(tmpl / card->max_pcr);
1456
         if (tcr > 0)
1457
         {
1458
            if (modl > 0) n++;
1459
         }
1460
         else if (tcr == 0)
1461
         {
1462
            if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0)
1463
            {
1464
               PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index);
1465
               clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1466
               clear_bit(ATM_VF_ADDR,&vcc->flags);
1467
               return -EINVAL;
1468
            }
1469
         }
1470
 
1471
         if (n == 0)
1472
         {
1473
            printk("nicstar%d: selected bandwidth < granularity.\n", card->index);
1474
            clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1475
            clear_bit(ATM_VF_ADDR,&vcc->flags);
1476
            return -EINVAL;
1477
         }
1478
 
1479
         if (n > (card->tst_free_entries - NS_TST_RESERVED))
1480
         {
1481
            PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index);
1482
            clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1483
            clear_bit(ATM_VF_ADDR,&vcc->flags);
1484
            return -EINVAL;
1485
         }
1486
         else
1487
            card->tst_free_entries -= n;
1488
 
1489
         XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n);
1490
         for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++)
1491
         {
1492
            if (card->scd2vc[frscdi] == NULL)
1493
            {
1494
               card->scd2vc[frscdi] = vc;
1495
               break;
1496
            }
1497
         }
1498
         if (frscdi == NS_FRSCD_NUM)
1499
         {
1500
            PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index);
1501
            card->tst_free_entries += n;
1502
            clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1503
            clear_bit(ATM_VF_ADDR,&vcc->flags);
1504
            return -EBUSY;
1505
         }
1506
 
1507
         vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE;
1508
 
1509
         scq = get_scq(CBR_SCQSIZE, vc->cbr_scd);
1510
         if (scq == NULL)
1511
         {
1512
            PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index);
1513
            card->scd2vc[frscdi] = NULL;
1514
            card->tst_free_entries += n;
1515
            clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1516
            clear_bit(ATM_VF_ADDR,&vcc->flags);
1517
            return -ENOMEM;
1518
         }
1519
         vc->scq = scq;
1520
         u32d[0] = (u32) virt_to_bus(scq->base);
1521
         u32d[1] = (u32) 0x00000000;
1522
         u32d[2] = (u32) 0xffffffff;
1523
         u32d[3] = (u32) 0x00000000;
1524
         ns_write_sram(card, vc->cbr_scd, u32d, 4);
1525
 
1526
         fill_tst(card, n, vc);
1527
      }
1528
      else if (vcc->qos.txtp.traffic_class == ATM_UBR)
1529
      {
1530
         vc->cbr_scd = 0x00000000;
1531
         vc->scq = card->scq0;
1532
      }
1533
 
1534
      if (vcc->qos.txtp.traffic_class != ATM_NONE)
1535
      {
1536
         vc->tx = 1;
1537
         vc->tx_vcc = vcc;
1538
         vc->tbd_count = 0;
1539
      }
1540
      if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1541
      {
1542
         u32 status;
1543
 
1544
         vc->rx = 1;
1545
         vc->rx_vcc = vcc;
1546
         vc->rx_iov = NULL;
1547
 
1548
         /* Open the connection in hardware */
1549
         if (vcc->qos.aal == ATM_AAL5)
1550
            status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN;
1551
         else /* vcc->qos.aal == ATM_AAL0 */
1552
            status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN;
1553
#ifdef RCQ_SUPPORT
1554
         status |= NS_RCTE_RAWCELLINTEN;
1555
#endif /* RCQ_SUPPORT */
1556
         ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) *
1557
                       NS_RCT_ENTRY_SIZE, &status, 1);
1558
      }
1559
 
1560
   }
1561
 
1562
   set_bit(ATM_VF_READY,&vcc->flags);
1563
   return 0;
1564
}
1565
 
1566
 
1567
 
1568
static void ns_close(struct atm_vcc *vcc)
1569
{
1570
   vc_map *vc;
1571
   ns_dev *card;
1572
   u32 data;
1573
   int i;
1574
 
1575
   vc = vcc->dev_data;
1576
   card = vcc->dev->dev_data;
1577
   PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index,
1578
          (int) vcc->vpi, vcc->vci);
1579
 
1580
   clear_bit(ATM_VF_READY,&vcc->flags);
1581
 
1582
   if (vcc->qos.rxtp.traffic_class != ATM_NONE)
1583
   {
1584
      u32 addr;
1585
      unsigned long flags;
1586
 
1587
      addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE;
1588
      ns_grab_res_lock(card, flags);
1589
      while(CMD_BUSY(card));
1590
      writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD);
1591
      spin_unlock_irqrestore(&card->res_lock, flags);
1592
 
1593
      vc->rx = 0;
1594
      if (vc->rx_iov != NULL)
1595
      {
1596
         struct sk_buff *iovb;
1597
         u32 stat;
1598
 
1599
         stat = readl(card->membase + STAT);
1600
         card->sbfqc = ns_stat_sfbqc_get(stat);
1601
         card->lbfqc = ns_stat_lfbqc_get(stat);
1602
 
1603
         PRINTK("nicstar%d: closing a VC with pending rx buffers.\n",
1604
                card->index);
1605
         iovb = vc->rx_iov;
1606
         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
1607
                               NS_SKB(iovb)->iovcnt);
1608
         NS_SKB(iovb)->iovcnt = 0;
1609
         NS_SKB(iovb)->vcc = NULL;
1610
         ns_grab_int_lock(card, flags);
1611
         recycle_iov_buf(card, iovb);
1612
         spin_unlock_irqrestore(&card->int_lock, flags);
1613
         vc->rx_iov = NULL;
1614
      }
1615
   }
1616
 
1617
   if (vcc->qos.txtp.traffic_class != ATM_NONE)
1618
   {
1619
      vc->tx = 0;
1620
   }
1621
 
1622
   if (vcc->qos.txtp.traffic_class == ATM_CBR)
1623
   {
1624
      unsigned long flags;
1625
      ns_scqe *scqep;
1626
      scq_info *scq;
1627
 
1628
      scq = vc->scq;
1629
 
1630
      for (;;)
1631
      {
1632
         ns_grab_scq_lock(card, scq, flags);
1633
         scqep = scq->next;
1634
         if (scqep == scq->base)
1635
            scqep = scq->last;
1636
         else
1637
            scqep--;
1638
         if (scqep == scq->tail)
1639
         {
1640
            spin_unlock_irqrestore(&scq->lock, flags);
1641
            break;
1642
         }
1643
         /* If the last entry is not a TSR, place one in the SCQ in order to
1644
            be able to completely drain it and then close. */
1645
         if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next)
1646
         {
1647
            ns_scqe tsr;
1648
            u32 scdi, scqi;
1649
            u32 data;
1650
            int index;
1651
 
1652
            tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1653
            scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1654
            scqi = scq->next - scq->base;
1655
            tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1656
            tsr.word_3 = 0x00000000;
1657
            tsr.word_4 = 0x00000000;
1658
            *scq->next = tsr;
1659
            index = (int) scqi;
1660
            scq->skb[index] = NULL;
1661
            if (scq->next == scq->last)
1662
               scq->next = scq->base;
1663
            else
1664
               scq->next++;
1665
            data = (u32) virt_to_bus(scq->next);
1666
            ns_write_sram(card, scq->scd, &data, 1);
1667
         }
1668
         spin_unlock_irqrestore(&scq->lock, flags);
1669
         schedule();
1670
      }
1671
 
1672
      /* Free all TST entries */
1673
      data = NS_TST_OPCODE_VARIABLE;
1674
      for (i = 0; i < NS_TST_NUM_ENTRIES; i++)
1675
      {
1676
         if (card->tste2vc[i] == vc)
1677
         {
1678
            ns_write_sram(card, card->tst_addr + i, &data, 1);
1679
            card->tste2vc[i] = NULL;
1680
            card->tst_free_entries++;
1681
         }
1682
      }
1683
 
1684
      card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL;
1685
      free_scq(vc->scq, vcc);
1686
   }
1687
 
1688
   /* remove all references to vcc before deleting it */
1689
   if (vcc->qos.txtp.traffic_class != ATM_NONE)
1690
   {
1691
     unsigned long flags;
1692
     scq_info *scq = card->scq0;
1693
 
1694
     ns_grab_scq_lock(card, scq, flags);
1695
 
1696
     for(i = 0; i < scq->num_entries; i++) {
1697
       if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) {
1698
        ATM_SKB(scq->skb[i])->vcc = NULL;
1699
        atm_return(vcc, scq->skb[i]->truesize);
1700
        PRINTK("nicstar: deleted pending vcc mapping\n");
1701
       }
1702
     }
1703
 
1704
     spin_unlock_irqrestore(&scq->lock, flags);
1705
   }
1706
 
1707
   vcc->dev_data = NULL;
1708
   clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1709
   clear_bit(ATM_VF_ADDR,&vcc->flags);
1710
 
1711
#ifdef RX_DEBUG
1712
   {
1713
      u32 stat, cfg;
1714
      stat = readl(card->membase + STAT);
1715
      cfg = readl(card->membase + CFG);
1716
      printk("STAT = 0x%08X  CFG = 0x%08X  \n", stat, cfg);
1717
      printk("TSQ: base = 0x%08X  next = 0x%08X  last = 0x%08X  TSQT = 0x%08X \n",
1718
             (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last,
1719
             readl(card->membase + TSQT));
1720
      printk("RSQ: base = 0x%08X  next = 0x%08X  last = 0x%08X  RSQT = 0x%08X \n",
1721
             (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last,
1722
             readl(card->membase + RSQT));
1723
      printk("Empty free buffer queue interrupt %s \n",
1724
             card->efbie ? "enabled" : "disabled");
1725
      printk("SBCNT = %d  count = %d   LBCNT = %d count = %d \n",
1726
             ns_stat_sfbqc_get(stat), card->sbpool.count,
1727
             ns_stat_lfbqc_get(stat), card->lbpool.count);
1728
      printk("hbpool.count = %d  iovpool.count = %d \n",
1729
             card->hbpool.count, card->iovpool.count);
1730
   }
1731
#endif /* RX_DEBUG */
1732
}
1733
 
1734
 
1735
 
1736
static void fill_tst(ns_dev *card, int n, vc_map *vc)
1737
{
1738
   u32 new_tst;
1739
   unsigned long cl;
1740
   int e, r;
1741
   u32 data;
1742
 
1743
   /* It would be very complicated to keep the two TSTs synchronized while
1744
      ensuring that writes are only made to the inactive TST. So, for now I
1745
      will use only one TST. If problems occur, I will change this again. */
1746
 
1747
   new_tst = card->tst_addr;
1748
 
1749
   /* Fill procedure */
1750
 
1751
   for (e = 0; e < NS_TST_NUM_ENTRIES; e++)
1752
   {
1753
      if (card->tste2vc[e] == NULL)
1754
         break;
1755
   }
1756
   if (e == NS_TST_NUM_ENTRIES) {
1757
      printk("nicstar%d: No free TST entries found. \n", card->index);
1758
      return;
1759
   }
1760
 
1761
   r = n;
1762
   cl = NS_TST_NUM_ENTRIES;
1763
   data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd);
1764
 
1765
   while (r > 0)
1766
   {
1767
      if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL)
1768
      {
1769
         card->tste2vc[e] = vc;
1770
         ns_write_sram(card, new_tst + e, &data, 1);
1771
         cl -= NS_TST_NUM_ENTRIES;
1772
         r--;
1773
      }
1774
 
1775
      if (++e == NS_TST_NUM_ENTRIES) {
1776
         e = 0;
1777
      }
1778
      cl += n;
1779
   }
1780
 
1781
   /* End of fill procedure */
1782
 
1783
   data = ns_tste_make(NS_TST_OPCODE_END, new_tst);
1784
   ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1);
1785
   ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1);
1786
   card->tst_addr = new_tst;
1787
}
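
#if 0
/* Illustrative sketch only, not part of the driver: the fill loop above
   spreads the n entries granted to a CBR VC evenly over the table with an
   integer accumulator (cl), in the spirit of Bresenham's line algorithm.
   Below, a hypothetical stand-alone version of the same idea for a table of
   'size' slots, where 0 marks a free slot (it assumes, as the driver does,
   that at least n free slots exist).  With size == 8 and n == 3 on an empty
   table it marks slots 0, 3 and 6, i.e. roughly one slot every size/n
   positions. */
static void spread_entries_example(int *table, int size, int n, int mark)
{
   int e = 0, r = n;
   long cl = size;              /* an entry is due whenever cl reaches size */

   while (r > 0)
   {
      if (cl >= size && table[e] == 0)
      {
         table[e] = mark;
         cl -= size;
         r--;
      }
      if (++e == size)
         e = 0;
      cl += n;
   }
}
#endif /* 0 - illustrative sketch */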
1788
 
1789
 
1790
 
1791
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
1792
{
1793
   ns_dev *card;
1794
   vc_map *vc;
1795
   scq_info *scq;
1796
   unsigned long buflen;
1797
   ns_scqe scqe;
1798
   u32 flags;           /* TBD flags, not CPU flags */
1799
 
1800
   card = vcc->dev->dev_data;
1801
   TXPRINTK("nicstar%d: ns_send() called.\n", card->index);
1802
   if ((vc = (vc_map *) vcc->dev_data) == NULL)
1803
   {
1804
      printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
1805
      atomic_inc(&vcc->stats->tx_err);
1806
      dev_kfree_skb_any(skb);
1807
      return -EINVAL;
1808
   }
1809
 
1810
   if (!vc->tx)
1811
   {
1812
      printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
1813
      atomic_inc(&vcc->stats->tx_err);
1814
      dev_kfree_skb_any(skb);
1815
      return -EINVAL;
1816
   }
1817
 
1818
   if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
1819
   {
1820
      printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
1821
      atomic_inc(&vcc->stats->tx_err);
1822
      dev_kfree_skb_any(skb);
1823
      return -EINVAL;
1824
   }
1825
 
1826
   if (skb_shinfo(skb)->nr_frags != 0)
1827
   {
1828
      printk("nicstar%d: No scatter-gather yet.\n", card->index);
1829
      atomic_inc(&vcc->stats->tx_err);
1830
      dev_kfree_skb_any(skb);
1831
      return -EINVAL;
1832
   }
1833
 
1834
   ATM_SKB(skb)->vcc = vcc;
1835
 
1836
   if (vcc->qos.aal == ATM_AAL5)
1837
   {
1838
      buflen = (skb->len + 47 + 8) / 48 * 48;   /* Multiple of 48 */
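      /* Worked example (illustrative only): for a 100 byte SDU the sum is
         100 + 47 + 8 = 155, so buflen = 155 / 48 * 48 = 144 bytes, i.e. three
         cells holding the SDU, the padding and the 8 byte AAL5 trailer.  The
         "+ 8" reserves room for the trailer and the "+ 47" rounds up to a
         whole number of cells. */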
1839
      flags = NS_TBD_AAL5;
1840
      scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data));
1841
      scqe.word_3 = cpu_to_le32((u32) skb->len);
1842
      scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0,
1843
                           ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0);
1844
      flags |= NS_TBD_EOPDU;
1845
   }
1846
   else /* (vcc->qos.aal == ATM_AAL0) */
1847
   {
1848
      buflen = ATM_CELL_PAYLOAD;        /* i.e., 48 bytes */
1849
      flags = NS_TBD_AAL0;
1850
      scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER);
1851
      scqe.word_3 = cpu_to_le32(0x00000000);
1852
      if (*skb->data & 0x02)    /* Payload type 1 - end of pdu */
1853
         flags |= NS_TBD_EOPDU;
1854
      scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK);
1855
      /* Force the VPI/VCI to be the same as in VCC struct */
1856
      scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT |
1857
                                 ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) &
1858
                                 NS_TBD_VC_MASK);
1859
   }
1860
 
1861
   if (vcc->qos.txtp.traffic_class == ATM_CBR)
1862
   {
1863
      scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen);
1864
      scq = ((vc_map *) vcc->dev_data)->scq;
1865
   }
1866
   else
1867
   {
1868
      scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen);
1869
      scq = card->scq0;
1870
   }
1871
 
1872
   if (push_scqe(card, vc, scq, &scqe, skb) != 0)
1873
   {
1874
      atomic_inc(&vcc->stats->tx_err);
1875
      dev_kfree_skb_any(skb);
1876
      return -EIO;
1877
   }
1878
   atomic_inc(&vcc->stats->tx);
1879
 
1880
   return 0;
1881
}
1882
 
1883
 
1884
 
1885
static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
1886
                     struct sk_buff *skb)
1887
{
1888
   unsigned long flags;
1889
   ns_scqe tsr;
1890
   u32 scdi, scqi;
1891
   int scq_is_vbr;
1892
   u32 data;
1893
   int index;
1894
 
1895
   ns_grab_scq_lock(card, scq, flags);
1896
   while (scq->tail == scq->next)
1897
   {
1898
      if (in_interrupt()) {
1899
         spin_unlock_irqrestore(&scq->lock, flags);
1900
         printk("nicstar%d: Error pushing TBD.\n", card->index);
1901
         return 1;
1902
      }
1903
 
1904
      scq->full = 1;
1905
      spin_unlock_irqrestore(&scq->lock, flags);
1906
      interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1907
      ns_grab_scq_lock(card, scq, flags);
1908
 
1909
      if (scq->full) {
1910
         spin_unlock_irqrestore(&scq->lock, flags);
1911
         printk("nicstar%d: Timeout pushing TBD.\n", card->index);
1912
         return 1;
1913
      }
1914
   }
1915
   *scq->next = *tbd;
1916
   index = (int) (scq->next - scq->base);
1917
   scq->skb[index] = skb;
1918
   XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n",
1919
           card->index, (u32) skb, index);
1920
   XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1921
           card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2),
1922
           le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4),
1923
           (u32) scq->next);
1924
   if (scq->next == scq->last)
1925
      scq->next = scq->base;
1926
   else
1927
      scq->next++;
1928
 
1929
   vc->tbd_count++;
1930
   if (scq->num_entries == VBR_SCQ_NUM_ENTRIES)
1931
   {
1932
      scq->tbd_count++;
1933
      scq_is_vbr = 1;
1934
   }
1935
   else
1936
      scq_is_vbr = 0;
1937
 
1938
   if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ)
1939
   {
1940
      int has_run = 0;
1941
 
1942
      while (scq->tail == scq->next)
1943
      {
1944
         if (in_interrupt()) {
1945
            data = (u32) virt_to_bus(scq->next);
1946
            ns_write_sram(card, scq->scd, &data, 1);
1947
            spin_unlock_irqrestore(&scq->lock, flags);
1948
            printk("nicstar%d: Error pushing TSR.\n", card->index);
1949
            return 0;
1950
         }
1951
 
1952
         scq->full = 1;
1953
         if (has_run++) break;
1954
         spin_unlock_irqrestore(&scq->lock, flags);
1955
         interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT);
1956
         ns_grab_scq_lock(card, scq, flags);
1957
      }
1958
 
1959
      if (!scq->full)
1960
      {
1961
         tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE);
1962
         if (scq_is_vbr)
1963
            scdi = NS_TSR_SCDISVBR;
1964
         else
1965
            scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE;
1966
         scqi = scq->next - scq->base;
1967
         tsr.word_2 = ns_tsr_mkword_2(scdi, scqi);
1968
         tsr.word_3 = 0x00000000;
1969
         tsr.word_4 = 0x00000000;
1970
 
1971
         *scq->next = tsr;
1972
         index = (int) scqi;
1973
         scq->skb[index] = NULL;
1974
         XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n",
1975
                 card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2),
1976
                 le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4),
1977
                 (u32) scq->next);
1978
         if (scq->next == scq->last)
1979
            scq->next = scq->base;
1980
         else
1981
            scq->next++;
1982
         vc->tbd_count = 0;
1983
         scq->tbd_count = 0;
1984
      }
1985
      else
1986
         PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index);
1987
   }
1988
   data = (u32) virt_to_bus(scq->next);
1989
   ns_write_sram(card, scq->scd, &data, 1);
1990
 
1991
   spin_unlock_irqrestore(&scq->lock, flags);
1992
 
1993
   return 0;
1994
}
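
/* Bookkeeping note for push_scqe() above: once a VC has queued MAX_TBD_PER_VC
   TBDs, or the shared VBR/UBR queue has accumulated MAX_TBD_PER_SCQ, a TSR is
   appended with its interrupt enable bit set; when the adapter reaches it, a
   TSI appears in the TSQ and process_tsq()/drain_scq() below free every skb
   queued up to the position reported in that TSI. */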
1995
 
1996
 
1997
 
1998
static void process_tsq(ns_dev *card)
1999
{
2000
   u32 scdi;
2001
   scq_info *scq;
2002
   ns_tsi *previous = NULL, *one_ahead, *two_ahead;
2003
   int serviced_entries;   /* flag indicating at least one entry was serviced */
2004
 
2005
   serviced_entries = 0;
2006
 
2007
   if (card->tsq.next == card->tsq.last)
2008
      one_ahead = card->tsq.base;
2009
   else
2010
      one_ahead = card->tsq.next + 1;
2011
 
2012
   if (one_ahead == card->tsq.last)
2013
      two_ahead = card->tsq.base;
2014
   else
2015
      two_ahead = one_ahead + 1;
2016
 
2017
   while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) ||
2018
          !ns_tsi_isempty(two_ahead))
2019
          /* At most two empty, as stated in the 77201 errata */
2020
   {
2021
      serviced_entries = 1;
2022
 
2023
      /* Skip the one or two possible empty entries */
2024
      while (ns_tsi_isempty(card->tsq.next)) {
2025
         if (card->tsq.next == card->tsq.last)
2026
            card->tsq.next = card->tsq.base;
2027
         else
2028
            card->tsq.next++;
2029
      }
2030
 
2031
      if (!ns_tsi_tmrof(card->tsq.next))
2032
      {
2033
         scdi = ns_tsi_getscdindex(card->tsq.next);
2034
         if (scdi == NS_TSI_SCDISVBR)
2035
            scq = card->scq0;
2036
         else
2037
         {
2038
            if (card->scd2vc[scdi] == NULL)
2039
            {
2040
               printk("nicstar%d: could not find VC from SCD index.\n",
2041
                      card->index);
2042
               ns_tsi_init(card->tsq.next);
2043
               return;
2044
            }
2045
            scq = card->scd2vc[scdi]->scq;
2046
         }
2047
         drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next));
2048
         scq->full = 0;
2049
         wake_up_interruptible(&(scq->scqfull_waitq));
2050
      }
2051
 
2052
      ns_tsi_init(card->tsq.next);
2053
      previous = card->tsq.next;
2054
      if (card->tsq.next == card->tsq.last)
2055
         card->tsq.next = card->tsq.base;
2056
      else
2057
         card->tsq.next++;
2058
 
2059
      if (card->tsq.next == card->tsq.last)
2060
         one_ahead = card->tsq.base;
2061
      else
2062
         one_ahead = card->tsq.next + 1;
2063
 
2064
      if (one_ahead == card->tsq.last)
2065
         two_ahead = card->tsq.base;
2066
      else
2067
         two_ahead = one_ahead + 1;
2068
   }
2069
 
2070
   if (serviced_entries) {
2071
      writel((((u32) previous) - ((u32) card->tsq.base)),
2072
             card->membase + TSQH);
2073
   }
2074
}
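
/* As noted inside process_tsq() above, the 77201 errata allows up to two
   entries ahead of a valid TSI to still read back as empty.  That is why the
   loop inspects tsq.next plus the two entries after it (wrapping back to
   tsq.base) and only stops once all three are empty; leading empty entries
   are simply skipped before the valid one is serviced. */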
2075
 
2076
 
2077
 
2078
static void drain_scq(ns_dev *card, scq_info *scq, int pos)
2079
{
2080
   struct atm_vcc *vcc;
2081
   struct sk_buff *skb;
2082
   int i;
2083
   unsigned long flags;
2084
 
2085
   XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n",
2086
           card->index, (u32) scq, pos);
2087
   if (pos >= scq->num_entries)
2088
   {
2089
      printk("nicstar%d: Bad index on drain_scq().\n", card->index);
2090
      return;
2091
   }
2092
 
2093
   ns_grab_scq_lock(card, scq, flags);
2094
   i = (int) (scq->tail - scq->base);
2095
   if (++i == scq->num_entries)
2096
      i = 0;
2097
   while (i != pos)
2098
   {
2099
      skb = scq->skb[i];
2100
      XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n",
2101
              card->index, (u32) skb, i);
2102
      if (skb != NULL)
2103
      {
2104
         vcc = ATM_SKB(skb)->vcc;
2105
         if (vcc && vcc->pop != NULL) {
2106
            vcc->pop(vcc, skb);
2107
         } else {
2108
            dev_kfree_skb_irq(skb);
2109
         }
2110
         scq->skb[i] = NULL;
2111
      }
2112
      if (++i == scq->num_entries)
2113
         i = 0;
2114
   }
2115
   scq->tail = scq->base + pos;
2116
   spin_unlock_irqrestore(&scq->lock, flags);
2117
}
2118
 
2119
 
2120
 
2121
static void process_rsq(ns_dev *card)
2122
{
2123
   ns_rsqe *previous;
2124
 
2125
   if (!ns_rsqe_valid(card->rsq.next))
2126
      return;
2127
   do {
2128
      dequeue_rx(card, card->rsq.next);
2129
      ns_rsqe_init(card->rsq.next);
2130
      previous = card->rsq.next;
2131
      if (card->rsq.next == card->rsq.last)
2132
         card->rsq.next = card->rsq.base;
2133
      else
2134
         card->rsq.next++;
2135
   } while (ns_rsqe_valid(card->rsq.next));
2136
   writel((((u32) previous) - ((u32) card->rsq.base)),
2137
          card->membase + RSQH);
2138
}
2139
 
2140
 
2141
 
2142
static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
2143
{
2144
   u32 vpi, vci;
2145
   vc_map *vc;
2146
   struct sk_buff *iovb;
2147
   struct iovec *iov;
2148
   struct atm_vcc *vcc;
2149
   struct sk_buff *skb;
2150
   unsigned short aal5_len;
2151
   int len;
2152
   u32 stat;
2153
 
2154
   stat = readl(card->membase + STAT);
2155
   card->sbfqc = ns_stat_sfbqc_get(stat);
2156
   card->lbfqc = ns_stat_lfbqc_get(stat);
2157
 
2158
   skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle);
2159
   vpi = ns_rsqe_vpi(rsqe);
2160
   vci = ns_rsqe_vci(rsqe);
2161
   if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits)
2162
   {
2163
      printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n",
2164
             card->index, vpi, vci);
2165
      recycle_rx_buf(card, skb);
2166
      return;
2167
   }
2168
 
2169
   vc = &(card->vcmap[vpi << card->vcibits | vci]);
2170
   if (!vc->rx)
2171
   {
2172
      RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n",
2173
             card->index, vpi, vci);
2174
      recycle_rx_buf(card, skb);
2175
      return;
2176
   }
2177
 
2178
   vcc = vc->rx_vcc;
2179
 
2180
   if (vcc->qos.aal == ATM_AAL0)
2181
   {
2182
      struct sk_buff *sb;
2183
      unsigned char *cell;
2184
      int i;
2185
 
2186
      cell = skb->data;
2187
      for (i = ns_rsqe_cellcount(rsqe); i; i--)
2188
      {
2189
         if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL)
2190
         {
2191
            printk("nicstar%d: Can't allocate buffers for aal0.\n",
2192
                   card->index);
2193
            atomic_add(i,&vcc->stats->rx_drop);
2194
            break;
2195
         }
2196
         if (!atm_charge(vcc, sb->truesize))
2197
         {
2198
            RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
2199
                     card->index);
2200
            atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
2201
            dev_kfree_skb_any(sb);
2202
            break;
2203
         }
2204
         /* Rebuild the header */
2205
         *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 |
2206
                               (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000);
2207
         if (i == 1 && ns_rsqe_eopdu(rsqe))
2208
            *((u32 *) sb->data) |= 0x00000002;
2209
         skb_put(sb, NS_AAL0_HEADER);
2210
         memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD);
2211
         skb_put(sb, ATM_CELL_PAYLOAD);
2212
         ATM_SKB(sb)->vcc = vcc;
2213
         __net_timestamp(sb);
2214
         vcc->push(vcc, sb);
2215
         atomic_inc(&vcc->stats->rx);
2216
         cell += ATM_CELL_PAYLOAD;
2217
      }
2218
 
2219
      recycle_rx_buf(card, skb);
2220
      return;
2221
   }
2222
 
2223
   /* To reach this point, the AAL layer can only be AAL5 */
2224
 
2225
   if ((iovb = vc->rx_iov) == NULL)
2226
   {
2227
      iovb = skb_dequeue(&(card->iovpool.queue));
2228
      if (iovb == NULL)         /* No buffers in the queue */
2229
      {
2230
         iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC);
2231
         if (iovb == NULL)
2232
         {
2233
            printk("nicstar%d: Out of iovec buffers.\n", card->index);
2234
            atomic_inc(&vcc->stats->rx_drop);
2235
            recycle_rx_buf(card, skb);
2236
            return;
2237
         }
2238
         NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2239
      }
2240
      else
2241
         if (--card->iovpool.count < card->iovnr.min)
2242
         {
2243
            struct sk_buff *new_iovb;
2244
            if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL)
2245
            {
2246
               NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2247
               skb_queue_tail(&card->iovpool.queue, new_iovb);
2248
               card->iovpool.count++;
2249
            }
2250
         }
2251
      vc->rx_iov = iovb;
2252
      NS_SKB(iovb)->iovcnt = 0;
2253
      iovb->len = 0;
2254
      iovb->data = iovb->head;
2255
      skb_reset_tail_pointer(iovb);
2256
      NS_SKB(iovb)->vcc = vcc;
2257
      /* IMPORTANT: a pointer to the sk_buff containing the small or large
2258
                    buffer is stored as iovec base, NOT a pointer to the
2259
                    small or large buffer itself. */
2260
   }
2261
   else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
2262
   {
2263
      printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
2264
      atomic_inc(&vcc->stats->rx_err);
2265
      recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
2266
      NS_SKB(iovb)->iovcnt = 0;
2267
      iovb->len = 0;
2268
      iovb->data = iovb->head;
2269
      skb_reset_tail_pointer(iovb);
2270
      NS_SKB(iovb)->vcc = vcc;
2271
   }
2272
   iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++];
2273
   iov->iov_base = (void *) skb;
2274
   iov->iov_len = ns_rsqe_cellcount(rsqe) * 48;
2275
   iovb->len += iov->iov_len;
2276
 
2277
   if (NS_SKB(iovb)->iovcnt == 1)
2278
   {
2279
      if (NS_SKB_CB(skb)->buf_type != BUF_SM)
2280
      {
2281
         printk("nicstar%d: Expected a small buffer, and this is not one.\n",
2282
                card->index);
2283
         which_list(card, skb);
2284
         atomic_inc(&vcc->stats->rx_err);
2285
         recycle_rx_buf(card, skb);
2286
         vc->rx_iov = NULL;
2287
         recycle_iov_buf(card, iovb);
2288
         return;
2289
      }
2290
   }
2291
   else /* NS_SKB(iovb)->iovcnt >= 2 */
2292
   {
2293
      if (NS_SKB_CB(skb)->buf_type != BUF_LG)
2294
      {
2295
         printk("nicstar%d: Expected a large buffer, and this is not one.\n",
2296
                card->index);
2297
         which_list(card, skb);
2298
         atomic_inc(&vcc->stats->rx_err);
2299
         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2300
                               NS_SKB(iovb)->iovcnt);
2301
         vc->rx_iov = NULL;
2302
         recycle_iov_buf(card, iovb);
2303
         return;
2304
      }
2305
   }
2306
 
2307
   if (ns_rsqe_eopdu(rsqe))
2308
   {
2309
      /* This works correctly regardless of the endianness of the host */
2310
      unsigned char *L1L2 = (unsigned char *)((u32)skb->data +
2311
                                              iov->iov_len - 6);
2312
      aal5_len = L1L2[0] << 8 | L1L2[1];
2313
      len = (aal5_len == 0x0000) ? 0x10000 : aal5_len;
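      /* Worked example (illustrative only): the two bytes read above are the
         big-endian Length field of the AAL5 trailer (UU, CPI, Length and
         CRC-32 occupy the last 8 bytes of the final cell), so they sit 6 bytes
         before the end of the received data.  For a 100 byte SDU carried in
         144 bytes of buffers, aal5_len is 100 and the check below passes,
         since 100 + 8 <= 144 <= 100 + 47 + 8. */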
2314
      if (ns_rsqe_crcerr(rsqe) ||
2315
          len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2316
      {
2317
         printk("nicstar%d: AAL5 CRC error", card->index);
2318
         if (len + 8 > iovb->len || len + (47 + 8) < iovb->len)
2319
            printk(" - PDU size mismatch.\n");
2320
         else
2321
            printk(".\n");
2322
         atomic_inc(&vcc->stats->rx_err);
2323
         recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2324
           NS_SKB(iovb)->iovcnt);
2325
         vc->rx_iov = NULL;
2326
         recycle_iov_buf(card, iovb);
2327
         return;
2328
      }
2329
 
2330
      /* By this point we (hopefully) have a complete SDU without errors. */
2331
 
2332
      if (NS_SKB(iovb)->iovcnt == 1)    /* Just a small buffer */
2333
      {
2334
         /* skb points to a small buffer */
2335
         if (!atm_charge(vcc, skb->truesize))
2336
         {
2337
            push_rxbufs(card, skb);
2338
            atomic_inc(&vcc->stats->rx_drop);
2339
         }
2340
         else
2341
         {
2342
            skb_put(skb, len);
2343
            dequeue_sm_buf(card, skb);
2344
#ifdef NS_USE_DESTRUCTORS
2345
            skb->destructor = ns_sb_destructor;
2346
#endif /* NS_USE_DESTRUCTORS */
2347
            ATM_SKB(skb)->vcc = vcc;
2348
            __net_timestamp(skb);
2349
            vcc->push(vcc, skb);
2350
            atomic_inc(&vcc->stats->rx);
2351
         }
2352
      }
2353
      else if (NS_SKB(iovb)->iovcnt == 2)       /* One small plus one large buffer */
2354
      {
2355
         struct sk_buff *sb;
2356
 
2357
         sb = (struct sk_buff *) (iov - 1)->iov_base;
2358
         /* skb points to a large buffer */
2359
 
2360
         if (len <= NS_SMBUFSIZE)
2361
         {
2362
            if (!atm_charge(vcc, sb->truesize))
2363
            {
2364
               push_rxbufs(card, sb);
2365
               atomic_inc(&vcc->stats->rx_drop);
2366
            }
2367
            else
2368
            {
2369
               skb_put(sb, len);
2370
               dequeue_sm_buf(card, sb);
2371
#ifdef NS_USE_DESTRUCTORS
2372
               sb->destructor = ns_sb_destructor;
2373
#endif /* NS_USE_DESTRUCTORS */
2374
               ATM_SKB(sb)->vcc = vcc;
2375
               __net_timestamp(sb);
2376
               vcc->push(vcc, sb);
2377
               atomic_inc(&vcc->stats->rx);
2378
            }
2379
 
2380
            push_rxbufs(card, skb);
2381
 
2382
         }
2383
         else                   /* len > NS_SMBUFSIZE, the usual case */
2384
         {
2385
            if (!atm_charge(vcc, skb->truesize))
2386
            {
2387
               push_rxbufs(card, skb);
2388
               atomic_inc(&vcc->stats->rx_drop);
2389
            }
2390
            else
2391
            {
2392
               dequeue_lg_buf(card, skb);
2393
#ifdef NS_USE_DESTRUCTORS
2394
               skb->destructor = ns_lb_destructor;
2395
#endif /* NS_USE_DESTRUCTORS */
2396
               skb_push(skb, NS_SMBUFSIZE);
2397
               skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE);
2398
               skb_put(skb, len - NS_SMBUFSIZE);
2399
               ATM_SKB(skb)->vcc = vcc;
2400
               __net_timestamp(skb);
2401
               vcc->push(vcc, skb);
2402
               atomic_inc(&vcc->stats->rx);
2403
            }
2404
 
2405
            push_rxbufs(card, sb);
2406
 
2407
         }
2408
 
2409
      }
2410
      else                              /* Must push a huge buffer */
2411
      {
2412
         struct sk_buff *hb, *sb, *lb;
2413
         int remaining, tocopy;
2414
         int j;
2415
 
2416
         hb = skb_dequeue(&(card->hbpool.queue));
2417
         if (hb == NULL)                /* No buffers in the queue */
2418
         {
2419
 
2420
            hb = dev_alloc_skb(NS_HBUFSIZE);
2421
            if (hb == NULL)
2422
            {
2423
               printk("nicstar%d: Out of huge buffers.\n", card->index);
2424
               atomic_inc(&vcc->stats->rx_drop);
2425
               recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
2426
                                     NS_SKB(iovb)->iovcnt);
2427
               vc->rx_iov = NULL;
2428
               recycle_iov_buf(card, iovb);
2429
               return;
2430
            }
2431
            else if (card->hbpool.count < card->hbnr.min)
2432
            {
2433
               struct sk_buff *new_hb;
2434
               if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2435
               {
2436
                  skb_queue_tail(&card->hbpool.queue, new_hb);
2437
                  card->hbpool.count++;
2438
               }
2439
            }
2440
            NS_SKB_CB(hb)->buf_type = BUF_NONE;
2441
         }
2442
         else
2443
         if (--card->hbpool.count < card->hbnr.min)
2444
         {
2445
            struct sk_buff *new_hb;
2446
            if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2447
            {
2448
               NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
2449
               skb_queue_tail(&card->hbpool.queue, new_hb);
2450
               card->hbpool.count++;
2451
            }
2452
            if (card->hbpool.count < card->hbnr.min)
2453
            {
2454
               if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL)
2455
               {
2456
                  NS_SKB_CB(new_hb)->buf_type = BUF_NONE;
2457
                  skb_queue_tail(&card->hbpool.queue, new_hb);
2458
                  card->hbpool.count++;
2459
               }
2460
            }
2461
         }
2462
 
2463
         iov = (struct iovec *) iovb->data;
2464
 
2465
         if (!atm_charge(vcc, hb->truesize))
2466
         {
2467
            recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt);
2468
            if (card->hbpool.count < card->hbnr.max)
2469
            {
2470
               skb_queue_tail(&card->hbpool.queue, hb);
2471
               card->hbpool.count++;
2472
            }
2473
            else
2474
               dev_kfree_skb_any(hb);
2475
            atomic_inc(&vcc->stats->rx_drop);
2476
         }
2477
         else
2478
         {
2479
            /* Copy the small buffer to the huge buffer */
2480
            sb = (struct sk_buff *) iov->iov_base;
2481
            skb_copy_from_linear_data(sb, hb->data, iov->iov_len);
2482
            skb_put(hb, iov->iov_len);
2483
            remaining = len - iov->iov_len;
2484
            iov++;
2485
            /* Free the small buffer */
2486
            push_rxbufs(card, sb);
2487
 
2488
            /* Copy all large buffers to the huge buffer and free them */
2489
            for (j = 1; j < NS_SKB(iovb)->iovcnt; j++)
2490
            {
2491
               lb = (struct sk_buff *) iov->iov_base;
2492
               tocopy = min_t(int, remaining, iov->iov_len);
2493
               skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy);
2494
               skb_put(hb, tocopy);
2495
               iov++;
2496
               remaining -= tocopy;
2497
               push_rxbufs(card, lb);
2498
            }
2499
#ifdef EXTRA_DEBUG
2500
            if (remaining != 0 || hb->len != len)
2501
               printk("nicstar%d: Huge buffer len mismatch.\n", card->index);
2502
#endif /* EXTRA_DEBUG */
2503
            ATM_SKB(hb)->vcc = vcc;
2504
#ifdef NS_USE_DESTRUCTORS
2505
            hb->destructor = ns_hb_destructor;
2506
#endif /* NS_USE_DESTRUCTORS */
2507
            __net_timestamp(hb);
2508
            vcc->push(vcc, hb);
2509
            atomic_inc(&vcc->stats->rx);
2510
         }
2511
      }
2512
 
2513
      vc->rx_iov = NULL;
2514
      recycle_iov_buf(card, iovb);
2515
   }
2516
 
2517
}
2518
 
2519
 
2520
 
2521
#ifdef NS_USE_DESTRUCTORS
2522
 
2523
static void ns_sb_destructor(struct sk_buff *sb)
2524
{
2525
   ns_dev *card;
2526
   u32 stat;
2527
 
2528
   card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data;
2529
   stat = readl(card->membase + STAT);
2530
   card->sbfqc = ns_stat_sfbqc_get(stat);
2531
   card->lbfqc = ns_stat_lfbqc_get(stat);
2532
 
2533
   do
2534
   {
2535
      sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2536
      if (sb == NULL)
2537
         break;
2538
      NS_SKB_CB(sb)->buf_type = BUF_SM;
2539
      skb_queue_tail(&card->sbpool.queue, sb);
2540
      skb_reserve(sb, NS_AAL0_HEADER);
2541
      push_rxbufs(card, sb);
2542
   } while (card->sbfqc < card->sbnr.min);
2543
}
2544
 
2545
 
2546
 
2547
static void ns_lb_destructor(struct sk_buff *lb)
2548
{
2549
   ns_dev *card;
2550
   u32 stat;
2551
 
2552
   card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data;
2553
   stat = readl(card->membase + STAT);
2554
   card->sbfqc = ns_stat_sfbqc_get(stat);
2555
   card->lbfqc = ns_stat_lfbqc_get(stat);
2556
 
2557
   do
2558
   {
2559
      lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2560
      if (lb == NULL)
2561
         break;
2562
      NS_SKB_CB(lb)->buf_type = BUF_LG;
2563
      skb_queue_tail(&card->lbpool.queue, lb);
2564
      skb_reserve(lb, NS_SMBUFSIZE);
2565
      push_rxbufs(card, lb);
2566
   } while (card->lbfqc < card->lbnr.min);
2567
}
2568
 
2569
 
2570
 
2571
static void ns_hb_destructor(struct sk_buff *hb)
2572
{
2573
   ns_dev *card;
2574
 
2575
   card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data;
2576
 
2577
   while (card->hbpool.count < card->hbnr.init)
2578
   {
2579
      hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2580
      if (hb == NULL)
2581
         break;
2582
      NS_SKB_CB(hb)->buf_type = BUF_NONE;
2583
      skb_queue_tail(&card->hbpool.queue, hb);
2584
      card->hbpool.count++;
2585
   }
2586
}
2587
 
2588
#endif /* NS_USE_DESTRUCTORS */
2589
 
2590
 
2591
static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb)
2592
{
2593
        struct ns_skb_cb *cb = NS_SKB_CB(skb);
2594
 
2595
        if (unlikely(cb->buf_type == BUF_NONE)) {
2596
                printk("nicstar%d: What kind of rx buffer is this?\n", card->index);
2597
                dev_kfree_skb_any(skb);
2598
        } else
2599
                push_rxbufs(card, skb);
2600
}
2601
 
2602
 
2603
static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count)
2604
{
2605
        while (count-- > 0)
2606
                recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base);
2607
}
2608
 
2609
 
2610
static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb)
2611
{
2612
   if (card->iovpool.count < card->iovnr.max)
2613
   {
2614
      skb_queue_tail(&card->iovpool.queue, iovb);
2615
      card->iovpool.count++;
2616
   }
2617
   else
2618
      dev_kfree_skb_any(iovb);
2619
}
2620
 
2621
 
2622
 
2623
static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb)
2624
{
2625
   skb_unlink(sb, &card->sbpool.queue);
2626
#ifdef NS_USE_DESTRUCTORS
2627
   if (card->sbfqc < card->sbnr.min)
2628
#else
2629
   if (card->sbfqc < card->sbnr.init)
2630
   {
2631
      struct sk_buff *new_sb;
2632
      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2633
      {
2634
         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2635
         skb_queue_tail(&card->sbpool.queue, new_sb);
2636
         skb_reserve(new_sb, NS_AAL0_HEADER);
2637
         push_rxbufs(card, new_sb);
2638
      }
2639
   }
2640
   if (card->sbfqc < card->sbnr.init)
2641
#endif /* NS_USE_DESTRUCTORS */
2642
   {
2643
      struct sk_buff *new_sb;
2644
      if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL)
2645
      {
2646
         NS_SKB_CB(new_sb)->buf_type = BUF_SM;
2647
         skb_queue_tail(&card->sbpool.queue, new_sb);
2648
         skb_reserve(new_sb, NS_AAL0_HEADER);
2649
         push_rxbufs(card, new_sb);
2650
      }
2651
   }
2652
}
2653
 
2654
 
2655
 
2656
static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb)
2657
{
2658
   skb_unlink(lb, &card->lbpool.queue);
2659
#ifdef NS_USE_DESTRUCTORS
2660
   if (card->lbfqc < card->lbnr.min)
2661
#else
2662
   if (card->lbfqc < card->lbnr.init)
2663
   {
2664
      struct sk_buff *new_lb;
2665
      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2666
      {
2667
         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2668
         skb_queue_tail(&card->lbpool.queue, new_lb);
2669
         skb_reserve(new_lb, NS_SMBUFSIZE);
2670
         push_rxbufs(card, new_lb);
2671
      }
2672
   }
2673
   if (card->lbfqc < card->lbnr.init)
2674
#endif /* NS_USE_DESTRUCTORS */
2675
   {
2676
      struct sk_buff *new_lb;
2677
      if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL)
2678
      {
2679
         NS_SKB_CB(new_lb)->buf_type = BUF_LG;
2680
         skb_queue_tail(&card->lbpool.queue, new_lb);
2681
         skb_reserve(new_lb, NS_SMBUFSIZE);
2682
         push_rxbufs(card, new_lb);
2683
      }
2684
   }
2685
}
2686
 
2687
 
2688
 
2689
static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
2690
{
2691
   u32 stat;
2692
   ns_dev *card;
2693
   int left;
2694
 
2695
   left = (int) *pos;
2696
   card = (ns_dev *) dev->dev_data;
2697
   stat = readl(card->membase + STAT);
2698
   if (!left--)
2699
      return sprintf(page, "Pool   count    min   init    max \n");
2700
   if (!left--)
2701
      return sprintf(page, "Small  %5d  %5d  %5d  %5d \n",
2702
                     ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init,
2703
                     card->sbnr.max);
2704
   if (!left--)
2705
      return sprintf(page, "Large  %5d  %5d  %5d  %5d \n",
2706
                     ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init,
2707
                     card->lbnr.max);
2708
   if (!left--)
2709
      return sprintf(page, "Huge   %5d  %5d  %5d  %5d \n", card->hbpool.count,
2710
                     card->hbnr.min, card->hbnr.init, card->hbnr.max);
2711
   if (!left--)
2712
      return sprintf(page, "Iovec  %5d  %5d  %5d  %5d \n", card->iovpool.count,
2713
                     card->iovnr.min, card->iovnr.init, card->iovnr.max);
2714
   if (!left--)
2715
   {
2716
      int retval;
2717
      retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt);
2718
      card->intcnt = 0;
2719
      return retval;
2720
   }
2721
#if 0
2722
   /* Dump 25.6 Mbps PHY registers */
2723
   /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left it
2724
      here just in case it's needed for debugging. */
2725
   if (card->max_pcr == ATM_25_PCR && !left--)
2726
   {
2727
      u32 phy_regs[4];
2728
      u32 i;
2729
 
2730
      for (i = 0; i < 4; i++)
2731
      {
2732
         while (CMD_BUSY(card));
2733
         writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD);
2734
         while (CMD_BUSY(card));
2735
         phy_regs[i] = readl(card->membase + DR0) & 0x000000FF;
2736
      }
2737
 
2738
      return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n",
2739
                     phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]);
2740
   }
2741
#endif /* 0 - Dump 25.6 Mbps PHY registers */
2742
#if 0
2743
   /* Dump TST */
2744
   if (left-- < NS_TST_NUM_ENTRIES)
2745
   {
2746
      if (card->tste2vc[left + 1] == NULL)
2747
         return sprintf(page, "%5d - VBR/UBR \n", left + 1);
2748
      else
2749
         return sprintf(page, "%5d - %d %d \n", left + 1,
2750
                        card->tste2vc[left + 1]->tx_vcc->vpi,
2751
                        card->tste2vc[left + 1]->tx_vcc->vci);
2752
   }
2753
#endif /* 0 */
2754
   return 0;
2755
}
2756
 
2757
 
2758
 
2759
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2760
{
2761
   ns_dev *card;
2762
   pool_levels pl;
2763
   long btype;
2764
   unsigned long flags;
2765
 
2766
   card = dev->dev_data;
2767
   switch (cmd)
2768
   {
2769
      case NS_GETPSTAT:
2770
         if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype))
2771
            return -EFAULT;
2772
         switch (pl.buftype)
2773
         {
2774
            case NS_BUFTYPE_SMALL:
2775
               pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT));
2776
               pl.level.min = card->sbnr.min;
2777
               pl.level.init = card->sbnr.init;
2778
               pl.level.max = card->sbnr.max;
2779
               break;
2780
 
2781
            case NS_BUFTYPE_LARGE:
2782
               pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT));
2783
               pl.level.min = card->lbnr.min;
2784
               pl.level.init = card->lbnr.init;
2785
               pl.level.max = card->lbnr.max;
2786
               break;
2787
 
2788
            case NS_BUFTYPE_HUGE:
2789
               pl.count = card->hbpool.count;
2790
               pl.level.min = card->hbnr.min;
2791
               pl.level.init = card->hbnr.init;
2792
               pl.level.max = card->hbnr.max;
2793
               break;
2794
 
2795
            case NS_BUFTYPE_IOVEC:
2796
               pl.count = card->iovpool.count;
2797
               pl.level.min = card->iovnr.min;
2798
               pl.level.init = card->iovnr.init;
2799
               pl.level.max = card->iovnr.max;
2800
               break;
2801
 
2802
            default:
2803
               return -ENOIOCTLCMD;
2804
 
2805
         }
2806
         if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl)))
2807
            return (sizeof(pl));
2808
         else
2809
            return -EFAULT;
2810
 
2811
      case NS_SETBUFLEV:
2812
         if (!capable(CAP_NET_ADMIN))
2813
            return -EPERM;
2814
         if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl)))
2815
            return -EFAULT;
2816
         if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max)
2817
            return -EINVAL;
2818
         if (pl.level.min == 0)
2819
            return -EINVAL;
2820
         switch (pl.buftype)
2821
         {
2822
            case NS_BUFTYPE_SMALL:
2823
               if (pl.level.max > TOP_SB)
2824
                  return -EINVAL;
2825
               card->sbnr.min = pl.level.min;
2826
               card->sbnr.init = pl.level.init;
2827
               card->sbnr.max = pl.level.max;
2828
               break;
2829
 
2830
            case NS_BUFTYPE_LARGE:
2831
               if (pl.level.max > TOP_LB)
2832
                  return -EINVAL;
2833
               card->lbnr.min = pl.level.min;
2834
               card->lbnr.init = pl.level.init;
2835
               card->lbnr.max = pl.level.max;
2836
               break;
2837
 
2838
            case NS_BUFTYPE_HUGE:
2839
               if (pl.level.max > TOP_HB)
2840
                  return -EINVAL;
2841
               card->hbnr.min = pl.level.min;
2842
               card->hbnr.init = pl.level.init;
2843
               card->hbnr.max = pl.level.max;
2844
               break;
2845
 
2846
            case NS_BUFTYPE_IOVEC:
2847
               if (pl.level.max > TOP_IOVB)
2848
                  return -EINVAL;
2849
               card->iovnr.min = pl.level.min;
2850
               card->iovnr.init = pl.level.init;
2851
               card->iovnr.max = pl.level.max;
2852
               break;
2853
 
2854
            default:
2855
               return -EINVAL;
2856
 
2857
         }
2858
         return 0;
2859
 
2860
      case NS_ADJBUFLEV:
2861
         if (!capable(CAP_NET_ADMIN))
2862
            return -EPERM;
2863
         btype = (long) arg;    /* a long is the same size as a pointer or bigger */
2864
         switch (btype)
2865
         {
2866
            case NS_BUFTYPE_SMALL:
2867
               while (card->sbfqc < card->sbnr.init)
2868
               {
2869
                  struct sk_buff *sb;
2870
 
2871
                  sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL);
2872
                  if (sb == NULL)
2873
                     return -ENOMEM;
2874
                  NS_SKB_CB(sb)->buf_type = BUF_SM;
2875
                  skb_queue_tail(&card->sbpool.queue, sb);
2876
                  skb_reserve(sb, NS_AAL0_HEADER);
2877
                  push_rxbufs(card, sb);
2878
               }
2879
               break;
2880
 
2881
            case NS_BUFTYPE_LARGE:
2882
               while (card->lbfqc < card->lbnr.init)
2883
               {
2884
                  struct sk_buff *lb;
2885
 
2886
                  lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL);
2887
                  if (lb == NULL)
2888
                     return -ENOMEM;
2889
                  NS_SKB_CB(lb)->buf_type = BUF_LG;
2890
                  skb_queue_tail(&card->lbpool.queue, lb);
2891
                  skb_reserve(lb, NS_SMBUFSIZE);
2892
                  push_rxbufs(card, lb);
2893
               }
2894
               break;
2895
 
2896
            case NS_BUFTYPE_HUGE:
2897
               while (card->hbpool.count > card->hbnr.init)
2898
               {
2899
                  struct sk_buff *hb;
2900
 
2901
                  ns_grab_int_lock(card, flags);
2902
                  hb = skb_dequeue(&card->hbpool.queue);
2903
                  card->hbpool.count--;
2904
                  spin_unlock_irqrestore(&card->int_lock, flags);
2905
                  if (hb == NULL)
2906
                     printk("nicstar%d: huge buffer count inconsistent.\n",
2907
                            card->index);
2908
                  else
2909
                     dev_kfree_skb_any(hb);
2910
 
2911
               }
2912
               while (card->hbpool.count < card->hbnr.init)
2913
               {
2914
                  struct sk_buff *hb;
2915
 
2916
                  hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL);
2917
                  if (hb == NULL)
2918
                     return -ENOMEM;
2919
                  NS_SKB_CB(hb)->buf_type = BUF_NONE;
2920
                  ns_grab_int_lock(card, flags);
2921
                  skb_queue_tail(&card->hbpool.queue, hb);
2922
                  card->hbpool.count++;
2923
                  spin_unlock_irqrestore(&card->int_lock, flags);
2924
               }
2925
               break;
2926
 
2927
            case NS_BUFTYPE_IOVEC:
2928
               while (card->iovpool.count > card->iovnr.init)
2929
               {
2930
                  struct sk_buff *iovb;
2931
 
2932
                  ns_grab_int_lock(card, flags);
2933
                  iovb = skb_dequeue(&card->iovpool.queue);
2934
                  card->iovpool.count--;
2935
                  spin_unlock_irqrestore(&card->int_lock, flags);
2936
                  if (iovb == NULL)
2937
                     printk("nicstar%d: iovec buffer count inconsistent.\n",
2938
                            card->index);
2939
                  else
2940
                     dev_kfree_skb_any(iovb);
2941
 
2942
               }
2943
               while (card->iovpool.count < card->iovnr.init)
2944
               {
2945
                  struct sk_buff *iovb;
2946
 
2947
                  iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL);
2948
                  if (iovb == NULL)
2949
                     return -ENOMEM;
2950
                  NS_SKB_CB(iovb)->buf_type = BUF_NONE;
2951
                  ns_grab_int_lock(card, flags);
2952
                  skb_queue_tail(&card->iovpool.queue, iovb);
2953
                  card->iovpool.count++;
2954
                  spin_unlock_irqrestore(&card->int_lock, flags);
2955
               }
2956
               break;
2957
 
2958
            default:
2959
               return -EINVAL;
2960
 
2961
         }
2962
         return 0;
2963
 
2964
      default:
2965
         if (dev->phy && dev->phy->ioctl) {
2966
            return dev->phy->ioctl(dev, cmd, arg);
2967
         }
2968
         else {
2969
            printk("nicstar%d: %s == NULL \n", card->index,
2970
                   dev->phy ? "dev->phy->ioctl" : "dev->phy");
2971
            return -ENOIOCTLCMD;
2972
         }
2973
   }
2974
}
2975
 
2976
 
2977
static void which_list(ns_dev *card, struct sk_buff *skb)
2978
{
2979
        printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type);
2980
}
2981
 
2982
 
2983
static void ns_poll(unsigned long arg)
2984
{
2985
   int i;
2986
   ns_dev *card;
2987
   unsigned long flags;
2988
   u32 stat_r, stat_w;
2989
 
2990
   PRINTK("nicstar: Entering ns_poll().\n");
2991
   for (i = 0; i < num_cards; i++)
2992
   {
2993
      card = cards[i];
2994
      if (spin_is_locked(&card->int_lock)) {
2995
      /* Probably it isn't worth spinning */
2996
         continue;
2997
      }
2998
      ns_grab_int_lock(card, flags);
2999
 
3000
      stat_w = 0;
3001
      stat_r = readl(card->membase + STAT);
3002
      if (stat_r & NS_STAT_TSIF)
3003
         stat_w |= NS_STAT_TSIF;
3004
      if (stat_r & NS_STAT_EOPDU)
3005
         stat_w |= NS_STAT_EOPDU;
3006
 
3007
      process_tsq(card);
3008
      process_rsq(card);
3009
 
3010
      writel(stat_w, card->membase + STAT);
3011
      spin_unlock_irqrestore(&card->int_lock, flags);
3012
   }
3013
   mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD);
3014
   PRINTK("nicstar: Leaving ns_poll().\n");
3015
}
3016
 
3017
 
3018
 
3019
static int ns_parse_mac(char *mac, unsigned char *esi)
3020
{
3021
   int i, j;
3022
   short byte1, byte0;
3023
 
3024
   if (mac == NULL || esi == NULL)
3025
      return -1;
3026
   j = 0;
3027
   for (i = 0; i < 6; i++)
3028
   {
3029
      if ((byte1 = ns_h2i(mac[j++])) < 0)
3030
         return -1;
3031
      if ((byte0 = ns_h2i(mac[j++])) < 0)
3032
         return -1;
3033
      esi[i] = (unsigned char) (byte1 * 16 + byte0);
3034
      if (i < 5)
3035
      {
3036
         if (mac[j++] != ':')
3037
            return -1;
3038
      }
3039
   }
3040
   return 0;
3041
}
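
#if 0
/* Illustrative sketch only, not part of the driver: ns_parse_mac() converts a
   colon separated hex string into the 6 byte ESI, returning 0 on success and
   -1 on any malformed input; upper and lower case digits are both accepted
   (see ns_h2i() below).  The string used here is a hypothetical example. */
static void ns_parse_mac_example(void)
{
   char example[] = "00:20:EA:00:12:34";
   unsigned char esi[6];

   if (ns_parse_mac(example, esi) == 0)
      printk("nicstar: example ESI %02X:%02X:%02X:%02X:%02X:%02X \n",
             esi[0], esi[1], esi[2], esi[3], esi[4], esi[5]);
}
#endif /* 0 - illustrative sketch */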
3042
 
3043
 
3044
 
3045
static short ns_h2i(char c)
3046
{
3047
   if (c >= '0' && c <= '9')
3048
      return (short) (c - '0');
3049
   if (c >= 'A' && c <= 'F')
3050
      return (short) (c - 'A' + 10);
3051
   if (c >= 'a' && c <= 'f')
3052
      return (short) (c - 'a' + 10);
3053
   return -1;
3054
}
3055
 
3056
 
3057
 
3058
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
3059
                    unsigned long addr)
3060
{
3061
   ns_dev *card;
3062
   unsigned long flags;
3063
 
3064
   card = dev->dev_data;
3065
   ns_grab_res_lock(card, flags);
3066
   while(CMD_BUSY(card));
3067
   writel((unsigned long) value, card->membase + DR0);
3068
   writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF),
3069
          card->membase + CMD);
3070
   spin_unlock_irqrestore(&card->res_lock, flags);
3071
}
3072
 
3073
 
3074
 
3075
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr)
3076
{
3077
   ns_dev *card;
3078
   unsigned long flags;
3079
   unsigned long data;
3080
 
3081
   card = dev->dev_data;
3082
   ns_grab_res_lock(card, flags);
3083
   while(CMD_BUSY(card));
3084
   writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF),
3085
          card->membase + CMD);
3086
   while(CMD_BUSY(card));
3087
   data = readl(card->membase + DR0) & 0x000000FF;
3088
   spin_unlock_irqrestore(&card->res_lock, flags);
3089
   return (unsigned char) data;
3090
}
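
#if 0
/* Illustrative sketch only, not part of the driver: a read-modify-write of a
   PHY register through the two helpers above would look like this.  The
   register address 0x00 and the bit mask are hypothetical values chosen for
   the example. */
static void ns_phy_rmw_example(struct atm_dev *dev)
{
   unsigned char reg;

   reg = ns_phy_get(dev, 0x00);         /* read a hypothetical PHY register */
   ns_phy_put(dev, reg | 0x01, 0x00);   /* write it back with bit 0 set */
}
#endif /* 0 - illustrative sketch */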
3091
 
3092
 
3093
 
3094
module_init(nicstar_init);
3095
module_exit(nicstar_cleanup);
