OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [atm/] [iphase.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/******************************************************************************
2
         iphase.c: Device driver for Interphase ATM PCI adapter cards
3
                    Author: Peter Wang  <pwang@iphase.com>
4
                   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5
                   Interphase Corporation  <www.iphase.com>
6
                               Version: 1.0
7
*******************************************************************************
8
 
9
      This software may be used and distributed according to the terms
10
      of the GNU General Public License (GPL), incorporated herein by reference.
11
      Drivers based on this skeleton fall under the GPL and must retain
12
      the authorship (implicit copyright) notice.
13
 
14
      This program is distributed in the hope that it will be useful, but
15
      WITHOUT ANY WARRANTY; without even the implied warranty of
16
      MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17
      General Public License for more details.
18
 
19
      Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20
      was originally written by Monalisa Agrawal at UNH. Now this driver
21
      supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22
      card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23
      in terms of PHY type, the size of control memory and the size of
24
      packet memory. The followings are the change log and history:
25
 
26
          Bugfix Mona's UBR driver.
27
          Modify the basic memory allocation and dma logic.
28
          Port the driver to the latest kernel from 2.0.46.
29
          Complete the ABR logic of the driver, and added the ABR work-
30
              around for the hardware anomalies.
31
          Add the CBR support.
32
          Add the flow control logic to the driver to allow rate-limit VC.
33
          Add 4K VC support to the board with 512K control memory.
34
          Add the support of all the variants of the Interphase ATM PCI
35
          (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36
          (25M UTP25) and x531 (DS3 and E3).
37
          Add SMP support.
38
 
39
      Support and updates available at: ftp://ftp.iphase.com/pub/atm
40
 
41
*******************************************************************************/
42
 
43
#ifdef IA_MODULE
44
#define MODULE
45
#endif
46
#include <linux/version.h>
47
#include <linux/module.h>  
48
#include <linux/kernel.h>  
49
#include <linux/mm.h>  
50
#include <linux/pci.h>  
51
#include <linux/errno.h>  
52
#include <linux/atm.h>  
53
#include <linux/atmdev.h>  
54
#include <linux/sonet.h>  
55
#include <linux/skbuff.h>  
56
#include <linux/time.h>  
57
#include <linux/sched.h> /* for xtime */  
58
#include <linux/delay.h>  
59
#include <linux/uio.h>  
60
#include <linux/init.h>  
61
#include <asm/system.h>  
62
#include <asm/io.h>  
63
#include <asm/atomic.h>  
64
#include <asm/uaccess.h>  
65
#include <asm/string.h>  
66
#include <asm/byteorder.h>  
67
#include <linux/vmalloc.h>  
68
#include "iphase.h"               
69
#include "suni.h"                 
70
/* Byte-swap the low 16 bits of x.  BUG FIX: the original macro did not
 * parenthesize its argument, so an expression operand with lower
 * precedence than `&` (e.g. `swap(a | b)`) expanded incorrectly. */
#define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))  
71
/* Per-PHY private state for SUNI framer chips; hung off atm_dev->phy_data
 * and retrieved with the PRIV() accessor below. */
struct suni_priv {
        struct k_sonet_stats sonet_stats; /* link diagnostics */
        unsigned char loop_mode;        /* loopback mode */
        struct atm_dev *dev;            /* device back-pointer */
        struct suni_priv *next;         /* next SUNI */
};
/* Fetch the SUNI private data attached to an ATM device. */
#define PRIV(dev) ((struct suni_priv *) dev->phy_data)
78
 
79
/* Forward declaration: PHY register read helper (defined later in this file). */
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);

/* Driver-wide state: up to 8 adapters are tracked by index. */
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
static void ia_led_timer(unsigned long arg);
/* GNU "label:" initializer syntax (pre-C99 gcc designated initializer). */
static struct timer_list ia_timer = { function: ia_led_timer };
struct atm_vcc *vcc_close_que[100];
/* Buffer counts/sizes; overridable as module parameters below. */
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
static u32 IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
            |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;

#ifdef MODULE
MODULE_PARM(IA_TX_BUF, "i");
MODULE_PARM(IA_TX_BUF_SZ, "i");
MODULE_PARM(IA_RX_BUF, "i");
MODULE_PARM(IA_RX_BUF_SZ, "i");
MODULE_PARM(IADebugFlag, "i");
#endif

MODULE_LICENSE("GPL");

/* The driver stores bus addresses in 32-bit fields and assumes
 * sizeof(long) == 4 throughout; refuse to build elsewhere. */
#if BITS_PER_LONG != 32
#  error FIXME: this driver only works on 32-bit platforms
#endif
105
 
106
/**************************** IA_LIB **********************************/
107
 
108
/* Reset a transmit-return queue to the empty state. */
static void ia_init_rtn_q (IARTN_Q *que)
{
   que->next = que->tail = NULL;
}
113
 
114
/* Push an already-allocated node onto the head of the return queue.
 * An empty queue gets both head and tail pointed at the new node. */
static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
{
   if (que->next == NULL) {
      data->next = NULL;
      que->next = que->tail = data;
   } else {
      data->next = que->next;
      que->next = data;
   }
}
125
 
126
/* Append a copy of @data at the tail of @que.
 * Returns 1 on success, -1 if the node allocation fails. */
static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
   IARTN_Q *node = kmalloc(sizeof(*node), GFP_ATOMIC);
   if (node == NULL)
      return -1;
   node->data = data;
   node->next = NULL;
   if (que->next) {
      que->tail->next = node;
      que->tail = node;
   } else {
      que->next = que->tail = node;
   }
   return 1;
}
139
 
140
/* Pop and return the head node of @que, or NULL when the queue is empty.
 * Ownership of the node transfers to the caller, who must kfree() it. */
static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
   IARTN_Q *head = que->next;
   if (head == NULL)
      return NULL;
   if (head == que->tail)
      que->next = que->tail = NULL;
   else
      que->next = head->next;
   return head;
}
151
 
152
/*
 * Walk the Transmit Completion Queue (TCQ) from the host's cached read
 * position up to the adapter's current write pointer, releasing each
 * completed descriptor: clear its timestamp, decrement the owning VC's
 * outstanding-descriptor count and, for VCs below the rate limit, queue
 * the entry on tx_return_q so ia_tx_poll() can complete the skb.
 */
static void ia_hack_tcq(IADEV *dev) {

  u_short               desc1;
  u_short               tcq_wr;
  struct ia_vcc         *iavcc_r = NULL;
  extern void desc_dbg(IADEV *iadev);

  tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
  while (dev->host_tcq_wr != tcq_wr) {
     desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
     if (desc1 && !dev->desc_tbl[desc1 -1].timestamp) {
        /* Entry was already recovered by the timeout scan in get_desc(). */
        IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
        *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
     }
     else if (desc1 && dev->desc_tbl[desc1 -1].timestamp) {
        if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
           /* BUG FIX: the original code executed `continue` here without
            * advancing host_tcq_wr, spinning forever on the same corrupt
            * entry.  Log it and step past instead. */
           printk("IA: Fatal err in get_desc\n");
        }
        else {
           iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[desc1 -1].timestamp = 0;
           IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
                                      (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
           if (iavcc_r->pcr < dev->rate_limit) {
              IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
              if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
                 printk("ia_hack_tcq: No memory available\n");
           }
           dev->desc_tbl[desc1 -1].iavcc = NULL;
           dev->desc_tbl[desc1 -1].txskb = NULL;
        }
     }
     /* Advance to the next 16-bit TCQ slot, wrapping at the ring's end. */
     dev->host_tcq_wr += 2;
     if (dev->host_tcq_wr > dev->ffL.tcq_ed)
        dev->host_tcq_wr = dev->ffL.tcq_st;
  }
} /* ia_hack_tcq */
189
 
190
/*
 * Obtain a free transmit descriptor number from the TCQ.  Returns the
 * 1-based descriptor number, or 0xFFFF when none is available.  Every
 * 50 jiffies (or whenever the TCQ looks empty) it also scans the whole
 * descriptor table and recycles entries whose per-VC timeout expired,
 * pushing them back into the TCQ just before the read pointer.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short               desc_num, i;
  struct sk_buff        *skb;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* jiffies of the last recovery scan */
  int ltimeout;
  extern void desc_dbg(IADEV *iadev);

  ia_hack_tcq (dev);
  if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld,  time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Recycle the timed-out descriptor: move the TCQ read pointer
            * back one slot (wrapping to the end) and store i+1 there. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* TCQ empty: no free descriptor */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero slots and descriptors that are still in flight. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
     dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
248
 
249
/*
 * Workaround for an (i)Chip ABR transmit lockup.  Called periodically per
 * VC: every 5th call it samples the hardware VC state; if the VC appears
 * stuck (segmentation state unchanged, or cell-slot/fraction frozen since
 * the previous sample), it forces the VC back to idle and re-inserts its
 * VCI into the ABR schedule table to restart segmentation.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char                foundLockUp;
  vcstatus_t            *vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     /* Only probe the hardware every 5th invocation. */
     if( vcstatus->cnt == 0x05 ) {
        abr_vc += vcc->vci;
        eabr_vc += vcc->vci;
        if( eabr_vc->last_desc ) {
           if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* Still in ABR_STATE with a pending descriptor => locked up. */
              if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
                 foundLockUp = 1;
           }
           else {
              /* Compare scheduling progress against the previous sample;
               * no movement means the VC is stuck. */
              tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
                 foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
           }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* Halt the segmentation engine, idle the VC, then restart. */
        writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* Re-insert the VCI into the first free ABR schedule-table slot. */
        shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
        for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
        if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
        vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
308
 
309
/*
310
** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
311
**
312
**  +----+----+------------------+-------------------------------+
313
**  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
314
**  +----+----+------------------+-------------------------------+
315
**
316
**    R = reserved (written as 0)
317
**    NZ = 0 if 0 cells/sec; 1 otherwise
318
**
319
**    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
320
*/
321
static u16
322
cellrate_to_float(u32 cr)
323
{
324
 
325
#define NZ              0x4000
326
#define M_BITS          9               /* Number of bits in mantissa */
327
#define E_BITS          5               /* Number of bits in exponent */
328
#define M_MASK          0x1ff           
329
#define E_MASK          0x1f
330
  u16   flot;
331
  u32   tmp = cr & 0x00ffffff;
332
  int   i   = 0;
333
  if (cr == 0)
334
     return 0;
335
  while (tmp != 1) {
336
     tmp >>= 1;
337
     i++;
338
  }
339
  if (i == M_BITS)
340
     flot = NZ | (i << M_BITS) | (cr & M_MASK);
341
  else if (i < M_BITS)
342
     flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
343
  else
344
     flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
345
  return flot;
346
}
347
 
348
#if 0
349
/*
350
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
351
*/
352
static u32
353
float_to_cellrate(u16 rate)
354
{
355
  u32   exp, mantissa, cps;
356
  if ((rate & NZ) == 0)
357
     return 0;
358
  exp = (rate >> M_BITS) & E_MASK;
359
  mantissa = rate & M_MASK;
360
  if (exp == 0)
361
     return 1;
362
  cps = (1 << M_BITS) | mantissa;
363
  if (exp == M_BITS)
364
     cps = cps;
365
  else if (exp > M_BITS)
366
     cps <<= (exp - M_BITS);
367
  else
368
     cps >>= (M_BITS - exp);
369
  return cps;
370
}
371
#endif 
372
 
373
/* Fill *srv_p with the driver's default ABR service parameters; PCR
 * starts at the board's full line rate.  The remaining magic values are
 * the hardware defaults used when the caller supplies no QoS values
 * (see the range checks in ia_open_abr_vc()). */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr        = dev->LineRate;   /* peak cell rate = line rate */
  srv_p->mcr        = 0;               /* no guaranteed minimum rate */
  srv_p->icr        = 0x055cb7;        /* initial cell rate */
  srv_p->tbe        = 0xffffff;        /* transient buffer exposure */
  srv_p->frtt       = 0x3a;            /* fixed round-trip time */
  srv_p->rif        = 0xf;             /* rate increase factor */
  srv_p->rdf        = 0xb;             /* rate decrease factor */
  srv_p->nrm        = 0x4;
  srv_p->trm        = 0x7;
  srv_p->cdf        = 0x3;             /* cutoff decrease factor */
  srv_p->adtf       = 50;              /* ACR decrease time factor */
}
387
 
388
/*
 * Program an ABR VC into the adapter's VC tables.
 * flag == 1: initialize the transmit-side (FFRED) VC entry from srv_p;
 * flag == 0: initialize the receive-side (RFRED) VC entry.
 * Returns 0 (the sanity checks that could return error codes are
 * compiled out below).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32           icr;
  u8            trm, nrm, crm;
  u16           adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
          return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
          return INVALID_MCR;
       if (!(srv_p->icr))
          srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
          return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
          return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
          return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
          return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
          return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
          srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
          return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
          return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
          return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
                                  /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is capped by what TBE allows within one round-trip time. */
       icr = MIN( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
                                ((srv_p->tbe/srv_p->frtt)*1000000) :
                                (1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;   /* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr        += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return        0;
}
474
/*
 * Reserve CBR bandwidth for @vcc: compute how many schedule-table
 * entries its PCR needs, spread them as evenly as possible through the
 * CBR schedule table, and enable CBR scheduling on the first active VC.
 * Returns 0 on success, -1 for a missing PCR, -EBUSY when not enough
 * table entries remain.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up when the requested rate is much closer to the next step. */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
                                testSlot, (u32)TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
                                                       (u32)SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
                          (u32)TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       /* BUG FIX: the original used sizeof(TstSchedTbl) -- the size of a
        * pointer (4 bytes) -- which read past the 2-byte vcIndex and
        * clobbered the adjacent schedule-table entry.  Copy exactly one
        * u16 table entry. */
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(vcIndex));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
589
static void ia_cbrVc_close (struct atm_vcc *vcc) {
590
   IADEV *iadev;
591
   u16 *SchedTbl, NullVci = 0;
592
   u32 i, NumFound;
593
 
594
   iadev = INPH_IA_DEV(vcc->dev);
595
   iadev->NumEnabledCBR--;
596
   SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
597
   if (iadev->NumEnabledCBR == 0) {
598
      writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
599
      IF_CBR (printk("CBR support disabled\n");)
600
   }
601
   NumFound = 0;
602
   for (i=0; i < iadev->CbrTotEntries; i++)
603
   {
604
      if (*SchedTbl == vcc->vci) {
605
         iadev->CbrRemEntries++;
606
         *SchedTbl = NullVci;
607
         IF_CBR(NumFound++;)
608
      }
609
      SchedTbl++;
610
   }
611
   IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
612
}
613
 
614
/* Count the free transmit descriptors between the TCQ read pointer and
 * the host's write pointer (each slot is 2 bytes), reclaiming completed
 * ones first via ia_hack_tcq(). */
static int ia_avail_descs(IADEV *iadev) {
   int avail;
   ia_hack_tcq(iadev);
   if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
      avail = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
   else
      /* write pointer has wrapped: count both ring segments */
      avail = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
               iadev->ffL.tcq_st) / 2;
   return avail;
}
624
 
625
/*
 * Drain the software tx backlog: while free descriptors remain, dequeue
 * backlogged skbs and hand them to ia_pkt_tx().  skbs whose VC is gone
 * or closed are freed; an skb ia_pkt_tx() rejects is requeued at the
 * head for the next pass.  Always returns 0.
 */
static int ia_que_tx (IADEV *iadev) {
   struct sk_buff *skb;
   int num_desc;
   struct atm_vcc *vcc;
   struct ia_vcc *iavcc;
   /* forward declaration; defined later in this file */
   static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
   num_desc = ia_avail_descs(iadev);
   while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
      if (!(vcc = ATM_SKB(skb)->vcc)) {
         dev_kfree_skb_any(skb);
         printk("ia_que_tx: Null vcc\n");
         /* NOTE(review): break (not continue) stops this whole drain
          * pass; remaining backlog waits for the next call. */
         break;
      }
      if (!test_bit(ATM_VF_READY,&vcc->flags)) {
         dev_kfree_skb_any(skb);
         printk("Free the SKB on closed vci %d \n", vcc->vci);
         break;
      }
      iavcc = INPH_IA_VCC(vcc);
      if (ia_pkt_tx (vcc, skb)) {
         /* transmit refused: put the skb back for a later attempt */
         skb_queue_head(&iadev->tx_backlog, skb);
      }
      num_desc--;
   }
   return 0;
}
651
/*
 * Complete transmitted packets.  For each entry on tx_return_q, pop skbs
 * from the owning VC's txing_skb list up to (and including) the returned
 * skb, handing each to vcc->pop() or freeing it.  skbs popped before the
 * match without IA_TX_DONE set are reported as lost.  Finally restarts
 * the backlog drain via ia_que_tx().
 */
void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           /* NOTE(review): goto out here leaks rtne and skips
            * ia_que_tx(); kept as-is to preserve behavior. */
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
           goto out;
       }

       /* Release every skb queued ahead of the completed one. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* Completed skb was not on txing_skb: put the return-queue
           * entry back and retry on a later poll. */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
712
#if 0
713
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
714
{
715
        u32     t;
716
        int     i;
717
        /*
718
         * Issue a command to enable writes to the NOVRAM
719
         */
720
        NVRAM_CMD (EXTEND + EWEN);
721
        NVRAM_CLR_CE;
722
        /*
723
         * issue the write command
724
         */
725
        NVRAM_CMD(IAWRITE + addr);
726
        /*
727
         * Send the data, starting with D15, then D14, and so on for 16 bits
728
         */
729
        for (i=15; i>=0; i--) {
730
                NVRAM_CLKOUT (val & 0x8000);
731
                val <<= 1;
732
        }
733
        NVRAM_CLR_CE;
734
        CFG_OR(NVCE);
735
        t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
736
        while (!(t & NVDO))
737
                t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
738
 
739
        NVRAM_CLR_CE;
740
        /*
741
         * disable writes again
742
         */
743
        NVRAM_CMD(EXTEND + EWDS)
744
        NVRAM_CLR_CE;
745
        CFG_AND(~NVDI);
746
}
747
#endif
748
 
749
/* Bit-bang a 16-bit read from the board's serial EEPROM at @addr,
 * clocking bits in MSB (D15) first via the NVRAM_* macros. */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
        u_short val;
        u32     t;
        int     i;
        /*
         * Read the first bit that was clocked with the falling edge of the
         * the last command data clock
         */
        NVRAM_CMD(IAREAD + addr);
        /*
         * Now read the rest of the bits, the next bit read is D14, then D13,
         * and so on.
         */
        val = 0;
        for (i=15; i>=0; i--) {
                NVRAM_CLKIN(t);
                val |= (t << i);
        }
        NVRAM_CLR_CE;
        CFG_AND(~NVDI);
        return val;
}
772
 
773
/*
 * Probe the adapter variant from EEPROM word 25: scale the tx/rx buffer
 * counts to the on-board packet-memory size (1M / 512K / smaller), then
 * derive the PHY type and line rate (cells/sec) from the front-end bits.
 * Buffer counts are only scaled down when the user kept the defaults.
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* half the default buffer count for half-size memory */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* smallest memory option: one eighth of the defaults */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* rx packet RAM starts right after the tx buffer area */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   /* dead code: note the unbalanced braces below (missing `}` before
    * `else`); harmless only because this block is compiled out */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   /* line rate in cells/sec: (bits/sec / 8) scaled by 26/27 framing
    * overhead, divided by the 53-byte cell size */
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
833
 
834
/*
 * IaFrontEndIntr - service an interrupt from the front-end (PHY) device.
 *
 * Reads the PHY-specific status register for whichever front end this
 * adapter carries (25 Mbit MB25, DS3/E3 PM7345, or SUNI OC-3) and updates
 * iadev->carrier_detect from the loss-of-signal indication.
 *
 * NOTE(review): the reads into frmr_intr / intr_status are never used as
 * values; the DS3 path's own comment says the read clears latched FRMR
 * interrupt bits, and the E3/SUNI reads appear to serve the same
 * read-to-clear purpose — confirm against the PM7345/SUNI data sheets.
 */
static void IaFrontEndIntr(IADEV *iadev) {
  volatile IA_SUNI *suni;
  volatile ia_mb25_t *mb25;
  volatile suni_pm7345_t *suni_pm7345;
  u32 intr_status;
  u_int frmr_intr;

  if(iadev->phy_type & FE_25MBIT_PHY) {
     /* MB25: carrier follows the Good Signal Bit in the interrupt status */
     mb25 = (ia_mb25_t*)iadev->phy;
     iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
  } else if (iadev->phy_type & FE_DS3_PHY) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     /* clear FRMR interrupts */
     frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat;
     /* carrier is up when Loss Of Signal is NOT asserted */
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
  } else if (iadev->phy_type & FE_E3_PHY ) {
     suni_pm7345 = (suni_pm7345_t *)iadev->phy;
     frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
     iadev->carrier_detect =
           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
  }
  else {
     /* default: SUNI OC-3; low byte of the RSOP status holds the flags */
     suni = (IA_SUNI *)iadev->phy;
     intr_status = suni->suni_rsop_status & 0xff;
     iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
  }
  if (iadev->carrier_detect)
    printk("IA: SUNI carrier detected\n");
  else
    printk("IA: SUNI carrier lost signal\n");
  return;
}
867
 
868
/*
 * ia_mb25_init - bring up the 25 Mbit MB25 front-end PHY.
 *
 * Programs the master control register (discard-on-receive/transmit-error
 * bits, without enabling the device — see the #if 0'd variant that also set
 * MB25_MC_ENABLED), clears diagnostics, and seeds iadev->carrier_detect
 * from the Good Signal Bit so later link-state logic starts consistent.
 */
void ia_mb25_init (IADEV *iadev)
{
   volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
   mb25->mb25_diag_control = 0;
   /*
    * Initialize carrier detect state
    */
   iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
   return;
}
882
 
883
/*
 * ia_suni_pm7345_init - bring up the PM7345 DS3/E3 front-end PHY.
 *
 * Seeds iadev->carrier_detect from the loss-of-signal bit, then programs
 * framer, interrupt-enable, cell processor (RXCP/TXCP) and scrambler
 * (SPLR/SPLT) registers for either DS3 or E3 operation and finally takes
 * the device out of all loopback modes.
 *
 * NOTE(review): the many hexadecimal register values here are taken as
 * given; they must be verified against the PMC-Sierra PM7345 data sheet —
 * nothing in this file defines their meaning.
 */
void ia_suni_pm7345_init (IADEV *iadev)
{
   volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
   if (iadev->phy_type & FE_DS3_PHY)
   {
      /* DS3 mode: record initial carrier, then configure the DS3 framer */
      iadev->carrier_detect =
          Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
      suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
      suni_pm7345->suni_ds3_frm_cfg = 1;
      suni_pm7345->suni_ds3_tran_cfg = 1;
      suni_pm7345->suni_config = 0;
      suni_pm7345->suni_splr_cfg = 0;
      suni_pm7345->suni_splt_cfg = 0;
   }
   else
   {
      /* E3 mode: record initial carrier, then configure the E3 framer */
      iadev->carrier_detect =
          Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
      suni_pm7345->suni_e3_frm_fram_options = 0x4;
      suni_pm7345->suni_e3_frm_maint_options = 0x20;
      suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
      suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
      suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
      suni_pm7345->suni_e3_tran_fram_options = 0x1;
      suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
      suni_pm7345->suni_splr_cfg = 0x41;
      suni_pm7345->suni_splt_cfg = 0x41;
   }
   /*
    * Enable RSOP loss of signal interrupt.
    */
   suni_pm7345->suni_intr_enbl = 0x28;

   /*
    * Clear error counters
    */
   suni_pm7345->suni_id_reset = 0;

   /*
    * Clear "PMCTST" in master test register.
    */
   suni_pm7345->suni_master_test = 0;

   /* receive cell processor control and FIFO control */
   suni_pm7345->suni_rxcp_ctrl = 0x2c;
   suni_pm7345->suni_rxcp_fctrl = 0x81;

   /* idle-cell header pattern: all zero except CLP bit in H4 */
   suni_pm7345->suni_rxcp_idle_pat_h1 =
        suni_pm7345->suni_rxcp_idle_pat_h2 =
        suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
   suni_pm7345->suni_rxcp_idle_pat_h4 = 1;

   /* idle-cell match mask: compare all header bits except the last one */
   suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
   suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;

   /* unassigned-cell header pattern and full match mask */
   suni_pm7345->suni_rxcp_cell_pat_h1 =
        suni_pm7345->suni_rxcp_cell_pat_h2 =
        suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
   suni_pm7345->suni_rxcp_cell_pat_h4 = 1;

   suni_pm7345->suni_rxcp_cell_mask_h1 =
        suni_pm7345->suni_rxcp_cell_mask_h2 =
        suni_pm7345->suni_rxcp_cell_mask_h3 =
        suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;

   /* transmit cell processor setup */
   suni_pm7345->suni_txcp_ctrl = 0xa4;
   suni_pm7345->suni_txcp_intr_en_sts = 0x10;
   suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;

   /* make sure every loopback mode (line/cell/diag/PHY) is disabled */
   suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
                                 SUNI_PM7345_CLB |
                                 SUNI_PM7345_DLB |
                                  SUNI_PM7345_PLB);
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
   return;
}
962
 
963
 
964
/***************************** IA_LIB END *****************************/
965
 
966
/* pwang_test debug utility */
967
int tcnter = 0, rcnter = 0;
968
/*
 * xdump - hex + ASCII dump of a buffer to the kernel log, 16 bytes per line.
 *
 * @cp:     buffer to dump
 * @length: number of bytes to dump
 * @prefix: string printed at the start of every output line
 *
 * Each line shows the bytes in hex (grouped in fours) followed by their
 * printable-ASCII rendering. Debug aid only ("pwang_test" utility).
 *
 * Fixes vs. the original:
 *  - printk() was handed the assembled buffer as its FORMAT string; a dumped
 *    '%' byte would be interpreted as a conversion specifier (format-string
 *    bug). Now printed through a "%s" format.
 *  - the pad loop's group-separator sprintf did not advance pBuf, so the
 *    extra space was overwritten and the ASCII column of a short final line
 *    was misaligned relative to full lines. It now advances like the hex
 *    loop's separator does.
 *  - u_char* was passed where sprintf expects char*; explicit casts added.
 */
void xdump( u_char*  cp, int  length, char*  prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char*  pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( (char *)pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( (char *)pBuf, " " );
            pBuf += sprintf( (char *)pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( (char *)pBuf, " " );
            pBuf += sprintf( (char *)pBuf, "   " );
        }
        pBuf += sprintf( (char *)pBuf, "  " );
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( (char *)pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( (char *)pBuf, "." );
                }
        sprintf( (char *)pBuf, "\n" );
        /* never pass dumped data as the format string */
        printk("%s", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
1001
 
1002
 
1003
static struct atm_dev *ia_boards = NULL;
1004
 
1005
#define ACTUAL_RAM_BASE \
1006
        RAM_BASE*((iadev->mem)/(128 * 1024))
1007
#define ACTUAL_SEG_RAM_BASE \
1008
        IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1009
#define ACTUAL_REASS_RAM_BASE \
1010
        IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1011
 
1012
 
1013
/*-- some utilities and memory allocation stuff will come here -------------*/
1014
 
1015
/*
 * desc_dbg - dump the transmit completion queue (TCQ) state for debugging.
 *
 * Prints the hardware TCQ write pointer with the descriptor at and before
 * it, the driver's cached read/write pointers, and then walks the whole
 * TCQ region printing every slot, followed by the timestamps in the
 * software descriptor table.
 *
 * NOTE(review): tmp is a u32 holding the address iadev->seg_ram+tcq_st_ptr;
 * this truncates pointers on a 64-bit build — confirm the target is 32-bit.
 */
void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 tmp, i;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* walk every 16-bit TCQ slot from start to end address */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = 0x%x\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1038
 
1039
 
1040
/*----------------------------- Recieving side stuff --------------------------*/
1041
 
1042
/*
 * rx_excp_rcvd - drain the receive exception queue.
 *
 * Intentionally compiled out: per the original author's comment, servicing
 * the exception queue this way caused an interrupt storm ("too many excp
 * int"), so the function is currently a no-op. The disabled body reads
 * (VCI, error-code) pairs from the exception queue, wraps the read pointer
 * at the queue end, and writes the updated read pointer back to hardware.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */  
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;  
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
        vci = readw(iadev->reass_ram+excpq_rd_ptr);
        error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
        excpq_rd_ptr += 4;
        if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
            excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
        writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1070
 
1071
/*
 * free_desc - return a receive buffer descriptor to the hardware free queue.
 *
 * Writes the descriptor number into the free-descriptor queue slot at the
 * driver's cached write pointer, advances the pointer (wrapping from the
 * queue end back to its start), and publishes the new write pointer to the
 * reassembly engine's FREEQ_WR_PTR register so the hardware can reuse the
 * buffer.
 */
static void free_desc(struct atm_dev *dev, int desc)
{
        IADEV *iadev;
        iadev = INPH_IA_DEV(dev);
        writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
        iadev->rfL.fdq_wr +=2;
        if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
                iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
        /* publish the advanced pointer to the hardware last */
        writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
}
1081
 
1082
 
1083
/*
 * rx_pkt - pull one completed packet off the packet-complete queue (PCQ)
 * and queue a DMA transfer of it into a freshly allocated skb.
 *
 * Returns 0 on success (including the "drop" paths that recycle the
 * descriptor), -EINVAL when the PCQ is empty, -1 for a bad descriptor or
 * missing VCC. Called from rx_intr() while the PCQ is non-empty.
 *
 * Flow: read descriptor number from the PCQ, advance/publish the PCQ read
 * pointer, validate the descriptor and look up the owning VCC, check the
 * hardware status bits, compute the packet length from the buffer-descriptor
 * addresses, allocate and charge an skb, and append a DLE entry so the DMA
 * engine copies the data from adapter memory into the skb.
 */
static int rx_pkt(struct atm_dev *dev)
{
        IADEV *iadev;
        struct atm_vcc *vcc;
        unsigned short status;
        struct rx_buf_desc *buf_desc_ptr;
        int desc;
        struct dle* wr_ptr;
        int len;
        struct sk_buff *skb;
        u_int buf_addr, dma_addr;
        iadev = INPH_IA_DEV(dev);
        /* nothing to do if our cached read pointer caught up with hardware */
        if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
        {
            printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
            return -EINVAL;
        }
        /* mask 1st 3 bits to get the actual descno. */
        desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = 0x%x iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
        /* update the read pointer  - maybe we shud do this in the end*/
        if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
                iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
        else
                iadev->rfL.pcq_rd += 2;
        writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

        /* get the buffer desc entry.
                update stuff. - doesn't seem to be any update necessary
        */
        buf_desc_ptr = (struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
        /* make the ptr point to the corresponding buffer desc entry */
        buf_desc_ptr += desc;
        /* descriptor 0 is reserved; reject out-of-range desc or vc_index */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
        vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
        if (!vcc)
        {
                free_desc(dev, desc);
                printk("IA: null vcc, drop PDU\n");
                return -1;
        }


        /* might want to check the status bits for errors */
        status = (u_short) (buf_desc_ptr->desc_mode);
        if (status & (RX_CER | RX_PTE | RX_OFL))
        {
                atomic_inc(&vcc->stats->rx_err);
                IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer over flow\n");)
                }
                goto out_free_desc;
        }

        /*
                build DLE.
        */

        /* packet length = end-of-data (dma) address minus buffer start */
        buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
        dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
        len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
           goto out_free_desc;
        }

#if LINUX_VERSION_CODE >= 0x20312
        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
#else
        if (atm_charge(vcc, atm_pdu2truesize(len))) {
           /* lets allocate an skb for now */
           skb = alloc_skb(len, GFP_ATOMIC);
           if (!skb)
           {
              IF_ERR(printk("can't allocate memory for recv, drop pkt!\n");)
              atomic_inc(&vcc->stats->rx_drop);
              atm_return(vcc, atm_pdu2truesize(len));
              goto out_free_desc;
           }
        }
        else {
           IF_EVENT(printk("IA: Rx over the rx_quota %ld\n", vcc->rx_quota);)
#endif
           /* NOTE(review): misleading indentation below — the goto is NOT
              conditional on (vcc->vci < 32); on any allocation/charge
              failure the packet is dropped, with an extra message for
              control VCIs. That matches the enclosing braces, but add
              braces if this is ever touched. */
           if (vcc->vci < 32)
              printk("Drop control packets\n");
              goto out_free_desc;
        }
        skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
        /* remember the skb so rx_dle_intr() can match it to the DLE */
        skb_queue_tail(&iadev->rx_dma_q, skb);

        /* Build the DLE structure */
        wr_ptr = iadev->rx_dle_q.write;
        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
                len, PCI_DMA_FROMDEVICE);
        wr_ptr->local_pkt_addr = buf_addr;
        wr_ptr->bytes = len;    /* We don't know this do we ?? */
        wr_ptr->mode = DMA_INT_ENABLE;

        /* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
        iadev->rx_dle_q.write = wr_ptr;
        udelay(1);
        /* Increment transaction counter */
        writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:    return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1212
 
1213
/*
 * rx_intr - top-level receive interrupt handler for the reassembly engine.
 *
 * Reads the reassembly interrupt status register and dispatches:
 *  - RX_PKT_RCVD:   drain the packet-complete queue via rx_pkt();
 *  - RX_FREEQ_EMPT: free-descriptor queue empty; records a snapshot of the
 *                   packet count/jiffies, and if the queue has been starved
 *                   for >50 jiffies with no progress, forcibly recycles
 *                   every descriptor ("Test logic") and re-masks interrupts;
 *  - RX_EXCP_RCVD:  delegate to rx_excp_rcvd() (currently a no-op);
 *  - RX_RAW_RCVD:   raw cells — only logged, not handled.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
        /* do something */
        /* Basically recvd an interrupt for receving a packet.
        A descriptor would have been written to the packet complete
        queue. Get all the descriptors and set up dma to move the
        packets till the packet complete queue is empty..
        */
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
        while(!(state & PCQ_EMPTY))
        {
             rx_pkt(dev);
             state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        }
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     if (iadev->rxing) {
        /* first starvation event: snapshot progress counters */
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        /* stalled with no received packets: recycle all descriptors */
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
        /* probably need to handle the exception queue also. */
        IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
        rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
        /* need to handle the raw incoming cells. This deepnds on
        whether we have programmed to receive the raw cells or not.
        Else ignore. */
        IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}
1273
 
1274
 
1275
/*
 * rx_dle_intr - receive-DMA-complete handler.
 *
 * Walks the rx DLE ring from our cached read position up to the hardware's
 * current list pointer. For each completed entry: dequeue the matching skb
 * from rx_dma_q, return its adapter descriptor to the free queue, validate
 * the VCC and the AAL5 CPCS trailer, trim the skb to the real PDU length
 * and push it up the ATM stack. Finally, if receive interrupts were masked
 * because of free-descriptor starvation, unmask them once the free queue is
 * no longer empty.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
        - do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
        and push them up to the higher layer protocol. Also free the desc
        associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  /* hardware list pointer, reduced to an offset within the DLE ring */
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
          dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

          /* NOTE(review): this unmaps using the WRITE pointer's
             sys_pkt_addr rather than the entry being completed; confirm
             this is intended before changing — the same form survives in
             later kernel versions of this driver. */
          pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
                len, PCI_DMA_FROMDEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
          if (!vcc) {
              printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE >= 0x20312
             atm_return(vcc, atm_guess_pdu2truesize(len));
#else
             atm_return(vcc, atm_pdu2truesize(len));
#endif
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
          length =  swap(trailer->length);
          /* sanity-check the AAL5 trailer length against buffer bounds */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             dev_kfree_skb_any(skb);
#if LINUX_VERSION_CODE >= 0x20312
             atm_return(vcc, atm_guess_pdu2truesize(len));
#else
             atm_return(vcc, atm_pdu2truesize(len));
#endif 
             goto INCR_DLE;
          }
          skb_trim(skb, length);

          /* Display the packet */
          IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

          IF_RX(printk("rx_dle_intr: skb push");)
          vcc->push(vcc,skb);
          atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
          dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
                unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1384
 
1385
 
1386
/*
 * open_rx - receive-side setup for a newly opened VCC.
 *
 * Returns 0 on success (or when the VCC has no receive traffic class) and
 * -EINVAL when ABR is requested on a 25 Mbit PHY, which doesn't support it.
 *
 * Marks this VCI valid in the adapter's VC lookup table, programs either
 * the ABR VC parameters or (for UBR) the reassembly-table entry, and
 * records the VCC in iadev->rx_open[] so incoming PDUs can be matched to
 * it in rx_pkt().
 */
static int open_rx(struct atm_vcc *vcc)
{
        IADEV *iadev;
        u_short *vc_table;
        u_short *reass_ptr;
        IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

        if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
        iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
        }
        /* Make only this VCI in the vc table valid and let all
                others be invalid entries */
        vc_table = (u_short *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        vc_table += vcc->vci;
        /* mask the last 6 bits and OR it with 3 for 1K VCs */

        *vc_table = vcc->vci << 6;
        /* Also keep a list of open rx vcs so that we can attach them with
                incoming PDUs later. */
        if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
        {
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
        }
        else {  /* for UBR  later may need to add CBR logic */
                reass_ptr = (u_short *)
                           (iadev->reass_ram+REASS_TABLE*iadev->memSize);
                reass_ptr += vcc->vci;
                *reass_ptr = NO_AAL5_PKT;
        }

        if (iadev->rx_open[vcc->vci])
                printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
                        vcc->dev->number, vcc->vci);
        iadev->rx_open[vcc->vci] = vcc;
        return 0;
}
1430
 
1431
/*
 * rx_init - one-time initialization of the receive (reassembly) side.
 *
 * Returns 0 on success, -ENOMEM if the DLE ring or the rx_open table can't
 * be allocated (the DLE ring is freed again on the latter failure).
 *
 * Steps: allocate the DMA descriptor (DLE) ring and point the hardware at
 * it; reset/mask the reassembly engine; lay out the on-adapter control
 * memory (buffer descriptors, free queue, packet-complete queue, exception
 * queue, reassembly/VC/ABR tables) per the memory map in the comment below;
 * cache the queue pointers in iadev->rfL; program timeout and filter
 * registers; allocate the rx_open VCC array; and finally put the engine
 * online.
 */
static int rx_init(struct atm_dev *dev)
{
        IADEV *iadev;
        struct rx_buf_desc *buf_desc_ptr;
        unsigned long rx_pkt_start = 0;
        void *dle_addr;
        struct abr_vc_table  *abr_vc_table;
        u16 *vc_table;
        u16 *reass_table;
        u16 *ptr16;
        int i,j, vcsize_sel;
        u_short freeq_st_adr;
        u_short *freeq_start;

        iadev = INPH_IA_DEV(dev);
  //    spin_lock_init(&iadev->rx_lock); 

        /* Allocate 4k bytes - more aligned than needed (4k boundary) */
        dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
                                        &iadev->rx_dle_dma);
        if (!dle_addr)  {
                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
                goto err_out;
        }
        iadev->rx_dle_q.start = (struct dle*)dle_addr;
        iadev->rx_dle_q.read = iadev->rx_dle_q.start;
        iadev->rx_dle_q.write = iadev->rx_dle_q.start;
        iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
        /* the end of the dle q points to the entry after the last
        DLE that can be used. */

        /* write the upper 20 bits of the start address to rx list address register */
        writel(iadev->rx_dle_dma & 0xfffff000,
               iadev->dma + IPHASE5575_RX_LIST_ADDR);
        IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
                      (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
                      *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
        printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
                      (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
                      *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)

        /* mask all interrupts, take the engine offline, then reset it */
        writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
        writew(0, iadev->reass_reg+MODE_REG);
        writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

        /* Receive side control memory map
           -------------------------------

                Buffer descr    0x0000 (736 - 23K)
                VP Table        0x5c00 (256 - 512)
                Except q        0x5e00 (128 - 512)
                Free buffer q   0x6000 (1K - 2K)
                Packet comp q   0x6800 (1K - 2K)
                Reass Table     0x7000 (1K - 2K)
                VC Table        0x7800 (1K - 2K)
                ABR VC Table    0x8000 (1K - 32K)
        */

        /* Base address for Buffer Descriptor Table */
        writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
        /* Set the buffer size register */
        writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

        /* Initialize each entry in the Buffer Descriptor Table */
        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
        buf_desc_ptr =(struct rx_buf_desc *)iadev->RX_DESC_BASE_ADDR;
        /* entry 0 is reserved (never handed out as a descriptor) */
        memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr++;
        rx_pkt_start = iadev->rx_pkt_ram;
        for(i=1; i<=iadev->num_rx_desc; i++)
        {
                memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
                buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
                buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
                buf_desc_ptr++;
                rx_pkt_start += iadev->rx_buf_sz;
        }
        IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
        /* free buffer descriptor queue: start/end/read pointers; the write
           pointer starts at the end because the queue is filled below */
        i = FREE_BUF_DESC_Q*iadev->memSize;
        writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                         iadev->reass_reg+FREEQ_ED_ADR);
        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
        /* Fill the FREEQ with all the free descriptors. */
        freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
        freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
        for(i=1; i<=iadev->num_rx_desc; i++)
        {
                *freeq_start = (u_short)i;
                freeq_start++;
        }
        IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
        /* Packet Complete Queue */
        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+PCQ_ST_ADR);
        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
        writew(i, iadev->reass_reg+PCQ_RD_PTR);
        writew(i, iadev->reass_reg+PCQ_WR_PTR);

        /* Exception Queue */
        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

        /* Load local copy of FREEQ and PCQ ptrs */
        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
        iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
        iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
        iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
        iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
        iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
        iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
              iadev->rfL.pcq_wr);)
        /* just for check - no VP TBL */
        /* VP Table */
        /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
        /* initialize VP Table for invalid VPIs
                - I guess we can write all 1s or 0x000f in the entire memory
                  space or something similar.
        */

        /* This seems to work and looks right to me too !!! */
        i =  REASS_TABLE * iadev->memSize;
        writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
        /* initialize Reassembly table to I don't know what ???? */
        reass_table = (u16 *)(iadev->reass_ram+i);
        j = REASS_TABLE_SZ * iadev->memSize;
        for(i=0; i < j; i++)
                *reass_table++ = NO_AAL5_PKT;
       /* vcsize_sel encodes num_vc as log2(8192/num_vc) for VC_LKUP_BASE */
       i = 8*1024;
       vcsize_sel =  0;
       while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
       }
       i = RX_VC_TABLE * iadev->memSize;
       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        j = RX_VC_TABLE_SZ * iadev->memSize;
        for(i = 0; i < j; i++)
        {
                /* shift the reassembly pointer by 3 + lower 3 bits of
                vc_lkup_base register (=3 for 1K VCs) and the last byte
                is those low 3 bits.
                Shall program this later.
                */
                *vc_table = (i << 6) | 15;      /* for invalid VCI */
                vc_table++;
        }
        /* ABR VC table */
        i =  ABR_VC_TABLE * iadev->memSize;
        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

        i = ABR_VC_TABLE * iadev->memSize;
        abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
        /* NOTE(review): the ABR table is sized with REASS_TABLE_SZ here,
           not an ABR-specific size constant — confirm this is intended. */
        j = REASS_TABLE_SZ * iadev->memSize;
        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
        for(i = 0; i < j; i++) {
                abr_vc_table->rdf = 0x0003;
                abr_vc_table->air = 0x5eb1;
                abr_vc_table++;
        }

        /* Initialize other registers */

        /* VP Filter Register set for VC Reassembly only */
        writew(0xff00, iadev->reass_reg+VP_FILTER);
        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
        writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

        /* Packet Timeout Count  related Registers :
           Set packet timeout to occur in about 3 seconds
           Set Packet Aging Interval count register to overflow in about 4 us
        */
        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
        /* NOTE(review): ptr16 is only used as an integer scratch value to
           assemble the TMOUT_RANGE bit fields from j; the pointer casts are
           gratuitous (and truncating on 64-bit) — this computes
           ((j>>6)&0xff) | (((2*(j + (j-1)))<<2)&0xff00) in a roundabout
           way. Verify the intended encoding before touching it. */
        ptr16 = (u16*)j;
        i = ((u32)ptr16 >> 6) & 0xff;
        ptr16  += j - 1;
        i |=(((u32)ptr16 << 2) & 0xff00);
        writew(i, iadev->reass_reg+TMOUT_RANGE);
        /* initiate the desc_tble */
        for(i=0; i<iadev->num_tx_desc;i++)
            iadev->desc_tbl[i].timestamp = 0;

        /* to clear the interrupt status register - read it */
        readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

        /* Mask Register - clear it */
        writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

        skb_queue_head_init(&iadev->rx_dma_q);
        iadev->rx_free_desc_qhead = NULL;
        /* one VCC pointer per VC (4 bytes each on 32-bit) */
        iadev->rx_open = kmalloc(4*iadev->num_vc,GFP_KERNEL);
        if (!iadev->rx_open)
        {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);
                goto err_free_dle;
        }
        memset(iadev->rx_open, 0, 4*iadev->num_vc);
        iadev->rxing = 1;
        iadev->rx_pkt_cnt = 0;
        /* Mode Register */
        writew(R_ONLINE, iadev->reass_reg+MODE_REG);
        return 0;

err_free_dle:
        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                            iadev->rx_dle_dma);
err_out:
        return -ENOMEM;
}
1653
 
1654
 
1655
/*
1656
        The memory map suggested in appendix A and the coding for it.
1657
        Keeping it around just in case we change our mind later.
1658
 
1659
                Buffer descr    0x0000 (128 - 4K)
1660
                UBR sched       0x1000 (1K - 4K)
1661
                UBR Wait q      0x2000 (1K - 4K)
1662
                Commn queues    0x3000 Packet Ready, Trasmit comp(0x3100)
1663
                                        (128 - 256) each
1664
                extended VC     0x4000 (1K - 8K)
1665
                ABR sched       0x6000  and ABR wait queue (1K - 2K) each
1666
                CBR sched       0x7000 (as needed)
1667
                VC table        0x8000 (1K - 32K)
1668
*/
1669
 
1670
/*
 * tx_intr - service a segmentation (transmit-side) interrupt.
 *
 * Reads the SEG interrupt status register and, under tx_lock, runs the
 * transmit-completion poll when TRANSMIT_DONE is set.  The TRANSMIT_DONE
 * bit is acknowledged by writing it back to the status register.  A
 * TCQ_NOT_EMPTY indication is only logged here, not acted upon.
 */
static void tx_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        unsigned short status;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);

        /* NOTE(review): 32-bit readl narrowed into a u_short; presumably the
           SEG status register only carries 16 significant bits - confirm. */
        status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (status & TRANSMIT_DONE){

           IF_EVENT(printk("Tansmit Done Intr logic run\n");)
           spin_lock_irqsave(&iadev->tx_lock, flags);
           ia_tx_poll(iadev);
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           /* ack TRANSMIT_DONE by writing the bit back */
           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
           /* a close may be waiting for outstanding tx to drain */
           if (iadev->close_pending)
               wake_up(&iadev->close_wait);
        }
        if (status & TCQ_NOT_EMPTY)
        {
            IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
        }
}
1694
 
1695
/*
 * tx_dle_intr - reap completed transmit DMA list entries (DLEs).
 *
 * Walks the tx DLE ring from the software read pointer up to the
 * hardware's current list position, dequeuing one skb from tx_dma_q per
 * DLE.  Buffers for fast VCCs (pcr >= rate_limit) are released (via
 * vcc->pop or freed); buffers for rate-limited VCCs are marked IA_DLED
 * and parked on the per-VCC txing_skb queue for later release.
 * Runs entirely under iadev->tx_lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        /* hardware list pointer, as a byte offset within the DLE ring;
           >> 4 below converts it to a DLE index (assumes 16-byte DLEs -
           TODO confirm sizeof(struct dle) == 16) */
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

            /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
            /* NOTE(review): the modulus mixes an element-count pointer
               difference with a byte size (2*sizeof(struct dle)); verify
               this really identifies the first DLE of each skb/trailer pair. */
            if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
                pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
                                 PCI_DMA_TODEVICE);
            }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  /* bail out without advancing tx_dle_q.read */
                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
                  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               /* fast VCC: release the buffer immediately */
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%x \n", (u32)skb);)
            /* advance around the ring */
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1757
 
1758
/*
 * open_tx - configure the transmit side of a VCC being opened.
 *
 * Validates the requested traffic class against the PHY (25 Mbit PHYs
 * support UBR only), derives the effective PCR and a tx-poll timeout,
 * sizes the socket send buffer for rate-limited VCCs, initialises the
 * main/extended VC table entries in adapter RAM, and performs the
 * per-class (UBR/ABR/CBR) scheduler setup.
 *
 * Returns 0 on success, a negative errno on failure (and -1 for a CBR
 * PCR above the line rate, as originally coded).
 */
static int open_tx(struct atm_vcc *vcc)
{
        struct ia_vcc *ia_vcc;
        IADEV *iadev;
        struct main_vc *vc;
        struct ext_vc *evc;
        int ret;
        IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
        if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
        iadev = INPH_IA_DEV(vcc->dev);

        /* 25 Mbit front end: UBR only */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
          if (vcc->qos.txtp.traffic_class == ATM_CBR) {
               printk("IA:  CBR not support\n");
               return -EINVAL;
          }
        }
        ia_vcc =  INPH_IA_VCC(vcc);
        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
        /* SDU must fit in one tx buffer together with the CPCS trailer */
        if (vcc->qos.txtp.max_sdu >
                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
           printk("IA:  SDU size over (%d) the configured SDU size %d\n",
                  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
           INPH_IA_VCC(vcc) = NULL;
           kfree(ia_vcc);
           return -EINVAL;
        }
        ia_vcc->vc_desc_cnt = 0;
        ia_vcc->txing = 1;

        /* find pcr */
        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
        /* clamp to line rate */
        if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
        ia_vcc->pcr = vcc->qos.txtp.pcr;

        /* pick a transmit-poll timeout roughly inverse to the rate */
        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
        /* slow VCCs get a holding queue (see tx_dle_intr) and a send
           buffer sized to throttle the socket layer */
        if (ia_vcc->pcr < iadev->rate_limit)
           skb_queue_head_init (&ia_vcc->txing_skb);
        if (ia_vcc->pcr < iadev->rate_limit) {
           if (vcc->qos.txtp.max_sdu != 0) {
               if (ia_vcc->pcr > 60000)
                  vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 5;
               else if (ia_vcc->pcr > 2000)
                  vcc->sk->sndbuf = vcc->qos.txtp.max_sdu * 4;
               else
                 vcc->sk->sndbuf = 3*vcc->qos.txtp.max_sdu;
           }
           else
             vcc->sk->sndbuf = 24576;
        }

        /* locate and clear this VCI's main/extended VC table entries */
        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
        vc += vcc->vci;
        evc += vcc->vci;
        memset((caddr_t)vc, 0, sizeof(*vc));
        memset((caddr_t)evc, 0, sizeof(*evc));

        /* store the most significant 4 bits of vci as the last 4 bits
                of first part of atm header.
           store the last 12 bits of vci as first 12 bits of the second
                part of the atm header.
        */
        evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
        evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

        /* check the following for different traffic classes */
        if (vcc->qos.txtp.traffic_class == ATM_UBR)
        {
                vc->type = UBR;
                vc->status = CRC_APPEND;
                vc->acr = cellrate_to_float(iadev->LineRate);
                if (vcc->qos.txtp.pcr > 0)
                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                                             vcc->qos.txtp.max_pcr,vc->acr);)
        }
        else if (vcc->qos.txtp.traffic_class == ATM_ABR)
        {       srv_cls_param_t srv_p;
                IF_ABR(printk("Tx ABR VCC\n");)
                init_abr_vc(iadev, &srv_p);
                if (vcc->qos.txtp.pcr > 0)
                   srv_p.pcr = vcc->qos.txtp.pcr;
                if (vcc->qos.txtp.min_pcr > 0) {
                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
                   /* reject if the aggregate MCR+CBR would exceed line rate */
                   /* NOTE(review): returns here without undoing the earlier
                      VC-table writes or freeing ia_vcc - confirm the caller
                      cleans up on error. */
                   if (tmpsum > iadev->LineRate)
                       return -EBUSY;
                   srv_p.mcr = vcc->qos.txtp.min_pcr;
                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
                }
                else srv_p.mcr = 0;
                /* copy over only the ABR parameters the caller supplied */
                if (vcc->qos.txtp.icr)
                   srv_p.icr = vcc->qos.txtp.icr;
                if (vcc->qos.txtp.tbe)
                   srv_p.tbe = vcc->qos.txtp.tbe;
                if (vcc->qos.txtp.frtt)
                   srv_p.frtt = vcc->qos.txtp.frtt;
                if (vcc->qos.txtp.rif)
                   srv_p.rif = vcc->qos.txtp.rif;
                if (vcc->qos.txtp.rdf)
                   srv_p.rdf = vcc->qos.txtp.rdf;
                if (vcc->qos.txtp.nrm_pres)
                   srv_p.nrm = vcc->qos.txtp.nrm;
                if (vcc->qos.txtp.trm_pres)
                   srv_p.trm = vcc->qos.txtp.trm;
                if (vcc->qos.txtp.adtf_pres)
                   srv_p.adtf = vcc->qos.txtp.adtf;
                if (vcc->qos.txtp.cdf_pres)
                   srv_p.cdf = vcc->qos.txtp.cdf;
                /* initial cell rate can never exceed peak cell rate */
                if (srv_p.icr > srv_p.pcr)
                   srv_p.icr = srv_p.pcr;
                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
                                                      srv_p.pcr, srv_p.mcr);)
                ia_open_abr_vc(iadev, &srv_p, vcc, 1);
        } else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
                if (iadev->phy_type & FE_25MBIT_PHY) {
                    printk("IA:  CBR not support\n");
                    return -EINVAL;
                }
                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
                   IF_CBR(printk("PCR is not available\n");)
                   return -1;
                }
                vc->type = CBR;
                vc->status = CRC_APPEND;
                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
                    return ret;
                }
       }
        else
           printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");

        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
        IF_EVENT(printk("ia open_tx returning \n");)
        return 0;
}
1907
 
1908
 
1909
/*
 * tx_init - one-time initialisation of the segmentation (transmit) engine.
 *
 * Allocates the tx DLE ring (DMA-coherent), per-descriptor CPCS trailer
 * buffers (streaming-mapped), the host descriptor timestamp table and the
 * per-VC test table; then programs the SEG control registers: buffer
 * descriptor table, transmit-complete queue (TCQ), packet-ready queue
 * (PRQ), CBR/UBR/ABR scheduler tables, rate registers and interrupt mask.
 * The register write order follows the hardware bring-up sequence and
 * must not be rearranged.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (all resources
 * acquired so far are unwound through the goto chain at the bottom).
 */
static int tx_init(struct atm_dev *dev)
{
        IADEV *iadev;
        struct tx_buf_desc *buf_desc_ptr;
        unsigned int tx_pkt_start;
        void *dle_addr;
        int i;
        u_short tcq_st_adr;
        u_short *tcq_start;
        u_short prq_st_adr;
        u_short *prq_start;
        struct main_vc *vc;
        struct ext_vc *evc;
        u_short tmp16;
        u32 vcsize_sel;

        iadev = INPH_IA_DEV(dev);
        spin_lock_init(&iadev->tx_lock);

        IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                                readw(iadev->seg_reg+SEG_MASK_REG));)

        /* Allocate 4k (boundary aligned) bytes */
        dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
                                        &iadev->tx_dle_dma);
        if (!dle_addr)  {
                printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
                goto err_out;
        }
        iadev->tx_dle_q.start = (struct dle*)dle_addr;
        iadev->tx_dle_q.read = iadev->tx_dle_q.start;
        iadev->tx_dle_q.write = iadev->tx_dle_q.start;
        iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

        /* write the upper 20 bits of the start address to tx list address register */
        writel(iadev->tx_dle_dma & 0xfffff000,
               iadev->dma + IPHASE5575_TX_LIST_ADDR);
        /* mask all SEG interrupts, take the engine offline, reset it */
        writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
        writew(0, iadev->seg_reg+MODE_REG_0);
        writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

        /*
           Transmit side control memory map
           --------------------------------
         Buffer descr   0x0000 (128 - 4K)
         Commn queues   0x1000  Transmit comp, Packet ready(0x1400)
                                        (512 - 1K) each
                                        TCQ - 4K, PRQ - 5K
         CBR Table      0x1800 (as needed) - 6K
         UBR Table      0x3000 (1K - 4K) - 12K
         UBR Wait queue 0x4000 (1K - 4K) - 16K
         ABR sched      0x5000  and ABR wait queue (1K - 2K) each
                                ABR Tbl - 20K, ABR Wq - 22K
         extended VC    0x6000 (1K - 8K) - 24K
         VC Table       0x8000 (1K - 32K) - 32K

        Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
        and Wait q, which can be allotted later.
        */

        /* Buffer Descriptor Table Base address */
        writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

        /* initialize each entry in the buffer descriptor table */
        buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
        memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
        buf_desc_ptr++;
        tx_pkt_start = TX_PACKET_RAM;
        /* descriptor 0 stays zeroed; descriptors are 1-based */
        for(i=1; i<=iadev->num_tx_desc; i++)
        {
                memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
                buf_desc_ptr->desc_mode = AAL5;
                buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
                buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
                buf_desc_ptr++;
                tx_pkt_start += iadev->tx_buf_sz;
        }
        iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
            goto err_free_dle;
        }
        /* one DMA-able CPCS trailer per tx descriptor */
        for (i= 0; i< iadev->num_tx_desc; i++)
        {
            struct cpcs_trailer *cpcs;

            cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {
                printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
                goto err_free_tx_bufs;
            }
            iadev->tx_buf[i].cpcs = cpcs;
            iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
                cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
        }
        iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
                                   sizeof(struct desc_tbl_t), GFP_KERNEL);
        if(!iadev->desc_tbl)
                goto err_free_all_tx_bufs;

        /* Communication Queues base address */
        i = TX_COMP_Q * iadev->memSize;
        writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

        /* Transmit Complete Queue */
        writew(i, iadev->seg_reg+TCQ_ST_ADR);
        writew(i, iadev->seg_reg+TCQ_RD_PTR);
        writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
        iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+TCQ_ED_ADR);
        /* Fill the TCQ with all the free descriptors. */
        tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
        tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
        for(i=1; i<=iadev->num_tx_desc; i++)
        {
                *tcq_start = (u_short)i;
                tcq_start++;
        }

        /* Packet Ready Queue */
        i = PKT_RDY_Q * iadev->memSize;
        writew(i, iadev->seg_reg+PRQ_ST_ADR);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+PRQ_ED_ADR);
        writew(i, iadev->seg_reg+PRQ_RD_PTR);
        writew(i, iadev->seg_reg+PRQ_WR_PTR);

        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
        iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
        iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

        iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
        iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
        iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

        /* Just for safety initializing the queue to have desc 1 always */
        /* Fill the PRQ with all the free descriptors. */
        prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
        prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
        for(i=1; i<=iadev->num_tx_desc; i++)
        {
                *prq_start = (u_short)0; /* desc 1 in all entries */
                prq_start++;
        }
        /* CBR Table */
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
        /* register holds the table offset in 16-bit words, hence >> 1 */
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
               (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)
        /* NOTE(review): this assignment to tmp16 is never used afterwards */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);

        /* Initialize the CBR Schedualing Table */
        memset((caddr_t)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize),
                                                          0, iadev->num_vc*6);
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

        /* UBR scheduling Table and wait queue */
        /* initialize all bytes of UBR scheduler table and wait queue to 0
                - SCHEDSZ is 1K (# of entries).
                - UBR Table size is 4K
                - UBR wait queue is 4K
           since the table and wait queues are contiguous, all the bytes
           can be intialized by one memeset.
        */

        /* vcsize_sel = log2(8192 / num_vc); num_vc is a power of two */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }

        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
        memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
        /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
        /* initialize all bytes of ABR scheduler table and wait queue to 0
                - SCHEDSZ is 1K (# of entries).
                - ABR Table size is 2K
                - ABR wait queue is 2K
           since the table and wait queues are contiguous, all the bytes
           can be intialized by one memeset.
        */
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

        i = ABR_SCHED_TABLE*iadev->memSize;
        memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
        /* clear every VC table entry and build the per-VC host test table */
        vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
        evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
        iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
           goto err_free_desc_tbl;
        }
        for(i=0; i<iadev->num_vc; i++)
        {
                memset((caddr_t)vc, 0, sizeof(*vc));
                memset((caddr_t)evc, 0, sizeof(*evc));
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
                                                GFP_KERNEL);
                if (!iadev->testTable[i])
                        goto err_free_test_tables;
                iadev->testTable[i]->lastTime = 0;
                iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
                vc++;
                evc++;
        }

        /* Other Initialization */

        /* Max Rate Register */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           writew(RATE25, iadev->seg_reg+MAXRATE);
           writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        else {
           writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
           writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        /* Set Idle Header Reigisters to be sure */
        writew(0, iadev->seg_reg+IDLEHEADHI);
        writew(0, iadev->seg_reg+IDLEHEADLO);

        /* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

        iadev->close_pending = 0;
#if LINUX_VERSION_CODE >= 0x20303
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
#else
        iadev->close_wait = NULL;
        iadev->timeout_wait = NULL;
#endif 
        skb_queue_head_init(&iadev->tx_dma_q);
        ia_init_rtn_q(&iadev->tx_return_q);

        /* RM Cell Protocol ID and Message Type */
        writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
        skb_queue_head_init (&iadev->tx_backlog);

        /* Mode Register 1 */
        writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

        /* Mode Register 0 */
        writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

        /* Interrupt Status Register - read to clear */
        readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

        /* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;

        return 0;

/* error unwinding: each label frees everything acquired after the
   preceding label's resources (classic goto-cleanup chain) */
err_free_test_tables:
        while (--i >= 0)
                kfree(iadev->testTable[i]);
        kfree(iadev->testTable);
err_free_desc_tbl:
        kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
        i = iadev->num_tx_desc;
err_free_tx_bufs:
        while (--i >= 0) {
                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

                pci_unmap_single(iadev->pci, desc->dma_addr,
                        sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
                kfree(desc->cpcs);
        }
        kfree(iadev->tx_buf);
err_free_dle:
        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
                            iadev->tx_dle_dma);
err_out:
        return -ENOMEM;
}
2228
 
2229
/*
 * ia_int - top-level interrupt handler.
 *
 * Polls the bus status register and dispatches each pending condition to
 * its service routine, looping until no status bits (low 7) remain set.
 * DLE (DMA list) interrupts are explicitly acknowledged by writing the
 * bit back; reassembly/segmentation interrupts are presumably cleared by
 * their handlers reading the sub-block status registers - TODO confirm.
 */
static void ia_int(int irq, void *dev_id, struct pt_regs *regs)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
        if (status & STAT_REASSINT)
        {
           /* reassembly (receive) interrupt */
           IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
           rx_intr(dev);
        }
        if (status & STAT_DLERINT)
        {
           /* Clear this bit by writing a 1 to it. */
           *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
           rx_dle_intr(dev);
        }
        if (status & STAT_SEGINT)
        {
           /* segmentation (transmit) interrupt */
           IF_EVENT(printk("IA: tx_intr \n");)
           tx_intr(dev);
        }
        if (status & STAT_DLETINT)
        {
           /* ack and service the transmit DMA-list interrupt */
           *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
           tx_dle_intr(dev);
        }
        if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
        {
           /* only front-end interrupts are handled; ERR/MARK are ignored */
           if (status & STAT_FEINT)
               IaFrontEndIntr(iadev);
        }
   }
}
2270
 
2271
 
2272
 
2273
/*----------------------------- entries --------------------------------*/
2274
static int get_esi(struct atm_dev *dev)
2275
{
2276
        IADEV *iadev;
2277
        int i;
2278
        u32 mac1;
2279
        u16 mac2;
2280
 
2281
        iadev = INPH_IA_DEV(dev);
2282
        mac1 = cpu_to_be32(le32_to_cpu(readl(
2283
                                iadev->reg+IPHASE5575_MAC1)));
2284
        mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2285
        IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2286
        for (i=0; i<MAC1_LEN; i++)
2287
                dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2288
 
2289
        for (i=0; i<MAC2_LEN; i++)
2290
                dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2291
        return 0;
2292
}
2293
 
2294
static int reset_sar(struct atm_dev *dev)
2295
{
2296
        IADEV *iadev;
2297
        int i, error = 1;
2298
        unsigned int pci[64];
2299
 
2300
        iadev = INPH_IA_DEV(dev);
2301
        for(i=0; i<64; i++)
2302
          if ((error = pci_read_config_dword(iadev->pci,
2303
                                i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2304
              return error;
2305
        writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2306
        for(i=0; i<64; i++)
2307
          if ((error = pci_write_config_dword(iadev->pci,
2308
                                        i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2309
            return error;
2310
        udelay(5);
2311
        return 0;
2312
}
2313
 
2314
 
2315
#if LINUX_VERSION_CODE >= 0x20312
2316
static int __init ia_init(struct atm_dev *dev)
2317
#else
2318
__initfunc(static int ia_init(struct atm_dev *dev))
2319
#endif  
2320
{
2321
        IADEV *iadev;
2322
        unsigned long real_base, base;
2323
        unsigned short command;
2324
        unsigned char revision;
2325
        int error, i;
2326
 
2327
        /* The device has been identified and registered. Now we read
2328
           necessary configuration info like memory base address,
2329
           interrupt number etc */
2330
 
2331
        IF_INIT(printk(">ia_init\n");)
2332
        dev->ci_range.vpi_bits = 0;
2333
        dev->ci_range.vci_bits = NR_VCI_LD;
2334
 
2335
        iadev = INPH_IA_DEV(dev);
2336
        real_base = pci_resource_start (iadev->pci, 0);
2337
        iadev->irq = iadev->pci->irq;
2338
 
2339
        if ((error = pci_read_config_word(iadev->pci, PCI_COMMAND,&command))
2340
                    || (error = pci_read_config_byte(iadev->pci,
2341
                                PCI_REVISION_ID,&revision)))
2342
        {
2343
                printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2344
                                dev->number,error);
2345
                return -EINVAL;
2346
        }
2347
        IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2348
                        dev->number, revision, real_base, iadev->irq);)
2349
 
2350
        /* find mapping size of board */
2351
 
2352
        iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2353
 
2354
        if (iadev->pci_map_size == 0x100000){
2355
          iadev->num_vc = 4096;
2356
          dev->ci_range.vci_bits = NR_VCI_4K_LD;
2357
          iadev->memSize = 4;
2358
        }
2359
        else if (iadev->pci_map_size == 0x40000) {
2360
          iadev->num_vc = 1024;
2361
          iadev->memSize = 1;
2362
        }
2363
        else {
2364
           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2365
           return -EINVAL;
2366
        }
2367
        IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2368
 
2369
        /* enable bus mastering */
2370
        pci_set_master(iadev->pci);
2371
 
2372
        /*
2373
         * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2374
         */
2375
        udelay(10);
2376
 
2377
        /* mapping the physical address to a virtual address in address space */
2378
        base=(unsigned long)ioremap((unsigned long)real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */
2379
 
2380
        if (!base)
2381
        {
2382
                printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2383
                            dev->number);
2384
                return error;
2385
        }
2386
        IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=0x%lx,irq=%d\n",
2387
                        dev->number, revision, base, iadev->irq);)
2388
 
2389
        /* filling the iphase dev structure */
2390
        iadev->mem = iadev->pci_map_size /2;
2391
        iadev->base_diff = real_base - base;
2392
        iadev->real_base = real_base;
2393
        iadev->base = base;
2394
 
2395
        /* Bus Interface Control Registers */
2396
        iadev->reg = (u32 *) (base + REG_BASE);
2397
        /* Segmentation Control Registers */
2398
        iadev->seg_reg = (u32 *) (base + SEG_BASE);
2399
        /* Reassembly Control Registers */
2400
        iadev->reass_reg = (u32 *) (base + REASS_BASE);
2401
        /* Front end/ DMA control registers */
2402
        iadev->phy = (u32 *) (base + PHY_BASE);
2403
        iadev->dma = (u32 *) (base + PHY_BASE);
2404
        /* RAM - Segmentation RAm and Reassembly RAM */
2405
        iadev->ram = (u32 *) (base + ACTUAL_RAM_BASE);
2406
        iadev->seg_ram =  (base + ACTUAL_SEG_RAM_BASE);
2407
        iadev->reass_ram = (base + ACTUAL_REASS_RAM_BASE);
2408
 
2409
        /* lets print out the above */
2410
        IF_INIT(printk("Base addrs: %08x %08x %08x \n %08x %08x %08x %08x\n",
2411
          (u32)iadev->reg,(u32)iadev->seg_reg,(u32)iadev->reass_reg,
2412
          (u32)iadev->phy, (u32)iadev->ram, (u32)iadev->seg_ram,
2413
          (u32)iadev->reass_ram);)
2414
 
2415
        /* lets try reading the MAC address */
2416
        error = get_esi(dev);
2417
        if (error) {
2418
          iounmap((void *) iadev->base);
2419
          return error;
2420
        }
2421
        printk("IA: ");
2422
        for (i=0; i < ESI_LEN; i++)
2423
                printk("%s%02X",i ? "-" : "",dev->esi[i]);
2424
        printk("\n");
2425
 
2426
        /* reset SAR */
2427
        if (reset_sar(dev)) {
2428
           iounmap((void *) iadev->base);
2429
           printk("IA: reset SAR fail, please try again\n");
2430
           return 1;
2431
        }
2432
        return 0;
2433
}
2434
 
2435
/*
 * Fold the adapter's 16-bit hardware counters into the driver's 32-bit
 * running statistics.  Skipped entirely while no carrier is detected.
 * Each cell count is split across a low/high register pair.
 */
static void ia_update_stats(IADEV *iadev) {
    if (!iadev->carrier_detect)
        return;
    /* receive side: cell count (lo/hi pair), dropped packets, error cells */
    iadev->rx_cell_cnt += readw(iadev->reass_reg + CELL_CTR0) & 0xffff;
    iadev->rx_cell_cnt += (readw(iadev->reass_reg + CELL_CTR1) & 0xffff) << 16;
    iadev->drop_rxpkt  += readw(iadev->reass_reg + DRP_PKT_CNTR) & 0xffff;
    iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
    /* transmit side: cell count (lo/hi pair) */
    iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO) & 0xffff;
    iadev->tx_cell_cnt += (readw(iadev->seg_reg + CELL_CTR_HIGH_AUTO) & 0xffff) << 16;
}
2446
 
2447
static void ia_led_timer(unsigned long arg) {
2448
        unsigned long flags;
2449
        static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2450
        u_char i;
2451
        static u32 ctrl_reg;
2452
        for (i = 0; i < iadev_count; i++) {
2453
           if (ia_dev[i]) {
2454
              ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2455
              if (blinking[i] == 0) {
2456
                 blinking[i]++;
2457
                 ctrl_reg &= (~CTRL_LED);
2458
                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2459
                 ia_update_stats(ia_dev[i]);
2460
              }
2461
              else {
2462
                 blinking[i] = 0;
2463
                 ctrl_reg |= CTRL_LED;
2464
                 writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2465
                 spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2466
                 if (ia_dev[i]->close_pending)
2467
                    wake_up(&ia_dev[i]->close_wait);
2468
                 ia_tx_poll(ia_dev[i]);
2469
                 spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2470
              }
2471
           }
2472
        }
2473
        mod_timer(&ia_timer, jiffies + HZ / 4);
2474
        return;
2475
}
2476
 
2477
static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2478
        unsigned long addr)
2479
{
2480
        writel(value, INPH_IA_DEV(dev)->phy+addr);
2481
}
2482
 
2483
static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2484
{
2485
        return readl(INPH_IA_DEV(dev)->phy+addr);
2486
}
2487
 
2488
/*
 * Release everything allocated by tx_init(): the descriptor/skb table,
 * the per-VC test table, every CPCS trailer buffer (plus its streaming
 * DMA mapping), and the coherent TX DLE queue.
 */
static void ia_free_tx(IADEV *iadev)
{
        int i;

        kfree(iadev->desc_tbl);
        for (i = 0; i < iadev->num_vc; i++)
                kfree(iadev->testTable[i]);
        kfree(iadev->testTable);
        for (i = 0; i < iadev->num_tx_desc; i++) {
                struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

                /* unmap before freeing the trailer buffer it covers */
                pci_unmap_single(iadev->pci, desc->dma_addr,
                                 sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
                kfree(desc->cpcs);
        }
        kfree(iadev->tx_buf);
        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
                            iadev->tx_dle_dma);
}
2507
 
2508
/*
 * Release the RX-side allocations: the open-VC bookkeeping array and
 * the coherent RX DLE queue (counterpart of ia_free_tx).
 */
static void ia_free_rx(IADEV *iadev)
{
        kfree(iadev->rx_open);
        pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
                            iadev->rx_dle_dma);
}
2514
 
2515
#if LINUX_VERSION_CODE >= 0x20312
2516
static int __init ia_start(struct atm_dev *dev)
2517
#else
2518
__initfunc(static int ia_start(struct atm_dev *dev))
2519
#endif  
2520
{
2521
        IADEV *iadev;
2522
        int error;
2523
        unsigned char phy;
2524
        u32 ctrl_reg;
2525
        IF_EVENT(printk(">ia_start\n");)
2526
        iadev = INPH_IA_DEV(dev);
2527
        if (request_irq(iadev->irq, &ia_int, SA_SHIRQ, DEV_LABEL, dev)) {
2528
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2529
                    dev->number, iadev->irq);
2530
                error = -EAGAIN;
2531
                goto err_out;
2532
        }
2533
        /* @@@ should release IRQ on error */
2534
        /* enabling memory + master */
2535
        if ((error = pci_write_config_word(iadev->pci,
2536
                                PCI_COMMAND,
2537
                                PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2538
        {
2539
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2540
                    "master (0x%x)\n",dev->number, error);
2541
                error = -EIO;
2542
                goto err_free_irq;
2543
        }
2544
        udelay(10);
2545
 
2546
        /* Maybe we should reset the front end, initialize Bus Interface Control
2547
                Registers and see. */
2548
 
2549
        IF_INIT(printk("Bus ctrl reg: %08x\n",
2550
                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2551
        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2552
        ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2553
                        | CTRL_B8
2554
                        | CTRL_B16
2555
                        | CTRL_B32
2556
                        | CTRL_B48
2557
                        | CTRL_B64
2558
                        | CTRL_B128
2559
                        | CTRL_ERRMASK
2560
                        | CTRL_DLETMASK         /* shud be removed l8r */
2561
                        | CTRL_DLERMASK
2562
                        | CTRL_SEGMASK
2563
                        | CTRL_REASSMASK
2564
                        | CTRL_FEMASK
2565
                        | CTRL_CSPREEMPT;
2566
 
2567
       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2568
 
2569
        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2570
                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2571
           printk("Bus status reg after init: %08x\n",
2572
                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2573
 
2574
        ia_hw_type(iadev);
2575
        error = tx_init(dev);
2576
        if (error)
2577
                goto err_free_irq;
2578
        error = rx_init(dev);
2579
        if (error)
2580
                goto err_free_tx;
2581
 
2582
        ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2583
        writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2584
        IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2585
                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2586
        phy = 0; /* resolve compiler complaint */
2587
        IF_INIT (
2588
        if ((phy=ia_phy_get(dev,0)) == 0x30)
2589
                printk("IA: pm5346,rev.%d\n",phy&0x0f);
2590
        else
2591
                printk("IA: utopia,rev.%0x\n",phy);)
2592
 
2593
        if (iadev->phy_type &  FE_25MBIT_PHY) {
2594
           ia_mb25_init(iadev);
2595
        } else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY)) {
2596
           ia_suni_pm7345_init(iadev);
2597
        } else {
2598
                error = suni_init(dev);
2599
                if (error)
2600
                        goto err_free_rx;
2601
                /*
2602
                 * Enable interrupt on loss of signal
2603
                 * SUNI_RSOP_CIE - 0x10
2604
                 * SUNI_RSOP_CIE_LOSE - 0x04
2605
                 */
2606
                ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2607
#ifndef MODULE
2608
                error = dev->phy->start(dev);
2609
                if (error)
2610
                        goto err_free_rx;
2611
#endif
2612
                /* Get iadev->carrier_detect status */
2613
                IaFrontEndIntr(iadev);
2614
        }
2615
        return 0;
2616
 
2617
err_free_rx:
2618
        ia_free_rx(iadev);
2619
err_free_tx:
2620
        ia_free_tx(iadev);
2621
err_free_irq:
2622
        free_irq(iadev->irq, dev);
2623
err_out:
2624
        return error;
2625
}
2626
 
2627
static void ia_close(struct atm_vcc *vcc)
2628
{
2629
        u16 *vc_table;
2630
        IADEV *iadev;
2631
        struct ia_vcc *ia_vcc;
2632
        struct sk_buff *skb = NULL;
2633
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2634
        unsigned long closetime, flags;
2635
        int ctimeout;
2636
 
2637
        iadev = INPH_IA_DEV(vcc->dev);
2638
        ia_vcc = INPH_IA_VCC(vcc);
2639
        if (!ia_vcc) return;
2640
 
2641
        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
2642
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
2643
        clear_bit(ATM_VF_READY,&vcc->flags);
2644
        skb_queue_head_init (&tmp_tx_backlog);
2645
        skb_queue_head_init (&tmp_vcc_backlog);
2646
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2647
           iadev->close_pending++;
2648
           sleep_on_timeout(&iadev->timeout_wait, 50);
2649
           spin_lock_irqsave(&iadev->tx_lock, flags);
2650
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
2651
              if (ATM_SKB(skb)->vcc == vcc){
2652
                 if (vcc->pop) vcc->pop(vcc, skb);
2653
                 else dev_kfree_skb_any(skb);
2654
              }
2655
              else
2656
                 skb_queue_tail(&tmp_tx_backlog, skb);
2657
           }
2658
           while((skb = skb_dequeue(&tmp_tx_backlog)))
2659
             skb_queue_tail(&iadev->tx_backlog, skb);
2660
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2661
           closetime = jiffies;
2662
           ctimeout = 300000 / ia_vcc->pcr;
2663
           if (ctimeout == 0)
2664
              ctimeout = 1;
2665
           while (ia_vcc->vc_desc_cnt > 0){
2666
              if ((jiffies - closetime) >= ctimeout)
2667
                 break;
2668
              spin_unlock_irqrestore(&iadev->tx_lock, flags);
2669
              sleep_on(&iadev->close_wait);
2670
              spin_lock_irqsave(&iadev->tx_lock, flags);
2671
           }
2672
           iadev->close_pending--;
2673
           iadev->testTable[vcc->vci]->lastTime = 0;
2674
           iadev->testTable[vcc->vci]->fract = 0;
2675
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2676
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2677
              if (vcc->qos.txtp.min_pcr > 0)
2678
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2679
           }
2680
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2681
              ia_vcc = INPH_IA_VCC(vcc);
2682
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2683
              ia_cbrVc_close (vcc);
2684
           }
2685
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
2686
        }
2687
 
2688
        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2689
           // reset reass table
2690
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2691
           vc_table += vcc->vci;
2692
           *vc_table = NO_AAL5_PKT;
2693
           // reset vc table
2694
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2695
           vc_table += vcc->vci;
2696
           *vc_table = (vcc->vci << 6) | 15;
2697
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2698
              struct abr_vc_table *abr_vc_table = (struct abr_vc_table *)
2699
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2700
              abr_vc_table +=  vcc->vci;
2701
              abr_vc_table->rdf = 0x0003;
2702
              abr_vc_table->air = 0x5eb1;
2703
           }
2704
           // Drain the packets
2705
           rx_dle_intr(vcc->dev);
2706
           iadev->rx_open[vcc->vci] = 0;
2707
        }
2708
        kfree(INPH_IA_VCC(vcc));
2709
        ia_vcc = NULL;
2710
        INPH_IA_VCC(vcc) = NULL;
2711
        clear_bit(ATM_VF_ADDR,&vcc->flags);
2712
        return;
2713
}
2714
 
2715
static int ia_open(struct atm_vcc *vcc, short vpi, int vci)
2716
{
2717
        IADEV *iadev;
2718
        struct ia_vcc *ia_vcc;
2719
        int error;
2720
        if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2721
        {
2722
                IF_EVENT(printk("ia: not partially allocated resources\n");)
2723
                INPH_IA_VCC(vcc) = NULL;
2724
        }
2725
        iadev = INPH_IA_DEV(vcc->dev);
2726
        error = atm_find_ci(vcc, &vpi, &vci);
2727
        if (error)
2728
        {
2729
            printk("iadev: atm_find_ci returned error %d\n", error);
2730
            return error;
2731
        }
2732
        vcc->vpi = vpi;
2733
        vcc->vci = vci;
2734
        if (vci != ATM_VPI_UNSPEC && vpi != ATM_VCI_UNSPEC)
2735
        {
2736
                IF_EVENT(printk("iphase open: unspec part\n");)
2737
                set_bit(ATM_VF_ADDR,&vcc->flags);
2738
        }
2739
        if (vcc->qos.aal != ATM_AAL5)
2740
                return -EINVAL;
2741
        IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2742
                                 vcc->dev->number, vcc->vpi, vcc->vci);)
2743
 
2744
        /* Device dependent initialization */
2745
        ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2746
        if (!ia_vcc) return -ENOMEM;
2747
        INPH_IA_VCC(vcc) = ia_vcc;
2748
 
2749
        if ((error = open_rx(vcc)))
2750
        {
2751
                IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2752
                ia_close(vcc);
2753
                return error;
2754
        }
2755
 
2756
        if ((error = open_tx(vcc)))
2757
        {
2758
                IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2759
                ia_close(vcc);
2760
                return error;
2761
        }
2762
 
2763
        set_bit(ATM_VF_READY,&vcc->flags);
2764
 
2765
#ifndef MODULE
2766
        {
2767
           static u8 first = 1;
2768
           if (first) {
2769
              ia_timer.expires = jiffies + 3*HZ;
2770
              add_timer(&ia_timer);
2771
              first = 0;
2772
           }
2773
        }
2774
#endif
2775
        IF_EVENT(printk("ia open returning\n");)
2776
        return 0;
2777
}
2778
 
2779
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2780
{
2781
        IF_EVENT(printk(">ia_change_qos\n");)
2782
        return 0;
2783
}
2784
 
2785
static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
2786
{
2787
   IA_CMDBUF ia_cmds;
2788
   IADEV *iadev;
2789
   int i, board;
2790
   u16 *tmps;
2791
   IF_EVENT(printk(">ia_ioctl\n");)
2792
   if (cmd != IA_CMD) {
2793
      if (!dev->phy->ioctl) return -EINVAL;
2794
      return dev->phy->ioctl(dev,cmd,arg);
2795
   }
2796
   if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2797
   board = ia_cmds.status;
2798
   if ((board < 0) || (board > iadev_count))
2799
         board = 0;
2800
   iadev = ia_dev[board];
2801
   switch (ia_cmds.cmd) {
2802
   case MEMDUMP:
2803
   {
2804
        switch (ia_cmds.sub_cmd) {
2805
          case MEMDUMP_DEV:
2806
             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2807
             if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2808
                return -EFAULT;
2809
             ia_cmds.status = 0;
2810
             break;
2811
          case MEMDUMP_SEGREG:
2812
             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2813
             tmps = (u16 *)ia_cmds.buf;
2814
             for(i=0; i<0x80; i+=2, tmps++)
2815
                if(put_user(*(u16*)(iadev->seg_reg+i), tmps)) return -EFAULT;
2816
             ia_cmds.status = 0;
2817
             ia_cmds.len = 0x80;
2818
             break;
2819
          case MEMDUMP_REASSREG:
2820
             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2821
             tmps = (u16 *)ia_cmds.buf;
2822
             for(i=0; i<0x80; i+=2, tmps++)
2823
                if(put_user(*(u16*)(iadev->reass_reg+i), tmps)) return -EFAULT;
2824
             ia_cmds.status = 0;
2825
             ia_cmds.len = 0x80;
2826
             break;
2827
          case MEMDUMP_FFL:
2828
          {
2829
             ia_regs_t       *regs_local;
2830
             ffredn_t        *ffL;
2831
             rfredn_t        *rfL;
2832
 
2833
             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2834
             regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2835
             if (!regs_local) return -ENOMEM;
2836
             ffL = &regs_local->ffredn;
2837
             rfL = &regs_local->rfredn;
2838
             /* Copy real rfred registers into the local copy */
2839
             for (i=0; i<(sizeof (rfredn_t))/4; i++)
2840
                ((u_int *)rfL)[i] = ((u_int *)iadev->reass_reg)[i] & 0xffff;
2841
                /* Copy real ffred registers into the local copy */
2842
             for (i=0; i<(sizeof (ffredn_t))/4; i++)
2843
                ((u_int *)ffL)[i] = ((u_int *)iadev->seg_reg)[i] & 0xffff;
2844
 
2845
             if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2846
                kfree(regs_local);
2847
                return -EFAULT;
2848
             }
2849
             kfree(regs_local);
2850
             printk("Board %d registers dumped\n", board);
2851
             ia_cmds.status = 0;
2852
         }
2853
             break;
2854
         case READ_REG:
2855
         {
2856
             if (!capable(CAP_NET_ADMIN)) return -EPERM;
2857
             desc_dbg(iadev);
2858
             ia_cmds.status = 0;
2859
         }
2860
             break;
2861
         case 0x6:
2862
         {
2863
             ia_cmds.status = 0;
2864
             printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2865
             printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2866
         }
2867
             break;
2868
         case 0x8:
2869
         {
2870
             struct k_sonet_stats *stats;
2871
             stats = &PRIV(_ia_dev[board])->sonet_stats;
2872
             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2873
             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2874
             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2875
             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2876
             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2877
             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2878
             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2879
             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2880
             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2881
         }
2882
            ia_cmds.status = 0;
2883
            break;
2884
         case 0x9:
2885
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2886
            for (i = 1; i <= iadev->num_rx_desc; i++)
2887
               free_desc(_ia_dev[board], i);
2888
            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2889
                                            iadev->reass_reg+REASS_MASK_REG);
2890
            iadev->rxing = 1;
2891
 
2892
            ia_cmds.status = 0;
2893
            break;
2894
 
2895
         case 0xb:
2896
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2897
            IaFrontEndIntr(iadev);
2898
            break;
2899
         case 0xa:
2900
            if (!capable(CAP_NET_ADMIN)) return -EPERM;
2901
         {
2902
             ia_cmds.status = 0;
2903
             IADebugFlag = ia_cmds.maddr;
2904
             printk("New debug option loaded\n");
2905
         }
2906
             break;
2907
         default:
2908
             ia_cmds.status = 0;
2909
             break;
2910
      }
2911
   }
2912
      break;
2913
   default:
2914
      break;
2915
 
2916
   }
2917
   return 0;
2918
}
2919
 
2920
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2921
        void *optval, int optlen)
2922
{
2923
        IF_EVENT(printk(">ia_getsockopt\n");)
2924
        return -EINVAL;
2925
}
2926
 
2927
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2928
        void *optval, int optlen)
2929
{
2930
        IF_EVENT(printk(">ia_setsockopt\n");)
2931
        return -EINVAL;
2932
}
2933
 
2934
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2935
        IADEV *iadev;
2936
        struct dle *wr_ptr;
2937
        struct tx_buf_desc *buf_desc_ptr;
2938
        int desc;
2939
        int comp_code;
2940
        int total_len;
2941
        struct cpcs_trailer *trailer;
2942
        struct ia_vcc *iavcc;
2943
 
2944
        iadev = INPH_IA_DEV(vcc->dev);
2945
        iavcc = INPH_IA_VCC(vcc);
2946
        if (!iavcc->txing) {
2947
           printk("discard packet on closed VC\n");
2948
           if (vcc->pop)
2949
                vcc->pop(vcc, skb);
2950
           else
2951
                dev_kfree_skb_any(skb);
2952
           return 0;
2953
        }
2954
 
2955
        if (skb->len > iadev->tx_buf_sz - 8) {
2956
           printk("Transmit size over tx buffer size\n");
2957
           if (vcc->pop)
2958
                 vcc->pop(vcc, skb);
2959
           else
2960
                 dev_kfree_skb_any(skb);
2961
          return 0;
2962
        }
2963
        if ((u32)skb->data & 3) {
2964
           /* The copy will end up aligned */
2965
           struct sk_buff *newskb = skb_copy(skb, GFP_ATOMIC);
2966
           if(newskb == NULL)
2967
           {
2968
                   if (vcc->pop)
2969
                         vcc->pop(vcc, skb);
2970
                   else
2971
                         dev_kfree_skb_any(skb);
2972
                   return 0;
2973
           }
2974
           dev_kfree_skb_any(skb);
2975
           skb = newskb;
2976
        }
2977
        /* Get a descriptor number from our free descriptor queue
2978
           We get the descr number from the TCQ now, since I am using
2979
           the TCQ as a free buffer queue. Initially TCQ will be
2980
           initialized with all the descriptors and is hence, full.
2981
        */
2982
        desc = get_desc (iadev, iavcc);
2983
        if (desc == 0xffff)
2984
            return 1;
2985
        comp_code = desc >> 13;
2986
        desc &= 0x1fff;
2987
 
2988
        if ((desc == 0) || (desc > iadev->num_tx_desc))
2989
        {
2990
                IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2991
                atomic_inc(&vcc->stats->tx);
2992
                if (vcc->pop)
2993
                    vcc->pop(vcc, skb);
2994
                else
2995
                    dev_kfree_skb_any(skb);
2996
                return 0;   /* return SUCCESS */
2997
        }
2998
 
2999
        if (comp_code)
3000
        {
3001
            IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
3002
                                                            desc, comp_code);)
3003
        }
3004
 
3005
        /* remember the desc and vcc mapping */
3006
        iavcc->vc_desc_cnt++;
3007
        iadev->desc_tbl[desc-1].iavcc = iavcc;
3008
        iadev->desc_tbl[desc-1].txskb = skb;
3009
        IA_SKB_STATE(skb) = 0;
3010
 
3011
        iadev->ffL.tcq_rd += 2;
3012
        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
3013
                iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
3014
        writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
3015
 
3016
        /* Put the descriptor number in the packet ready queue
3017
                and put the updated write pointer in the DLE field
3018
        */
3019
        *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
3020
 
3021
        iadev->ffL.prq_wr += 2;
3022
        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
3023
                iadev->ffL.prq_wr = iadev->ffL.prq_st;
3024
 
3025
        /* Figure out the exact length of the packet and padding required to
3026
           make it  aligned on a 48 byte boundary.  */
3027
        total_len = skb->len + sizeof(struct cpcs_trailer);
3028
        total_len = ((total_len + 47) / 48) * 48;
3029
        IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
3030
 
3031
        /* Put the packet in a tx buffer */
3032
        trailer = iadev->tx_buf[desc-1].cpcs;
3033
        IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
3034
                  (u32)skb, (u32)skb->data, skb->len, desc);)
3035
        trailer->control = 0;
3036
        /*big endian*/
3037
        trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
3038
        trailer->crc32 = 0;      /* not needed - dummy bytes */
3039
 
3040
        /* Display the packet */
3041
        IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
3042
                                                        skb->len, tcnter++);
3043
        xdump(skb->data, skb->len, "TX: ");
3044
        printk("\n");)
3045
 
3046
        /* Build the buffer descriptor */
3047
        buf_desc_ptr = (struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
3048
        buf_desc_ptr += desc;   /* points to the corresponding entry */
3049
        buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
3050
        /* Huh ? p.115 of users guide describes this as a read-only register */
3051
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
3052
        buf_desc_ptr->vc_index = vcc->vci;
3053
        buf_desc_ptr->bytes = total_len;
3054
 
3055
        if (vcc->qos.txtp.traffic_class == ATM_ABR)
3056
           clear_lockup (vcc, iadev);
3057
 
3058
        /* Build the DLE structure */
3059
        wr_ptr = iadev->tx_dle_q.write;
3060
        memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3061
        wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3062
                skb->len, PCI_DMA_TODEVICE);
3063
        wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3064
                                                  buf_desc_ptr->buf_start_lo;
3065
        /* wr_ptr->bytes = swap(total_len);     didn't seem to affect ?? */
3066
        wr_ptr->bytes = skb->len;
3067
 
3068
        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
3069
        if ((wr_ptr->bytes >> 2) == 0xb)
3070
           wr_ptr->bytes = 0x30;
3071
 
3072
        wr_ptr->mode = TX_DLE_PSI;
3073
        wr_ptr->prq_wr_ptr_data = 0;
3074
 
3075
        /* end is not to be used for the DLE q */
3076
        if (++wr_ptr == iadev->tx_dle_q.end)
3077
                wr_ptr = iadev->tx_dle_q.start;
3078
 
3079
        /* Build trailer dle */
3080
        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3081
        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3082
          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3083
 
3084
        wr_ptr->bytes = sizeof(struct cpcs_trailer);
3085
        wr_ptr->mode = DMA_INT_ENABLE;
3086
        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
3087
 
3088
        /* end is not to be used for the DLE q */
3089
        if (++wr_ptr == iadev->tx_dle_q.end)
3090
                wr_ptr = iadev->tx_dle_q.start;
3091
 
3092
        iadev->tx_dle_q.write = wr_ptr;
3093
        ATM_DESC(skb) = vcc->vci;
3094
        skb_queue_tail(&iadev->tx_dma_q, skb);
3095
 
3096
        atomic_inc(&vcc->stats->tx);
3097
        iadev->tx_pkt_cnt++;
3098
        /* Increment transaction counter */
3099
        writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
3100
 
3101
#if 0        
3102
        /* add flow control logic */
3103
        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3104
          if (iavcc->vc_desc_cnt > 10) {
3105
             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3106
            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3107
              iavcc->flow_inc = -1;
3108
              iavcc->saved_tx_quota = vcc->tx_quota;
3109
           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3110
             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3111
             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3112
              iavcc->flow_inc = 0;
3113
           }
3114
        }
3115
#endif
3116
        IF_TX(printk("ia send done\n");)
3117
        return 0;
3118
}
3119
 
3120
static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3121
{
3122
        IADEV *iadev;
3123
        struct ia_vcc *iavcc;
3124
        unsigned long flags;
3125
 
3126
        iadev = INPH_IA_DEV(vcc->dev);
3127
        iavcc = INPH_IA_VCC(vcc);
3128
        if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3129
        {
3130
            if (!skb)
3131
                printk(KERN_CRIT "null skb in ia_send\n");
3132
            else dev_kfree_skb_any(skb);
3133
            return -EINVAL;
3134
        }
3135
        spin_lock_irqsave(&iadev->tx_lock, flags);
3136
        if (!test_bit(ATM_VF_READY,&vcc->flags)){
3137
            dev_kfree_skb_any(skb);
3138
            spin_unlock_irqrestore(&iadev->tx_lock, flags);
3139
            return -EINVAL;
3140
        }
3141
        ATM_SKB(skb)->vcc = vcc;
3142
 
3143
        if (skb_peek(&iadev->tx_backlog)) {
3144
           skb_queue_tail(&iadev->tx_backlog, skb);
3145
        }
3146
        else {
3147
           if (ia_pkt_tx (vcc, skb)) {
3148
              skb_queue_tail(&iadev->tx_backlog, skb);
3149
           }
3150
        }
3151
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
3152
        return 0;
3153
 
3154
}
3155
 
3156
static int ia_sg_send(struct atm_vcc *vcc, unsigned long start,
3157
        unsigned long size)
3158
{
3159
        IF_EVENT(printk(">ia_sg_send\n");)
3160
        return 0;
3161
}
3162
 
3163
 
3164
/*
 * /proc read callback.  Emits two "pages": page 0 describes the board
 * (PHY type, VC count from the PCI map size, packet-memory size) and
 * page 1 lists buffer configuration and the accumulated traffic counters.
 * Returns the number of bytes written, or 0 when *pos is past the end.
 *
 * Fix: corrected the user-visible typo "Packets Receiverd" -> "Packets
 * Received" (padding adjusted so the colon column stays aligned).
 */
static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
  int   left = *pos, n;
  char  *tmpPtr;
  IADEV *iadev = INPH_IA_DEV(dev);

  if(!left--) {
     /* the 25 Mbit board has a fixed description */
     if (iadev->phy_type == FE_25MBIT_PHY) {
       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
       return n;
     }
     if (iadev->phy_type == FE_DS3_PHY)
        n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
     else if (iadev->phy_type == FE_E3_PHY)
        n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
     else if (iadev->phy_type == FE_UTP_OPTION)
         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
     else
        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
     /* append VC count (derived from PCI map size) and memory size */
     tmpPtr = page + n;
     if (iadev->pci_map_size == 0x40000)
        n += sprintf(tmpPtr, "-1KVC-");
     else
        n += sprintf(tmpPtr, "-4KVC-");
     tmpPtr = page + n;
     if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
        n += sprintf(tmpPtr, "1M  \n");
     else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
        n += sprintf(tmpPtr, "512K\n");
     else
       n += sprintf(tmpPtr, "128K\n");
     return n;
  }
  if (!left) {
     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
                           "  Size of Tx Buffer  :  %u\n"
                           "  Number of Rx Buffer:  %u\n"
                           "  Size of Rx Buffer  :  %u\n"
                           "  Packets Received   :  %u\n"
                           "  Packets Transmitted:  %u\n"
                           "  Cells Received     :  %u\n"
                           "  Cells Transmitted  :  %u\n"
                           "  Board Dropped Cells:  %u\n"
                           "  Board Dropped Pkts :  %u\n",
                           iadev->num_tx_desc,  iadev->tx_buf_sz,
                           iadev->num_rx_desc,  iadev->rx_buf_sz,
                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
                           iadev->drop_rxcell, iadev->drop_rxpkt);
  }
  return 0;
}
3215
 
3216
static const struct atmdev_ops ops = {
3217
        open:           ia_open,
3218
        close:          ia_close,
3219
        ioctl:          ia_ioctl,
3220
        getsockopt:     ia_getsockopt,
3221
        setsockopt:     ia_setsockopt,
3222
        send:           ia_send,
3223
        sg_send:        ia_sg_send,
3224
        phy_put:        ia_phy_put,
3225
        phy_get:        ia_phy_get,
3226
        change_qos:     ia_change_qos,
3227
        proc_read:      ia_proc_read,
3228
        owner:          THIS_MODULE,
3229
};
3230
 
3231
 
3232
/*
 * ia_init_one - PCI probe callback: set up one Interphase (i)Chip adapter.
 * @pdev: the PCI device being probed
 * @ent:  matching entry from ia_pci_tbl (unused here)
 *
 * Allocates the per-adapter IADEV state, enables the PCI device,
 * registers an ATM device with the core, records the adapter in the
 * module-global ia_dev[]/_ia_dev[] tables, and brings the hardware up
 * via ia_init()/ia_start().  Returns 0 on success or a negative errno,
 * unwinding all earlier steps on failure via the goto ladder.
 */
static int __devinit ia_init_one(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
{
        struct atm_dev *dev;
        IADEV *iadev;
        unsigned long flags;
        int ret;

        /* Zeroed per-adapter soft state; freed in ia_remove_one(). */
        iadev = kmalloc(sizeof(*iadev), GFP_KERNEL);
        if (!iadev) {
                ret = -ENOMEM;
                goto err_out;
        }
        memset(iadev, 0, sizeof(*iadev));
        iadev->pci = pdev;

        IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
                pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
        if (pci_enable_device(pdev)) {
                ret = -ENODEV;
                goto err_out_free_iadev;
        }
        dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
        if (!dev) {
                ret = -ENOMEM;
                goto err_out_disable_dev;
        }
        /* Link the ATM device to its driver-private state. */
        INPH_IA_DEV(dev) = iadev;
        IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
        IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
                iadev->LineRate);)

        /*
         * Record the adapter in the module-global tables before bring-up.
         * NOTE(review): ia_dev[]/_ia_dev[] are indexed by iadev_count with
         * no visible bounds check here — confirm the array size covers the
         * maximum number of adapters that can be probed.
         */
        ia_dev[iadev_count] = iadev;
        _ia_dev[iadev_count] = dev;
        iadev_count++;
        spin_lock_init(&iadev->misc_lock);
        /* First fixes first. I don't want to think about this now. */
        /*
         * NOTE(review): ia_init()/ia_start() run with misc_lock held and
         * interrupts disabled; if either can sleep (request_irq,
         * GFP_KERNEL allocation), holding a spinlock here is illegal —
         * verify those helpers before relying on this path.
         */
        spin_lock_irqsave(&iadev->misc_lock, flags);
        if (ia_init(dev) || ia_start(dev)) {
                IF_INIT(printk("IA register failed!\n");)
                /* Roll back the global-table entry added above. */
                iadev_count--;
                ia_dev[iadev_count] = NULL;
                _ia_dev[iadev_count] = NULL;
                spin_unlock_irqrestore(&iadev->misc_lock, flags);
                ret = -EINVAL;
                goto err_out_deregister_dev;
        }
        spin_unlock_irqrestore(&iadev->misc_lock, flags);
        IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

        /* Push this board onto the driver's singly linked board list. */
        iadev->next_board = ia_boards;
        ia_boards = dev;

        pci_set_drvdata(pdev, dev);

        return 0;

err_out_deregister_dev:
        atm_dev_deregister(dev);
err_out_disable_dev:
        pci_disable_device(pdev);
err_out_free_iadev:
        kfree(iadev);
err_out:
        return ret;
}
3298
 
3299
/*
 * ia_remove_one - PCI remove callback: tear down one adapter.
 * @pdev: the PCI device being removed
 *
 * Reverses ia_init_one(): quiesces the PHY, releases the IRQ, drops the
 * adapter from the module-global tables, deregisters the ATM device,
 * unmaps the register window, and frees RX/TX resources plus the
 * per-adapter state.
 */
static void __devexit ia_remove_one(struct pci_dev *pdev)
{
        struct atm_dev *dev = pci_get_drvdata(pdev);
        IADEV *iadev = INPH_IA_DEV(dev);

        /* Clear bit 2 in PHY register 0x10 — presumably quiesces the
           PHY before teardown; confirm against the (i)Chip datasheet. */
        ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10);
        udelay(1);

        /* De-register device */
        free_irq(iadev->irq, dev);
        /* NOTE(review): this assumes adapters are removed in LIFO order;
           removing a middle board would clear the wrong table slot. */
        iadev_count--;
        ia_dev[iadev_count] = NULL;
        _ia_dev[iadev_count] = NULL;
        atm_dev_deregister(dev);
        IF_EVENT(printk("iav deregistered at (itf:%d)\n", dev->number);)

        /* Unmap the BAR mapping established during init. */
        iounmap((void *) iadev->base);
        pci_disable_device(pdev);

        /* Release descriptor rings/buffers before freeing the softc. */
        ia_free_rx(iadev);
        ia_free_tx(iadev);

        kfree(iadev);
}
3323
 
3324
/*
 * PCI IDs this driver binds to: Interphase devices 0x0008 and 0x0009,
 * any subsystem.  The zeroed entry terminates the table.
 */
static struct pci_device_id ia_pci_tbl[] __devinitdata = {
        { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
        { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
        { 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3330
 
3331
/* PCI driver glue: binds ia_init_one/ia_remove_one to the IDs above. */
static struct pci_driver ia_driver = {
        .name =         DEV_LABEL,
        .id_table =     ia_pci_tbl,
        .probe =        ia_init_one,
        .remove =       __devexit_p(ia_remove_one),
};
3337
 
3338
/*
 * Module entry point: register the PCI driver and, on success, arm the
 * driver-wide housekeeping timer to fire about three seconds out.
 * Returns whatever pci_module_init() reported.
 */
static int __init ia_init_module(void)
{
        int err = pci_module_init(&ia_driver);

        if (err < 0)
                return err;

        ia_timer.expires = jiffies + 3 * HZ;
        add_timer(&ia_timer);
        return err;
}
3349
 
3350
/*
 * Module exit: unbind all devices, then stop the housekeeping timer.
 * NOTE(review): the timer is deleted only after the driver (and thus
 * every device) is unregistered; if ia_timer's handler touches device
 * state, there is a window where it could run against torn-down
 * devices — verify the handler before changing this ordering.
 */
static void __exit ia_cleanup_module(void)
{
        pci_unregister_driver(&ia_driver);

        del_timer(&ia_timer);
}
3356
 
3357
/* Standard module entry/exit registration. */
module_init(ia_init_module);
module_exit(ia_cleanup_module);

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.