/*
2
  Madge Ambassador ATM Adapter driver.
3
  Copyright (C) 1995-1999  Madge Networks Ltd.
4
 
5
  This program is free software; you can redistribute it and/or modify
6
  it under the terms of the GNU General Public License as published by
7
  the Free Software Foundation; either version 2 of the License, or
8
  (at your option) any later version.
9
 
10
  This program is distributed in the hope that it will be useful,
11
  but WITHOUT ANY WARRANTY; without even the implied warranty of
12
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13
  GNU General Public License for more details.
14
 
15
  You should have received a copy of the GNU General Public License
16
  along with this program; if not, write to the Free Software
17
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18
 
19
  The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian
20
  system and in the file COPYING in the Linux kernel source.
21
*/
22
 
23
/* * dedicated to the memory of Graham Gordon 1971-1998 * */
24
 
25
#include <linux/module.h>
26
#include <linux/types.h>
27
#include <linux/pci.h>
28
#include <linux/kernel.h>
29
#include <linux/init.h>
30
#include <linux/ioport.h>
31
#include <linux/atmdev.h>
32
#include <linux/delay.h>
33
#include <linux/interrupt.h>
34
 
35
#include <asm/atomic.h>
36
#include <asm/io.h>
37
#include <asm/byteorder.h>
38
 
39
#include "ambassador.h"
40
 
41
#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
42
#define description_string "Madge ATM Ambassador driver"
43
#define version_string "1.2.4"
44
 
45
static inline void __init show_version (void) {
46
  printk ("%s version %s\n", description_string, version_string);
47
}
48
 
49
/*
50
 
51
  Theory of Operation
52
 
53
  I Hardware, detection, initialisation and shutdown.
54
 
55
  1. Supported Hardware
56
 
57
  This driver is for the PCI ATMizer-based Ambassador card (except
58
  very early versions). It is not suitable for the similar EISA "TR7"
59
  card. Commercially, both cards are known as Collage Server ATM
60
  adapters.
61
 
62
  The loader supports image transfer to the card, image start, and a few
63
  other miscellaneous commands.
64
 
65
  Only AAL5 is supported with vpi = 0 and vci in the range 0 to 1023.
66
 
67
  The cards are big-endian.
68
 
69
  2. Detection
70
 
71
  Standard PCI stuff, the early cards are detected and rejected.
72
 
73
  3. Initialisation
74
 
75
  The cards are reset and the self-test results are checked. The
76
  microcode image is then transferred and started. This waits for a
77
  pointer to a descriptor containing details of the host-based queues
78
  and buffers and various parameters etc. Once they are processed
79
  normal operations may begin. The BIA is read using a microcode
80
  command.
81
 
82
  4. Shutdown
83
 
84
  This may be accomplished either by a card reset or via the microcode
85
  shutdown command. Further investigation required.
86
 
87
  5. Persistent state
88
 
89
  The card reset does not affect PCI configuration (good) or the
90
  contents of several other "shared run-time registers" (bad) which
91
  include doorbell and interrupt control as well as EEPROM and PCI
92
  control. The driver must be careful when modifying these registers
93
  not to touch bits it does not use and to undo any changes at exit.
94
 
95
  II Driver software
96
 
97
  0. Generalities
98
 
99
  The adapter is quite intelligent (fast) and has a simple interface
100
  (few features). VPI is always zero, 1024 VCIs are supported. There
101
  is limited cell rate support. UBR channels can be capped and ABR
102
  (explicit rate, but not EFCI) is supported. There is no CBR or VBR
103
  support.
104
 
105
  1. Driver <-> Adapter Communication
106
 
107
  Apart from the basic loader commands, the driver communicates
108
  through three entities: the command queue (CQ), the transmit queue
109
  pair (TXQ) and the receive queue pairs (RXQ). These three entities
110
  are set up by the host and passed to the microcode just after it has
111
  been started.
112
 
113
  All queues are host-based circular queues. They are contiguous and
114
  (due to hardware limitations) have some restrictions as to their
115
  locations in (bus) memory. They are of the "full means the same as
116
  empty so don't do that" variety since the adapter uses pointers
117
  internally.
118
 
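  A minimal sketch of the circular advance (assuming this is what the
  NEXTQ macro from ambassador.h amounts to):

    next = (ptr + 1 == limit) ? start : ptr + 1;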
119
  The queue pairs work as follows: one queue is for supply to the
120
  adapter, items in it are pending and are owned by the adapter; the
121
  other is the queue for return from the adapter, items in it have
122
  been dealt with by the adapter. The host adds items to the supply
123
  (TX descriptors and free RX buffer descriptors) and removes items
124
  from the return (TX and RX completions). The adapter deals with out
125
  of order completions.
126
 
127
  Interrupts (card to host) and the doorbell (host to card) are used
128
  for signalling.
129
 
130
  1. CQ
131
 
132
  This is to communicate "open VC", "close VC", "get stats" etc. to
133
  the adapter. At most one command is retired every millisecond by the
134
  card. There is no out of order completion or notification. The
135
  driver needs to check the return code of the command, waiting as
136
  appropriate.
137
 
138
  2. TXQ
139
 
140
  TX supply items are of variable length (scatter gather support) and
141
  so the queue items are (more or less) pointers to the real thing.
142
  Each TX supply item contains a unique, host-supplied handle (the skb
143
  bus address seems most sensible as this works for Alphas as well,
144
  there is no need to do any endian conversions on the handles).
145
 
146
  TX return items consist of just the handles above.
147
 
148
  3. RXQ (up to 4 of these with different lengths and buffer sizes)
149
 
150
  RX supply items consist of a unique, host-supplied handle (the skb
151
  bus address again) and a pointer to the buffer data area.
152
 
153
  RX return items consist of the handle above, the VC, length and a
154
  status word. This just screams "oh so easy" doesn't it?
155
 
156
  Note on RX pool sizes:
157
 
158
  Each pool should have enough buffers to handle a back-to-back stream
159
  of minimum sized frames on a single VC. For example:
160
 
161
    frame spacing = 3us (about right)
162
 
163
    delay = IRQ lat + RX handling + RX buffer replenish = 20 (us)  (a guess)
164
 
165
    min number of buffers for one VC = 1 + delay/spacing (buffers)
166
 
167
    delay/spacing = latency = (20+2)/3 = 7 (buffers)  (rounding up)
168
 
169
  The 20us delay assumes that there is no need to sleep; if we need to
170
  sleep to get buffers we are going to drop frames anyway.
171
 
172
  In fact, each pool should have enough buffers to support the
173
  simultaneous reassembly of a separate frame on each VC and cope with
174
  the case in which frames complete in round robin cell fashion on
175
  each VC.
176
 
177
  Only one frame can complete at each cell arrival, so if "n" VCs are
178
  open, the worst case is to have them all complete frames together
179
  followed by all starting new frames together.
180
 
181
    desired number of buffers = n + delay/spacing
182
 
183
  These are the extreme requirements; however, they are "n+k" for some
184
  "k", so we only have the constant to choose. This is the argument
185
  rx_lats, which currently defaults to 7.
186
 
187
  Actually, "n ? n+k : 0" is better and this is what is implemented,
188
  subject to the limit given by the pool size.
189
 
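  For example, with rx_lats = 7 and three VCs open on a pool, that pool
  wants 3 + 7 = 10 buffers; with no VCs open on it, it wants none.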
190
  4. Driver locking
191
 
192
  Simple spinlocks are used around the TX and RX queue mechanisms.
193
  Anyone with a faster, working method is welcome to implement it.
194
 
195
  The adapter command queue is protected with a spinlock. We always
196
  wait for commands to complete.
197
 
198
  A more complex form of locking is used around parts of the VC open
199
  and close functions. There are three reasons for a lock: 1. we need
200
  to do atomic rate reservation and release (not used yet), 2. Opening
201
  sometimes involves two adapter commands which must not be separated
202
  by another command on the same VC, 3. the changes to RX pool size
203
  must be atomic. The lock needs to work over context switches, so we
204
  use a semaphore.
205
 
206
  III Hardware Features and Microcode Bugs
207
 
208
  1. Byte Ordering
209
 
210
  *%^"$&%^$*&^"$(%^$#&^%$(&#%$*(&^#%!"!"!*!
211
 
212
  2. Memory access
213
 
214
  All structures that are not accessed using DMA must be 4-byte
215
  aligned (not a problem) and must not cross 4MB boundaries.
216
 
217
  There is a DMA memory hole at E0000000-E00000FF (groan).
218
 
219
  TX fragments (DMA read) must not cross 4MB boundaries (would be 16MB
220
  but for a hardware bug).
221
 
222
  RX buffers (DMA write) must not cross 16MB boundaries and must
223
  include spare trailing bytes up to the next 4-byte boundary; they
224
  will be written with rubbish.
225
 
226
  The PLX likes to prefetch; if reading up to 4 u32 past the end of
227
  each TX fragment is not a problem, then TX can be made to go a
228
  little faster by passing a flag at init that disables a prefetch
229
  workaround. We do not pass this flag. (new microcode only)
230
 
231
  Now we:
232
  . Note that alloc_skb rounds up size to a 16-byte boundary.
233
  . Ensure all areas do not traverse 4MB boundaries.
234
  . Ensure all areas do not start at a E00000xx bus address.
235
  (I cannot be certain, but this may always hold with Linux)
236
  . Make all failures cause a loud message.
237
  . Discard non-conforming SKBs (causes TX failure or RX fill delay).
238
  . Discard non-conforming TX fragment descriptors (the TX fails).
239
  In the future we could:
240
  . Allow RX areas that traverse 4MB (but not 16MB) boundaries.
241
  . Segment TX areas into some/more fragments, when necessary.
242
  . Relax checks for non-DMA items (ignore hole).
243
  . Give scatter-gather (iovec) requirements using ???. (?)
244
 
245
  3. VC close is broken (only for new microcode)
246
 
247
  The VC close adapter microcode command fails to do anything if any
248
  frames have been received on the VC but none have been transmitted.
249
  Frames continue to be reassembled and passed (with IRQ) to the
250
  driver.
251
 
252
  IV To Do List
253
 
254
  . Fix bugs!
255
 
256
  . Timer code may be broken.
257
 
258
  . Deal with buggy VC close (somehow) in microcode 12.
259
 
260
  . Handle interrupted and/or non-blocking writes - is this a job for
261
    the protocol layer?
262
 
263
  . Add code to break up TX fragments when they span 4MB boundaries.
264
 
265
  . Add SUNI phy layer (need to know where SUNI lives on card).
266
 
267
  . Implement a tx_alloc fn to (a) satisfy TX alignment etc. and (b)
268
    leave extra headroom space for Ambassador TX descriptors.
269
 
270
  . Understand these elements of struct atm_vcc: recvq (proto?),
271
    sleep, callback, listenq, backlog_quota, reply and user_back.
272
 
273
  . Adjust TX/RX skb allocation to favour IP with LANE/CLIP (configurable).
274
 
275
  . Impose a TX-pending limit (2?) on each VC, help avoid TX q overflow.
276
 
277
  . Decide whether RX buffer recycling is or can be made completely safe;
278
    turn it back on. It looks like Werner is going to axe this.
279
 
280
  . Implement QoS changes on open VCs (involves extracting parts of VC open
281
    and close into separate functions and using them to make changes).
282
 
283
  . Hack on command queue so that someone can issue multiple commands and wait
284
    on the last one (OR only "no-op" or "wait" commands are waited for).
285
 
286
  . Eliminate need for while-schedule around do_command.
287
 
288
*/
289
 
290
/********** microcode **********/
291
 
292
#ifdef AMB_NEW_MICROCODE
293
#define UCODE(x) UCODE2(atmsar12.x)
294
#else
295
#define UCODE(x) UCODE2(atmsar11.x)
296
#endif
297
#define UCODE2(x) #x
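/* for example, without AMB_NEW_MICROCODE, UCODE(start) expands via UCODE2
   to the string "atmsar11.start", which is used as an #include file name below */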
298
 
299
static u32 __initdata ucode_start =
300
#include UCODE(start)
301
;
302
 
303
static region __initdata ucode_regions[] = {
304
#include UCODE(regions)
305
  { 0, 0 }
306
};
307
 
308
static u32 __initdata ucode_data[] = {
309
#include UCODE(data)
310
  0xdeadbeef
311
};
312
 
313
/********** globals **********/
314
 
315
static amb_dev * amb_devs = NULL;
316
static struct timer_list housekeeping;
317
 
318
static unsigned short debug = 0;
319
static unsigned int cmds = 8;
320
static unsigned int txs = 32;
321
static unsigned int rxs[NUM_RX_POOLS] = { 64, 64, 64, 64 };
322
static unsigned int rxs_bs[NUM_RX_POOLS] = { 4080, 12240, 36720, 65535 };
323
static unsigned int rx_lats = 7;
324
static unsigned char pci_lat = 0;
325
 
326
static const unsigned long onegigmask = -1 << 30;
327
 
328
/********** access to adapter **********/
329
 
330
static inline void wr_plain (const amb_dev * dev, size_t addr, u32 data) {
331
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08x <- %08x", addr, data);
332
#ifdef AMB_MMIO
333
  dev->membase[addr / sizeof(u32)] = data;
334
#else
335
  outl (data, dev->iobase + addr);
336
#endif
337
}
338
 
339
static inline u32 rd_plain (const amb_dev * dev, size_t addr) {
340
#ifdef AMB_MMIO
341
  u32 data = dev->membase[addr / sizeof(u32)];
342
#else
343
  u32 data = inl (dev->iobase + addr);
344
#endif
345
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08x -> %08x", addr, data);
346
  return data;
347
}
348
 
349
static inline void wr_mem (const amb_dev * dev, size_t addr, u32 data) {
350
  u32 be = cpu_to_be32 (data);
351
  PRINTD (DBG_FLOW|DBG_REGS, "wr: %08x <- %08x b[%08x]", addr, data, be);
352
#ifdef AMB_MMIO
353
  dev->membase[addr / sizeof(u32)] = be;
354
#else
355
  outl (be, dev->iobase + addr);
356
#endif
357
}
358
 
359
static inline u32 rd_mem (const amb_dev * dev, size_t addr) {
360
#ifdef AMB_MMIO
361
  u32 be = dev->membase[addr / sizeof(u32)];
362
#else
363
  u32 be = inl (dev->iobase + addr);
364
#endif
365
  u32 data = be32_to_cpu (be);
366
  PRINTD (DBG_FLOW|DBG_REGS, "rd: %08x -> %08x b[%08x]", addr, data, be);
367
  return data;
368
}
369
 
370
/********** dump routines **********/
371
 
372
static inline void dump_registers (const amb_dev * dev) {
373
#ifdef DEBUG_AMBASSADOR
374
  if (debug & DBG_REGS) {
375
    size_t i;
376
    PRINTD (DBG_REGS, "reading PLX control: ");
377
    for (i = 0x00; i < 0x30; i += sizeof(u32))
378
      rd_mem (dev, i);
379
    PRINTD (DBG_REGS, "reading mailboxes: ");
380
    for (i = 0x40; i < 0x60; i += sizeof(u32))
381
      rd_mem (dev, i);
382
    PRINTD (DBG_REGS, "reading doorb irqev irqen reset:");
383
    for (i = 0x60; i < 0x70; i += sizeof(u32))
384
      rd_mem (dev, i);
385
  }
386
#else
387
  (void) dev;
388
#endif
389
  return;
390
}
391
 
392
static inline void dump_loader_block (volatile loader_block * lb) {
393
#ifdef DEBUG_AMBASSADOR
394
  unsigned int i;
395
  PRINTDB (DBG_LOAD, "lb @ %p; res: %d, cmd: %d, pay:",
396
           lb, be32_to_cpu (lb->result), be32_to_cpu (lb->command));
397
  for (i = 0; i < MAX_COMMAND_DATA; ++i)
398
    PRINTDM (DBG_LOAD, " %08x", be32_to_cpu (lb->payload.data[i]));
399
  PRINTDE (DBG_LOAD, ", vld: %08x", be32_to_cpu (lb->valid));
400
#else
401
  (void) lb;
402
#endif
403
  return;
404
}
405
 
406
static inline void dump_command (command * cmd) {
407
#ifdef DEBUG_AMBASSADOR
408
  unsigned int i;
409
  PRINTDB (DBG_CMD, "cmd @ %p, req: %08x, pars:",
410
           cmd, /*be32_to_cpu*/ (cmd->request));
411
  for (i = 0; i < 3; ++i)
412
    PRINTDM (DBG_CMD, " %08x", /*be32_to_cpu*/ (cmd->args.par[i]));
413
  PRINTDE (DBG_CMD, "");
414
#else
415
  (void) cmd;
416
#endif
417
  return;
418
}
419
 
420
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
421
#ifdef DEBUG_AMBASSADOR
422
  unsigned int i;
423
  unsigned char * data = skb->data;
424
  PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
425
  for (i=0; i<skb->len && i < 256;i++)
426
    PRINTDM (DBG_DATA, "%02x ", data[i]);
427
  PRINTDE (DBG_DATA,"");
428
#else
429
  (void) prefix;
430
  (void) vc;
431
  (void) skb;
432
#endif
433
  return;
434
}
435
 
436
/********** check memory areas for use by Ambassador **********/
437
 
438
/* see limitations under Hardware Features */
439
 
440
static inline int check_area (void * start, size_t length) {
441
  // assumes length > 0
442
  const u32 fourmegmask = -1 << 22;
443
  const u32 twofivesixmask = -1 << 8;
444
  const u32 starthole = 0xE0000000;
445
  u32 startaddress = virt_to_bus (start);
446
  u32 lastaddress = startaddress+length-1;
447
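  // a nonzero XOR under fourmegmask means start and end fall in different
  // 4MB blocks; the second test catches areas starting in the E00000xx hole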
  if ((startaddress ^ lastaddress) & fourmegmask ||
448
      (startaddress & twofivesixmask) == starthole) {
449
    PRINTK (KERN_ERR, "check_area failure: [%x,%x] - mail maintainer!",
450
            startaddress, lastaddress);
451
    return -1;
452
  } else {
453
    return 0;
454
  }
455
}
456
 
457
/********** free an skb (as per ATM device driver documentation) **********/
458
 
459
static inline void amb_kfree_skb (struct sk_buff * skb) {
460
  if (ATM_SKB(skb)->vcc->pop) {
461
    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
462
  } else {
463
    dev_kfree_skb_any (skb);
464
  }
465
}
466
 
467
/********** TX completion **********/
468
 
469
static inline void tx_complete (amb_dev * dev, tx_out * tx) {
470
  tx_simple * tx_descr = bus_to_virt (tx->handle);
471
  struct sk_buff * skb = tx_descr->skb;
472
 
473
  PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
474
 
475
  // VC layer stats
476
  atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
477
 
478
  // free the descriptor
479
  kfree (tx_descr);
480
 
481
  // free the skb
482
  amb_kfree_skb (skb);
483
 
484
  dev->stats.tx_ok++;
485
  return;
486
}
487
 
488
/********** RX completion **********/
489
 
490
static void rx_complete (amb_dev * dev, rx_out * rx) {
491
  struct sk_buff * skb = bus_to_virt (rx->handle);
492
  u16 vc = be16_to_cpu (rx->vc);
493
  // unused: u16 lec_id = be16_to_cpu (rx->lec_id);
494
  u16 status = be16_to_cpu (rx->status);
495
  u16 rx_len = be16_to_cpu (rx->length);
496
 
497
  PRINTD (DBG_FLOW|DBG_RX, "rx_complete %p %p (len=%hu)", dev, rx, rx_len);
498
 
499
  // XXX move this in and add to VC stats ???
500
  if (!status) {
501
    struct atm_vcc * atm_vcc = dev->rxer[vc];
502
    dev->stats.rx.ok++;
503
 
504
    if (atm_vcc) {
505
 
506
      if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
507
 
508
        if (atm_charge (atm_vcc, skb->truesize)) {
509
 
510
          // prepare socket buffer
511
          ATM_SKB(skb)->vcc = atm_vcc;
512
          skb_put (skb, rx_len);
513
 
514
          dump_skb ("<<<", vc, skb);
515
 
516
          // VC layer stats
517
          atomic_inc(&atm_vcc->stats->rx);
518
          skb->stamp = xtime;
519
          // end of our responsibility
520
          atm_vcc->push (atm_vcc, skb);
521
          return;
522
 
523
        } else {
524
          // someone fix this (message), please!
525
          PRINTD (DBG_INFO|DBG_RX, "dropped thanks to atm_charge (vc %hu, truesize %u)", vc, skb->truesize);
526
          // drop stats incremented in atm_charge
527
        }
528
 
529
      } else {
530
        PRINTK (KERN_INFO, "dropped over-size frame");
531
        // should we count this?
532
        atomic_inc(&atm_vcc->stats->rx_drop);
533
      }
534
 
535
    } else {
536
      PRINTD (DBG_WARN|DBG_RX, "got frame but RX closed for channel %hu", vc);
537
      // this is an adapter bug, only in new version of microcode
538
    }
539
 
540
  } else {
541
    dev->stats.rx.error++;
542
    if (status & CRC_ERR)
543
      dev->stats.rx.badcrc++;
544
    if (status & LEN_ERR)
545
      dev->stats.rx.toolong++;
546
    if (status & ABORT_ERR)
547
      dev->stats.rx.aborted++;
548
    if (status & UNUSED_ERR)
549
      dev->stats.rx.unused++;
550
  }
551
 
552
  dev_kfree_skb_any (skb);
553
  return;
554
}
555
 
556
/*
557
 
558
  Note on queue handling.
559
 
560
  Here "give" and "take" refer to queue entries and a queue (pair)
561
  rather than frames to or from the host or adapter. Empty frame
562
  buffers are given to the RX queue pair and returned unused or
563
  containing RX frames. TX frames (well, pointers to TX fragment
564
  lists) are given to the TX queue pair, completions are returned.
565
 
566
*/
567
 
568
/********** command queue **********/
569
 
570
// I really don't like this, but it's the best I can do at the moment
571
 
572
// also, the callers are responsible for byte order as the microcode
573
// sometimes does 16-bit accesses (yuk yuk yuk)
574
 
575
static int command_do (amb_dev * dev, command * cmd) {
576
  amb_cq * cq = &dev->cq;
577
  volatile amb_cq_ptrs * ptrs = &cq->ptrs;
578
  command * my_slot;
579
  unsigned long timeout;
580
 
581
  PRINTD (DBG_FLOW|DBG_CMD, "command_do %p", dev);
582
 
583
  if (test_bit (dead, &dev->flags))
584
    return 0;
585
 
586
  spin_lock (&cq->lock);
587
 
588
  // if not full...
589
  if (cq->pending < cq->maximum) {
590
    // remember my slot for later
591
    my_slot = ptrs->in;
592
    PRINTD (DBG_CMD, "command in slot %p", my_slot);
593
 
594
    dump_command (cmd);
595
 
596
    // copy command in
597
    *ptrs->in = *cmd;
598
    cq->pending++;
599
    ptrs->in = NEXTQ (ptrs->in, ptrs->start, ptrs->limit);
600
 
601
    // mail the command
602
    wr_mem (dev, offsetof(amb_mem, mb.adapter.cmd_address), virt_to_bus (ptrs->in));
603
 
604
    // prepare to wait for cq->pending milliseconds
605
    // effectively one centisecond on i386
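    // (e.g. HZ = 100 with 3 commands pending: (3*100+999)/1000 = 1 jiffy)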
606
    timeout = (cq->pending*HZ+999)/1000;
607
 
608
    if (cq->pending > cq->high)
609
      cq->high = cq->pending;
610
    spin_unlock (&cq->lock);
611
 
612
    while (timeout) {
613
      // go to sleep
614
      // PRINTD (DBG_CMD, "wait: sleeping %lu for command", timeout);
615
      set_current_state(TASK_UNINTERRUPTIBLE);
616
      timeout = schedule_timeout (timeout);
617
    }
618
 
619
    // wait for my slot to be reached (all waiters are here or above, until...)
620
    while (ptrs->out != my_slot) {
621
      PRINTD (DBG_CMD, "wait: command slot (now at %p)", ptrs->out);
622
      set_current_state(TASK_UNINTERRUPTIBLE);
623
      schedule();
624
    }
625
 
626
    // wait on my slot (... one gets to its slot, and... )
627
    while (ptrs->out->request != cpu_to_be32 (SRB_COMPLETE)) {
628
      PRINTD (DBG_CMD, "wait: command slot completion");
629
      set_current_state(TASK_UNINTERRUPTIBLE);
630
      schedule();
631
    }
632
 
633
    PRINTD (DBG_CMD, "command complete");
634
    // update queue (... moves the queue along to the next slot)
635
    spin_lock (&cq->lock);
636
    cq->pending--;
637
    // copy command out
638
    *cmd = *ptrs->out;
639
    ptrs->out = NEXTQ (ptrs->out, ptrs->start, ptrs->limit);
640
    spin_unlock (&cq->lock);
641
 
642
    return 0;
643
  } else {
644
    cq->filled++;
645
    spin_unlock (&cq->lock);
646
    return -EAGAIN;
647
  }
648
 
649
}
650
 
651
/********** TX queue pair **********/
652
 
653
static inline int tx_give (amb_dev * dev, tx_in * tx) {
654
  amb_txq * txq = &dev->txq;
655
  unsigned long flags;
656
 
657
  PRINTD (DBG_FLOW|DBG_TX, "tx_give %p", dev);
658
 
659
  if (test_bit (dead, &dev->flags))
660
    return 0;
661
 
662
  spin_lock_irqsave (&txq->lock, flags);
663
 
664
  if (txq->pending < txq->maximum) {
665
    PRINTD (DBG_TX, "TX in slot %p", txq->in.ptr);
666
 
667
    *txq->in.ptr = *tx;
668
    txq->pending++;
669
    txq->in.ptr = NEXTQ (txq->in.ptr, txq->in.start, txq->in.limit);
670
    // hand over the TX and ring the bell
671
    wr_mem (dev, offsetof(amb_mem, mb.adapter.tx_address), virt_to_bus (txq->in.ptr));
672
    wr_mem (dev, offsetof(amb_mem, doorbell), TX_FRAME);
673
 
674
    if (txq->pending > txq->high)
675
      txq->high = txq->pending;
676
    spin_unlock_irqrestore (&txq->lock, flags);
677
    return 0;
678
  } else {
679
    txq->filled++;
680
    spin_unlock_irqrestore (&txq->lock, flags);
681
    return -EAGAIN;
682
  }
683
}
684
 
685
static inline int tx_take (amb_dev * dev) {
686
  amb_txq * txq = &dev->txq;
687
  unsigned long flags;
688
 
689
  PRINTD (DBG_FLOW|DBG_TX, "tx_take %p", dev);
690
 
691
  spin_lock_irqsave (&txq->lock, flags);
692
 
693
  if (txq->pending && txq->out.ptr->handle) {
694
    // deal with TX completion
695
    tx_complete (dev, txq->out.ptr);
696
    // mark unused again
697
    txq->out.ptr->handle = 0;
698
    // remove item
699
    txq->pending--;
700
    txq->out.ptr = NEXTQ (txq->out.ptr, txq->out.start, txq->out.limit);
701
 
702
    spin_unlock_irqrestore (&txq->lock, flags);
703
    return 0;
704
  } else {
705
 
706
    spin_unlock_irqrestore (&txq->lock, flags);
707
    return -1;
708
  }
709
}
710
 
711
/********** RX queue pairs **********/
712
 
713
static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
714
  amb_rxq * rxq = &dev->rxq[pool];
715
  unsigned long flags;
716
 
717
  PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
718
 
719
  spin_lock_irqsave (&rxq->lock, flags);
720
 
721
  if (rxq->pending < rxq->maximum) {
722
    PRINTD (DBG_RX, "RX in slot %p", rxq->in.ptr);
723
 
724
    *rxq->in.ptr = *rx;
725
    rxq->pending++;
726
    rxq->in.ptr = NEXTQ (rxq->in.ptr, rxq->in.start, rxq->in.limit);
727
    // hand over the RX buffer
728
    wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
729
 
730
    spin_unlock_irqrestore (&rxq->lock, flags);
731
    return 0;
732
  } else {
733
    spin_unlock_irqrestore (&rxq->lock, flags);
734
    return -1;
735
  }
736
}
737
 
738
static inline int rx_take (amb_dev * dev, unsigned char pool) {
739
  amb_rxq * rxq = &dev->rxq[pool];
740
  unsigned long flags;
741
 
742
  PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
743
 
744
  spin_lock_irqsave (&rxq->lock, flags);
745
 
746
  if (rxq->pending && (rxq->out.ptr->status || rxq->out.ptr->length)) {
747
    // deal with RX completion
748
    rx_complete (dev, rxq->out.ptr);
749
    // mark unused again
750
    rxq->out.ptr->status = 0;
751
    rxq->out.ptr->length = 0;
752
    // remove item
753
    rxq->pending--;
754
    rxq->out.ptr = NEXTQ (rxq->out.ptr, rxq->out.start, rxq->out.limit);
755
 
756
    if (rxq->pending < rxq->low)
757
      rxq->low = rxq->pending;
758
    spin_unlock_irqrestore (&rxq->lock, flags);
759
    return 0;
760
  } else {
761
    if (!rxq->pending && rxq->buffers_wanted)
762
      rxq->emptied++;
763
    spin_unlock_irqrestore (&rxq->lock, flags);
764
    return -1;
765
  }
766
}
767
 
768
/********** RX Pool handling **********/
769
 
770
/* pre: buffers_wanted = 0, post: pending = 0 */
771
static inline void drain_rx_pool (amb_dev * dev, unsigned char pool) {
772
  amb_rxq * rxq = &dev->rxq[pool];
773
 
774
  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
775
 
776
  if (test_bit (dead, &dev->flags))
777
    return;
778
 
779
  /* we are not quite like the fill pool routines as we cannot just
780
     remove one buffer, we have to remove all of them, but we might as
781
     well pretend... */
782
  if (rxq->pending > rxq->buffers_wanted) {
783
    command cmd;
784
    cmd.request = cpu_to_be32 (SRB_FLUSH_BUFFER_Q);
785
    cmd.args.flush.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
786
    while (command_do (dev, &cmd))
787
      schedule();
788
    /* the pool may also be emptied via the interrupt handler */
789
    while (rxq->pending > rxq->buffers_wanted)
790
      if (rx_take (dev, pool))
791
        schedule();
792
  }
793
 
794
  return;
795
}
796
 
797
#ifdef MODULE
798
static void drain_rx_pools (amb_dev * dev) {
799
  unsigned char pool;
800
 
801
  PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pools %p", dev);
802
 
803
  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
804
    drain_rx_pool (dev, pool);
805
 
806
  return;
807
}
808
#endif
809
 
810
static inline void fill_rx_pool (amb_dev * dev, unsigned char pool, int priority) {
811
  rx_in rx;
812
  amb_rxq * rxq;
813
 
814
  PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pool %p %hu %x", dev, pool, priority);
815
 
816
  if (test_bit (dead, &dev->flags))
817
    return;
818
 
819
  rxq = &dev->rxq[pool];
820
  while (rxq->pending < rxq->maximum && rxq->pending < rxq->buffers_wanted) {
821
 
822
    struct sk_buff * skb = alloc_skb (rxq->buffer_size, priority);
823
    if (!skb) {
824
      PRINTD (DBG_SKB|DBG_POOL, "failed to allocate skb for RX pool %hu", pool);
825
      return;
826
    }
827
    if (check_area (skb->data, skb->truesize)) {
828
      dev_kfree_skb_any (skb);
829
      return;
830
    }
831
    // cast needed as there is no %? for pointer differences
832
    PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
833
            skb, skb->head, (long) (skb->end - skb->head));
834
    rx.handle = virt_to_bus (skb);
835
    rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
836
    if (rx_give (dev, &rx, pool))
837
      dev_kfree_skb_any (skb);
838
 
839
  }
840
 
841
  return;
842
}
843
 
844
// top up all RX pools (can also be called as a bottom half)
845
static void fill_rx_pools (amb_dev * dev) {
846
  unsigned char pool;
847
 
848
  PRINTD (DBG_FLOW|DBG_POOL, "fill_rx_pools %p", dev);
849
 
850
  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
851
    fill_rx_pool (dev, pool, GFP_ATOMIC);
852
 
853
  return;
854
}
855
 
856
/********** enable host interrupts **********/
857
 
858
static inline void interrupts_on (amb_dev * dev) {
859
  wr_plain (dev, offsetof(amb_mem, interrupt_control),
860
            rd_plain (dev, offsetof(amb_mem, interrupt_control))
861
            | AMB_INTERRUPT_BITS);
862
}
863
 
864
/********** disable host interrupts **********/
865
 
866
static inline void interrupts_off (amb_dev * dev) {
867
  wr_plain (dev, offsetof(amb_mem, interrupt_control),
868
            rd_plain (dev, offsetof(amb_mem, interrupt_control))
869
            &~ AMB_INTERRUPT_BITS);
870
}
871
 
872
/********** interrupt handling **********/
873
 
874
static void interrupt_handler (int irq, void * dev_id, struct pt_regs * pt_regs) {
875
  amb_dev * dev = amb_devs;
876
  (void) pt_regs;
877
 
878
  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler: %p", dev_id);
879
 
880
  if (!dev_id) {
881
    PRINTD (DBG_IRQ|DBG_ERR, "irq with NULL dev_id: %d", irq);
882
    return;
883
  }
884
  // Did one of our cards generate the interrupt?
885
  while (dev) {
886
    if (dev == dev_id)
887
      break;
888
    dev = dev->prev;
889
  }
890
  // impossible - unless we add the device to our list after both
891
  // registering the IRQ handler for it and enabling interrupts, AND
892
  // the card generates an IRQ at startup - should not happen again
893
  if (!dev) {
894
    PRINTD (DBG_IRQ, "irq for unknown device: %d", irq);
895
    return;
896
  }
897
  // impossible - unless we have memory corruption of dev or kernel
898
  if (irq != dev->irq) {
899
    PRINTD (DBG_IRQ|DBG_ERR, "irq mismatch: %d", irq);
900
    return;
901
  }
902
 
903
  {
904
    u32 interrupt = rd_plain (dev, offsetof(amb_mem, interrupt));
905
 
906
    // for us or someone else sharing the same interrupt
907
    if (!interrupt) {
908
      PRINTD (DBG_IRQ, "irq not for me: %d", irq);
909
      return;
910
    }
911
 
912
    // definitely for us
913
    PRINTD (DBG_IRQ, "FYI: interrupt was %08x", interrupt);
914
    wr_plain (dev, offsetof(amb_mem, interrupt), -1);
915
  }
916
 
917
  {
918
    unsigned int irq_work = 0;
919
    unsigned char pool;
920
    for (pool = 0; pool < NUM_RX_POOLS; ++pool)
921
      while (!rx_take (dev, pool))
922
        ++irq_work;
923
    while (!tx_take (dev))
924
      ++irq_work;
925
 
926
    if (irq_work) {
927
#ifdef FILL_RX_POOLS_IN_BH
928
      queue_task (&dev->bh, &tq_immediate);
929
      mark_bh (IMMEDIATE_BH);
930
#else
931
      fill_rx_pools (dev);
932
#endif
933
 
934
      PRINTD (DBG_IRQ, "work done: %u", irq_work);
935
    } else {
936
      PRINTD (DBG_IRQ|DBG_WARN, "no work done");
937
    }
938
  }
939
 
940
  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
941
  return;
942
}
943
 
944
/********** don't panic... yeah, right **********/
945
 
946
#ifdef DEBUG_AMBASSADOR
947
static void dont_panic (amb_dev * dev) {
948
  amb_cq * cq = &dev->cq;
949
  volatile amb_cq_ptrs * ptrs = &cq->ptrs;
950
  amb_txq * txq;
951
  amb_rxq * rxq;
952
  command * cmd;
953
  tx_in * tx;
954
  tx_simple * tx_descr;
955
  unsigned char pool;
956
  rx_in * rx;
957
 
958
  unsigned long flags;
959
  save_flags (flags);
960
  cli();
961
 
962
  PRINTK (KERN_INFO, "don't panic - putting adapter into reset");
963
  wr_plain (dev, offsetof(amb_mem, reset_control),
964
            rd_plain (dev, offsetof(amb_mem, reset_control)) | AMB_RESET_BITS);
965
 
966
  PRINTK (KERN_INFO, "marking all commands complete");
967
  for (cmd = ptrs->start; cmd < ptrs->limit; ++cmd)
968
    cmd->request = cpu_to_be32 (SRB_COMPLETE);
969
 
970
  PRINTK (KERN_INFO, "completing all TXs");
971
  txq = &dev->txq;
972
  tx = txq->in.ptr;
973
  while (txq->pending--) {
974
    if (tx == txq->in.start)
975
      tx = txq->in.limit;
976
    --tx;
977
    tx_descr = bus_to_virt (be32_to_cpu (tx->tx_descr_addr));
978
    amb_kfree_skb (tx_descr->skb);
979
    kfree (tx_descr);
980
  }
981
 
982
  PRINTK (KERN_INFO, "freeing all RX buffers");
983
  for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
984
    rxq = &dev->rxq[pool];
985
    rx = rxq->in.ptr;
986
    while (rxq->pending--) {
987
      if (rx == rxq->in.start)
988
        rx = rxq->in.limit;
989
      --rx;
990
      dev_kfree_skb_any (bus_to_virt (rx->handle));
991
    }
992
  }
993
 
994
  PRINTK (KERN_INFO, "don't panic over - close all VCs and rmmod");
995
  set_bit (dead, &dev->flags);
996
  restore_flags (flags);
997
  return;
998
}
999
#endif
1000
 
1001
/********** make rate (not quite as much fun as Horizon) **********/
1002
 
1003
static unsigned int make_rate (unsigned int rate, rounding r,
1004
                               u16 * bits, unsigned int * actual) {
1005
  unsigned char exp = -1; // hush gcc
1006
  unsigned int man = -1;  // hush gcc
1007
 
1008
  PRINTD (DBG_FLOW|DBG_QOS, "make_rate %u", rate);
1009
 
1010
  // rates in cells per second, ITU format (nasty 16-bit floating-point)
1011
  // given 5-bit e and 9-bit m:
1012
  // rate = EITHER (1+m/2^9)*2^e    OR 0
1013
  // bits = EITHER 1<<14 | e<<9 | m OR 0
1014
  // (bit 15 is "reserved", bit 14 "non-zero")
1015
  // smallest rate is 0 (special representation)
1016
  // largest rate is (1+511/512)*2^31 = 4290772992 (< 2^32-1)
1017
  // smallest non-zero rate is (1+0/512)*2^0 = 1 (> 0)
1018
  // simple algorithm:
1019
  // find position of top bit, this gives e
1020
  // remove top bit and shift (rounding if feeling clever) by 9-e
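  // for example, rate = 1500: the top bit is bit 10, so e = 10, and the
  // remaining 476 scaled to 9 bits gives m = 238; (1+238/512)*2^10 = 1500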
1021
 
1022
  // ucode bug: please don't set bit 14! so 0 rate not representable
1023
 
1024
  if (rate > 0xffc00000U) {
1025
    // larger than largest representable rate
1026
 
1027
    if (r == round_up) {
1028
        return -EINVAL;
1029
    } else {
1030
      exp = 31;
1031
      man = 511;
1032
    }
1033
 
1034
  } else if (rate) {
1035
    // representable rate
1036
 
1037
    exp = 31;
1038
    man = rate;
1039
 
1040
    // invariant: rate = man*2^(exp-31)
1041
    while (!(man & (1<<31))) {
1042
      exp = exp - 1;
1043
      man = man<<1;
1044
    }
1045
 
1046
    // man has top bit set
1047
    // rate = (2^31+(man-2^31))*2^(exp-31)
1048
    // rate = (1+(man-2^31)/2^31)*2^exp
1049
    man = man<<1;
1050
    man &= 0xffffffffU; // a nop on 32-bit systems
1051
    // rate = (1+man/2^32)*2^exp
1052
 
1053
    // exp is in the range 0 to 31, man is in the range 0 to 2^32-1
1054
    // time to lose significance... we want m in the range 0 to 2^9-1
1055
    // rounding presents a minor problem... we first decide which way
1056
    // we are rounding (based on given rounding direction and possibly
1057
    // the bits of the mantissa that are to be discarded).
1058
 
1059
    switch (r) {
1060
      case round_down: {
1061
        // just truncate
1062
        man = man>>(32-9);
1063
        break;
1064
      }
1065
      case round_up: {
1066
        // check all bits that we are discarding
1067
        if (man & (-1>>9)) {
1068
          man = (man>>(32-9)) + 1;
1069
          if (man == (1<<9)) {
1070
            // no need to check for round up outside of range
1071
            man = 0;
1072
            exp += 1;
1073
          }
1074
        } else {
1075
          man = (man>>(32-9));
1076
        }
1077
        break;
1078
      }
1079
      case round_nearest: {
1080
        // check msb that we are discarding
1081
        if (man & (1<<(32-9-1))) {
1082
          man = (man>>(32-9)) + 1;
1083
          if (man == (1<<9)) {
1084
            // no need to check for round up outside of range
1085
            man = 0;
1086
            exp += 1;
1087
          }
1088
        } else {
1089
          man = (man>>(32-9));
1090
        }
1091
        break;
1092
      }
1093
    }
1094
 
1095
  } else {
1096
    // zero rate - not representable
1097
 
1098
    if (r == round_down) {
1099
      return -EINVAL;
1100
    } else {
1101
      exp = 0;
1102
      man = 0;
1103
    }
1104
 
1105
  }
1106
 
1107
  PRINTD (DBG_QOS, "rate: man=%u, exp=%hu", man, exp);
1108
 
1109
  if (bits)
1110
    *bits = /* (1<<14) | */ (exp<<9) | man;
1111
 
1112
  if (actual)
1113
    *actual = (exp >= 9)
1114
      ? (1 << exp) + (man << (exp-9))
1115
      : (1 << exp) + ((man + (1<<(9-exp-1))) >> (9-exp));
1116
 
1117
  return 0;
1118
}
1119
 
1120
/********** Linux ATM Operations **********/
1121
 
1122
// some are not yet implemented while others do not make sense for
1123
// this device
1124
 
1125
/********** Open a VC **********/
1126
 
1127
static int amb_open (struct atm_vcc * atm_vcc, short vpi, int vci) {
1128
  int error;
1129
 
1130
  struct atm_qos * qos;
1131
  struct atm_trafprm * txtp;
1132
  struct atm_trafprm * rxtp;
1133
  u16 tx_rate_bits;
1134
  u16 tx_vc_bits = -1; // hush gcc
1135
  u16 tx_frame_bits = -1; // hush gcc
1136
 
1137
  amb_dev * dev = AMB_DEV(atm_vcc->dev);
1138
  amb_vcc * vcc;
1139
  unsigned char pool = -1; // hush gcc
1140
 
1141
  PRINTD (DBG_FLOW|DBG_VCC, "amb_open %x %x", vpi, vci);
1142
 
1143
#ifdef ATM_VPI_UNSPEC
1144
  // UNSPEC is deprecated, remove this code eventually
1145
  if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
1146
    PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
1147
    return -EINVAL;
1148
  }
1149
#endif
1150
 
1151
  // deal with possibly wildcarded VCs
1152
  error = atm_find_ci (atm_vcc, &vpi, &vci);
1153
  if (error) {
1154
    PRINTD (DBG_WARN|DBG_VCC, "atm_find_ci failed!");
1155
    return error;
1156
  }
1157
  PRINTD (DBG_VCC, "atm_find_ci gives %x %x", vpi, vci);
1158
 
1159
  if (!(0 <= vpi && vpi < (1<<NUM_VPI_BITS) &&
1160
        0 <= vci && vci < (1<<NUM_VCI_BITS))) {
1161
    PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
1162
    return -EINVAL;
1163
  }
1164
 
1165
  qos = &atm_vcc->qos;
1166
 
1167
  if (qos->aal != ATM_AAL5) {
1168
    PRINTD (DBG_QOS, "AAL not supported");
1169
    return -EINVAL;
1170
  }
1171
 
1172
  // traffic parameters
1173
 
1174
  PRINTD (DBG_QOS, "TX:");
1175
  txtp = &qos->txtp;
1176
  if (txtp->traffic_class != ATM_NONE) {
1177
    switch (txtp->traffic_class) {
1178
      case ATM_UBR: {
1179
        // we take "the PCR" as a rate-cap
1180
        int pcr = atm_pcr_goal (txtp);
1181
        if (!pcr) {
1182
          // no rate cap
1183
          tx_rate_bits = 0;
1184
          tx_vc_bits = TX_UBR;
1185
          tx_frame_bits = TX_FRAME_NOTCAP;
1186
        } else {
1187
          rounding r;
1188
          if (pcr < 0) {
1189
            r = round_down;
1190
            pcr = -pcr;
1191
          } else {
1192
            r = round_up;
1193
          }
1194
          error = make_rate (pcr, r, &tx_rate_bits, 0);
1195
          tx_vc_bits = TX_UBR_CAPPED;
1196
          tx_frame_bits = TX_FRAME_CAPPED;
1197
        }
1198
        break;
1199
      }
1200
#if 0
1201
      case ATM_ABR: {
1202
        pcr = atm_pcr_goal (txtp);
1203
        PRINTD (DBG_QOS, "pcr goal = %d", pcr);
1204
        break;
1205
      }
1206
#endif
1207
      default: {
1208
        // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
1209
        PRINTD (DBG_QOS, "request for non-UBR denied");
1210
        return -EINVAL;
1211
      }
1212
    }
1213
    PRINTD (DBG_QOS, "tx_rate_bits=%hx, tx_vc_bits=%hx",
1214
            tx_rate_bits, tx_vc_bits);
1215
  }
1216
 
1217
  PRINTD (DBG_QOS, "RX:");
1218
  rxtp = &qos->rxtp;
1219
  if (rxtp->traffic_class == ATM_NONE) {
1220
    // do nothing
1221
  } else {
1222
    // choose an RX pool (arranged in increasing size)
1223
    for (pool = 0; pool < NUM_RX_POOLS; ++pool)
1224
      if ((unsigned int) rxtp->max_sdu <= dev->rxq[pool].buffer_size) {
1225
        PRINTD (DBG_VCC|DBG_QOS|DBG_POOL, "chose pool %hu (max_sdu %u <= %u)",
1226
                pool, rxtp->max_sdu, dev->rxq[pool].buffer_size);
1227
        break;
1228
      }
1229
    if (pool == NUM_RX_POOLS) {
1230
      PRINTD (DBG_WARN|DBG_VCC|DBG_QOS|DBG_POOL,
1231
              "no pool suitable for VC (RX max_sdu %d is too large)",
1232
              rxtp->max_sdu);
1233
      return -EINVAL;
1234
    }
1235
 
1236
    switch (rxtp->traffic_class) {
1237
      case ATM_UBR: {
1238
        break;
1239
      }
1240
#if 0
1241
      case ATM_ABR: {
1242
        pcr = atm_pcr_goal (rxtp);
1243
        PRINTD (DBG_QOS, "pcr goal = %d", pcr);
1244
        break;
1245
      }
1246
#endif
1247
      default: {
1248
        // PRINTD (DBG_QOS, "request for non-UBR/ABR denied");
1249
        PRINTD (DBG_QOS, "request for non-UBR denied");
1250
        return -EINVAL;
1251
      }
1252
    }
1253
  }
1254
 
1255
  // get space for our vcc stuff
1256
  vcc = kmalloc (sizeof(amb_vcc), GFP_KERNEL);
1257
  if (!vcc) {
1258
    PRINTK (KERN_ERR, "out of memory!");
1259
    return -ENOMEM;
1260
  }
1261
  atm_vcc->dev_data = (void *) vcc;
1262
 
1263
  // no failures beyond this point
1264
 
1265
  // we are not really "immediately before allocating the connection
1266
  // identifier in hardware", but it will just have to do!
1267
  set_bit(ATM_VF_ADDR,&atm_vcc->flags);
1268
 
1269
  if (txtp->traffic_class != ATM_NONE) {
1270
    command cmd;
1271
 
1272
    vcc->tx_frame_bits = tx_frame_bits;
1273
 
1274
    down (&dev->vcc_sf);
1275
    if (dev->rxer[vci]) {
1276
      // RXer on the channel already, just modify rate...
1277
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
1278
      cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0
1279
      cmd.args.modify_rate.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
1280
      while (command_do (dev, &cmd))
1281
        schedule();
1282
      // ... and TX flags, preserving the RX pool
1283
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
1284
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
1285
      cmd.args.modify_flags.flags = cpu_to_be32
1286
        ( (AMB_VCC(dev->rxer[vci])->rx_info.pool << SRB_POOL_SHIFT)
1287
          | (tx_vc_bits << SRB_FLAGS_SHIFT) );
1288
      while (command_do (dev, &cmd))
1289
        schedule();
1290
    } else {
1291
      // no RXer on the channel, just open (with pool zero)
1292
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
1293
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
1294
      cmd.args.open.flags = cpu_to_be32 (tx_vc_bits << SRB_FLAGS_SHIFT);
1295
      cmd.args.open.rate = cpu_to_be32 (tx_rate_bits << SRB_RATE_SHIFT);
1296
      while (command_do (dev, &cmd))
1297
        schedule();
1298
    }
1299
    dev->txer[vci].tx_present = 1;
1300
    up (&dev->vcc_sf);
1301
  }
1302
 
1303
  if (rxtp->traffic_class != ATM_NONE) {
1304
    command cmd;
1305
 
1306
    vcc->rx_info.pool = pool;
1307
 
1308
    down (&dev->vcc_sf);
1309
    /* grow RX buffer pool */
1310
    if (!dev->rxq[pool].buffers_wanted)
1311
      dev->rxq[pool].buffers_wanted = rx_lats;
1312
    dev->rxq[pool].buffers_wanted += 1;
1313
    fill_rx_pool (dev, pool, GFP_KERNEL);
1314
 
1315
    if (dev->txer[vci].tx_present) {
1316
      // TXer on the channel already
1317
      // switch (from pool zero) to this pool, preserving the TX bits
1318
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
1319
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
1320
      cmd.args.modify_flags.flags = cpu_to_be32
1321
        ( (pool << SRB_POOL_SHIFT)
1322
          | (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT) );
1323
    } else {
1324
      // no TXer on the channel, open the VC (with no rate info)
1325
      cmd.request = cpu_to_be32 (SRB_OPEN_VC);
1326
      cmd.args.open.vc = cpu_to_be32 (vci);  // vpi 0
1327
      cmd.args.open.flags = cpu_to_be32 (pool << SRB_POOL_SHIFT);
1328
      cmd.args.open.rate = cpu_to_be32 (0);
1329
    }
1330
    while (command_do (dev, &cmd))
1331
      schedule();
1332
    // this link allows RX frames through
1333
    dev->rxer[vci] = atm_vcc;
1334
    up (&dev->vcc_sf);
1335
  }
1336
 
1337
  // set elements of vcc
1338
  atm_vcc->vpi = vpi; // 0
1339
  atm_vcc->vci = vci;
1340
 
1341
  // indicate readiness
1342
  set_bit(ATM_VF_READY,&atm_vcc->flags);
1343
 
1344
  return 0;
1345
}
1346
 
1347
/********** Close a VC **********/
1348
 
1349
static void amb_close (struct atm_vcc * atm_vcc) {
1350
  amb_dev * dev = AMB_DEV (atm_vcc->dev);
1351
  amb_vcc * vcc = AMB_VCC (atm_vcc);
1352
  u16 vci = atm_vcc->vci;
1353
 
1354
  PRINTD (DBG_VCC|DBG_FLOW, "amb_close");
1355
 
1356
  // indicate unreadiness
1357
  clear_bit(ATM_VF_READY,&atm_vcc->flags);
1358
 
1359
  // disable TXing
1360
  if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
1361
    command cmd;
1362
 
1363
    down (&dev->vcc_sf);
1364
    if (dev->rxer[vci]) {
1365
      // RXer still on the channel, just modify rate... XXX not really needed
1366
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
1367
      cmd.args.modify_rate.vc = cpu_to_be32 (vci);  // vpi 0
1368
      cmd.args.modify_rate.rate = cpu_to_be32 (0);
1369
      // ... and clear TX rate flags (XXX to stop RM cell output?), preserving RX pool
1370
    } else {
1371
      // no RXer on the channel, close channel
1372
      cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
1373
      cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
1374
    }
1375
    dev->txer[vci].tx_present = 0;
1376
    while (command_do (dev, &cmd))
1377
      schedule();
1378
    up (&dev->vcc_sf);
1379
  }
1380
 
1381
  // disable RXing
1382
  if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
1383
    command cmd;
1384
 
1385
    // this is (the?) one reason why we need the amb_vcc struct
1386
    unsigned char pool = vcc->rx_info.pool;
1387
 
1388
    down (&dev->vcc_sf);
1389
    if (dev->txer[vci].tx_present) {
1390
      // TXer still on the channel, just go to pool zero XXX not really needed
1391
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
1392
      cmd.args.modify_flags.vc = cpu_to_be32 (vci);  // vpi 0
1393
      cmd.args.modify_flags.flags = cpu_to_be32
1394
        (dev->txer[vci].tx_vc_bits << SRB_FLAGS_SHIFT);
1395
    } else {
1396
      // no TXer on the channel, close the VC
1397
      cmd.request = cpu_to_be32 (SRB_CLOSE_VC);
1398
      cmd.args.close.vc = cpu_to_be32 (vci); // vpi 0
1399
    }
1400
    // forget the rxer - no more skbs will be pushed
1401
    if (atm_vcc != dev->rxer[vci])
1402
      PRINTK (KERN_ERR, "%s vcc=%p rxer[vci]=%p",
1403
              "arghhh! we're going to die!",
1404
              vcc, dev->rxer[vci]);
1405
    dev->rxer[vci] = 0;
1406
    while (command_do (dev, &cmd))
1407
      schedule();
1408
 
1409
    /* shrink RX buffer pool */
1410
    dev->rxq[pool].buffers_wanted -= 1;
1411
    if (dev->rxq[pool].buffers_wanted == rx_lats) {
1412
      dev->rxq[pool].buffers_wanted = 0;
1413
      drain_rx_pool (dev, pool);
1414
    }
1415
    up (&dev->vcc_sf);
1416
  }
1417
 
1418
  // free our structure
1419
  kfree (vcc);
1420
 
1421
  // say the VPI/VCI is free again
1422
  clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
1423
 
1424
  return;
1425
}
1426
 
1427
/********** DebugIoctl **********/
1428
 
1429
#if 0
1430
static int amb_ioctl (struct atm_dev * dev, unsigned int cmd, void * arg) {
1431
  unsigned short newdebug;
1432
  if (cmd == AMB_SETDEBUG) {
1433
    if (!capable(CAP_NET_ADMIN))
1434
      return -EPERM;
1435
    if (copy_from_user (&newdebug, arg, sizeof(newdebug))) {
1436
      // moan
1437
      return -EFAULT;
1438
    } else {
1439
      debug = newdebug;
1440
      return 0;
1441
    }
1442
  } else if (cmd == AMB_DONTPANIC) {
1443
    if (!capable(CAP_NET_ADMIN))
1444
      return -EPERM;
1445
    dont_panic (dev);
1446
  } else {
1447
    // moan
1448
    return -ENOIOCTLCMD;
1449
  }
1450
}
1451
#endif
1452
 
1453
/********** Set socket options for a VC **********/
1454
 
1455
// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1456
 
1457
/********** Set socket options for a VC **********/
1458
 
1459
// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
1460
 
1461
/********** Send **********/
1462
 
1463
static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1464
  amb_dev * dev = AMB_DEV(atm_vcc->dev);
1465
  amb_vcc * vcc = AMB_VCC(atm_vcc);
1466
  u16 vc = atm_vcc->vci;
1467
  unsigned int tx_len = skb->len;
1468
  unsigned char * tx_data = skb->data;
1469
  tx_simple * tx_descr;
1470
  tx_in tx;
1471
 
1472
  if (test_bit (dead, &dev->flags))
1473
    return -EIO;
1474
 
1475
  PRINTD (DBG_FLOW|DBG_TX, "amb_send vc %x data %p len %u",
1476
          vc, tx_data, tx_len);
1477
 
1478
  dump_skb (">>>", vc, skb);
1479
 
1480
  if (!dev->txer[vc].tx_present) {
1481
    PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", vc);
1482
    return -EBADFD;
1483
  }
1484
 
1485
  // this is a driver private field so we have to set it ourselves,
1486
  // despite the fact that we are _required_ to use it to check for a
1487
  // pop function
1488
  ATM_SKB(skb)->vcc = atm_vcc;
1489
 
1490
  if (skb->len > (size_t) atm_vcc->qos.txtp.max_sdu) {
1491
    PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
1492
    return -EIO;
1493
  }
1494
 
1495
  if (check_area (skb->data, skb->len)) {
1496
    atomic_inc(&atm_vcc->stats->tx_err);
1497
    return -ENOMEM; // ?
1498
  }
1499
 
1500
  // allocate memory for fragments
1501
  tx_descr = kmalloc (sizeof(tx_simple), GFP_KERNEL);
1502
  if (!tx_descr) {
1503
    PRINTK (KERN_ERR, "could not allocate TX descriptor");
1504
    return -ENOMEM;
1505
  }
1506
  if (check_area (tx_descr, sizeof(tx_simple))) {
1507
    kfree (tx_descr);
1508
    return -ENOMEM;
1509
  }
1510
  PRINTD (DBG_TX, "fragment list allocated at %p", tx_descr);
1511
 
1512
  tx_descr->skb = skb;
1513
 
1514
  tx_descr->tx_frag.bytes = cpu_to_be32 (tx_len);
1515
  tx_descr->tx_frag.address = cpu_to_be32 (virt_to_bus (tx_data));
1516
 
1517
  tx_descr->tx_frag_end.handle = virt_to_bus (tx_descr);
1518
  tx_descr->tx_frag_end.vc = 0;
1519
  tx_descr->tx_frag_end.next_descriptor_length = 0;
1520
  tx_descr->tx_frag_end.next_descriptor = 0;
1521
#ifdef AMB_NEW_MICROCODE
1522
  tx_descr->tx_frag_end.cpcs_uu = 0;
1523
  tx_descr->tx_frag_end.cpi = 0;
1524
  tx_descr->tx_frag_end.pad = 0;
1525
#endif
1526
 
1527
  tx.vc = cpu_to_be16 (vcc->tx_frame_bits | vc);
1528
  tx.tx_descr_length = cpu_to_be16 (sizeof(tx_frag)+sizeof(tx_frag_end));
1529
  tx.tx_descr_addr = cpu_to_be32 (virt_to_bus (&tx_descr->tx_frag));
1530
 
1531
#ifdef DEBUG_AMBASSADOR
1532
  /* wey-hey! */
1533
  if (vc == 1023) {
1534
    unsigned int i;
1535
    unsigned short d = 0;
1536
    char * s = skb->data;
1537
    switch (*s++) {
1538
      case 'D': {
1539
        for (i = 0; i < 4; ++i) {
1540
          d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10));
1541
          ++s;
1542
        }
1543
        PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
1544
        break;
1545
      }
1546
      case 'R': {
1547
        if (*s++ == 'e' && *s++ == 's' && *s++ == 'e' && *s++ == 't')
1548
          dont_panic (dev);
1549
        break;
1550
      }
1551
      default: {
1552
        break;
1553
      }
1554
    }
1555
  }
1556
#endif
1557
 
1558
  while (tx_give (dev, &tx))
1559
    schedule();
1560
  return 0;
1561
}
1562
 
1563
/********** Scatter Gather Send Capability **********/
1564
 
1565
static int amb_sg_send (struct atm_vcc * atm_vcc,
1566
                        unsigned long start,
1567
                        unsigned long size) {
1568
  PRINTD (DBG_FLOW|DBG_VCC, "amb_sg_send: never");
1569
  return 0;
1570
  if (atm_vcc->qos.aal == ATM_AAL5) {
1571
    PRINTD (DBG_FLOW|DBG_VCC, "amb_sg_send: yes");
1572
    return 1;
1573
  } else {
1574
    PRINTD (DBG_FLOW|DBG_VCC, "amb_sg_send: no");
1575
    return 0;
1576
  }
1577
  PRINTD (DBG_FLOW|DBG_VCC, "amb_sg_send: always");
1578
  return 1;
1579
}
1580
 
1581
/********** Send OAM **********/
1582
 
1583
// static int amb_send_oam (struct atm_vcc * atm_vcc, void * cell, int flags);
1584
 
1585
/********** Feedback to Driver **********/
1586
 
1587
// void amb_feedback (struct atm_vcc * atm_vcc, struct sk_buff * skb,
1588
// unsigned long start, unsigned long dest, int len);
1589
 
1590
/********** Change QoS on a VC **********/
1591
 
1592
// int amb_change_qos (struct atm_vcc * atm_vcc, struct atm_qos * qos, int flags);
1593
 
1594
/********** Free RX Socket Buffer **********/
1595
 
1596
#if 0
1597
static void amb_free_rx_skb (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1598
  amb_dev * dev = AMB_DEV (atm_vcc->dev);
1599
  amb_vcc * vcc = AMB_VCC (atm_vcc);
1600
  unsigned char pool = vcc->rx_info.pool;
1601
  rx_in rx;
1602
 
1603
  // This may be unsafe for various reasons that I cannot really guess
1604
  // at. However, I note that the ATM layer calls kfree_skb rather
1605
  // than dev_kfree_skb at this point so we are at least covered as far
1606
  // as buffer locking goes. There may be bugs if pcap clones RX skbs.
1607
 
1608
  PRINTD (DBG_FLOW|DBG_SKB, "amb_rx_free skb %p (atm_vcc %p, vcc %p)",
1609
          skb, atm_vcc, vcc);
1610
 
1611
  rx.handle = virt_to_bus (skb);
1612
  rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
1613
 
1614
  skb->data = skb->head;
1615
  skb->tail = skb->head;
1616
  skb->len = 0;
1617
 
1618
  if (!rx_give (dev, &rx, pool)) {
1619
    // success
1620
    PRINTD (DBG_SKB|DBG_POOL, "recycled skb for pool %hu", pool);
1621
    return;
1622
  }
1623
 
1624
  // just do what the ATM layer would have done
1625
  dev_kfree_skb_any (skb);
1626
 
1627
  return;
1628
}
1629
#endif
1630
 
1631
/********** Proc File Output **********/
1632
 
1633
static int amb_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
1634
  amb_dev * dev = AMB_DEV (atm_dev);
1635
  int left = *pos;
1636
  unsigned char pool;
1637
 
1638
  PRINTD (DBG_FLOW, "amb_proc_read");
1639
 
1640
  /* more diagnostics here? */
1641
 
1642
  if (!left--) {
1643
    amb_stats * s = &dev->stats;
1644
    return sprintf (page,
1645
                    "frames: TX OK %lu, RX OK %lu, RX bad %lu "
1646
                    "(CRC %lu, long %lu, aborted %lu, unused %lu).\n",
1647
                    s->tx_ok, s->rx.ok, s->rx.error,
1648
                    s->rx.badcrc, s->rx.toolong,
1649
                    s->rx.aborted, s->rx.unused);
1650
  }
1651
 
1652
  if (!left--) {
1653
    amb_cq * c = &dev->cq;
1654
    return sprintf (page, "cmd queue [cur/hi/max]: %u/%u/%u. ",
1655
                    c->pending, c->high, c->maximum);
1656
  }
1657
 
1658
  if (!left--) {
1659
    amb_txq * t = &dev->txq;
1660
    return sprintf (page, "TX queue [cur/max high full]: %u/%u %u %u.\n",
1661
                    t->pending, t->maximum, t->high, t->filled);
1662
  }
1663
 
1664
  if (!left--) {
1665
    unsigned int count = sprintf (page, "RX queues [cur/max/req low empty]:");
1666
    for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
1667
      amb_rxq * r = &dev->rxq[pool];
1668
      count += sprintf (page+count, " %u/%u/%u %u %u",
1669
                        r->pending, r->maximum, r->buffers_wanted, r->low, r->emptied);
1670
    }
1671
    count += sprintf (page+count, ".\n");
1672
    return count;
1673
  }
1674
 
1675
  if (!left--) {
1676
    unsigned int count = sprintf (page, "RX buffer sizes:");
1677
    for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
1678
      amb_rxq * r = &dev->rxq[pool];
1679
      count += sprintf (page+count, " %u", r->buffer_size);
1680
    }
1681
    count += sprintf (page+count, ".\n");
1682
    return count;
1683
  }
1684
 
1685
#if 0
1686
  if (!left--) {
1687
    // suni block etc?
1688
  }
1689
#endif
1690
 
1691
  return 0;
1692
}
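
/* Illustrative proc output only (figures invented, per-pool lists shortened
   for brevity) - each if-block above contributes one fragment per read
   position, roughly:

     frames: TX OK 1234, RX OK 5678, RX bad 0 (CRC 0, long 0, aborted 0, unused 0).
     cmd queue [cur/hi/max]: 0/2/7. TX queue [cur/max high full]: 0/31 4 0.
     RX queues [cur/max/req low empty]: 0/63/0 0 0 ...
     RX buffer sizes: ...
*/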
 
/********** Operation Structure **********/
 
static const struct atmdev_ops amb_ops = {
  open:         amb_open,
  close:        amb_close,
  send:         amb_send,
  sg_send:      amb_sg_send,
  proc_read:    amb_proc_read,
  owner:        THIS_MODULE,
};
 
/********** housekeeping **********/
 
static inline void set_timer (struct timer_list * timer, unsigned long delay) {
  timer->expires = jiffies + delay;
  add_timer (timer);
  return;
}
 
static void do_housekeeping (unsigned long arg) {
  amb_dev * dev = amb_devs;
  // data is set to zero at module unload
  (void) arg;
 
  if (housekeeping.data) {
    while (dev) {
 
      // could collect device-specific (not driver/atm-linux) stats here
 
      // last resort refill once every ten seconds
      fill_rx_pools (dev);
 
      dev = dev->prev;
    }
    set_timer (&housekeeping, 10*HZ);
  }
 
  return;
}
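
/* Note: housekeeping.data doubles as a run flag - it is set to 1 when the
   timer is first armed (in init_module/amb_detect below) and cleared to 0 at
   unload, so a run that slips past del_timer does nothing and does not
   re-arm.  While running, the timer re-arms itself every 10*HZ jiffies and
   walks the amb_devs list topping up the RX pools. */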
 
/********** creation of communication queues **********/
 
static int __init create_queues (amb_dev * dev, unsigned int cmds,
                                 unsigned int txs, unsigned int * rxs,
                                 unsigned int * rx_buffer_sizes) {
  unsigned char pool;
  size_t total = 0;
  void * memory;
  void * limit;
 
  PRINTD (DBG_FLOW, "create_queues %p", dev);
 
  total += cmds * sizeof(command);
 
  total += txs * (sizeof(tx_in) + sizeof(tx_out));
 
  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    total += rxs[pool] * (sizeof(rx_in) + sizeof(rx_out));
 
  memory = kmalloc (total, GFP_KERNEL);
  if (!memory) {
    PRINTK (KERN_ERR, "could not allocate queues");
    return -ENOMEM;
  }
  if (check_area (memory, total)) {
    PRINTK (KERN_ERR, "queues allocated in nasty area");
    kfree (memory);
    return -ENOMEM;
  }
 
  limit = memory + total;
  PRINTD (DBG_INIT, "queues from %p to %p", memory, limit);
 
  PRINTD (DBG_CMD, "command queue at %p", memory);
 
  {
    command * cmd = memory;
    amb_cq * cq = &dev->cq;
 
    cq->pending = 0;
    cq->high = 0;
    cq->maximum = cmds - 1;
 
    cq->ptrs.start = cmd;
    cq->ptrs.in = cmd;
    cq->ptrs.out = cmd;
    cq->ptrs.limit = cmd + cmds;
 
    memory = cq->ptrs.limit;
  }
 
  PRINTD (DBG_TX, "TX queue pair at %p", memory);
 
  {
    tx_in * in = memory;
    tx_out * out;
    amb_txq * txq = &dev->txq;
 
    txq->pending = 0;
    txq->high = 0;
    txq->filled = 0;
    txq->maximum = txs - 1;
 
    txq->in.start = in;
    txq->in.ptr = in;
    txq->in.limit = in + txs;
 
    memory = txq->in.limit;
    out = memory;
 
    txq->out.start = out;
    txq->out.ptr = out;
    txq->out.limit = out + txs;
 
    memory = txq->out.limit;
  }
 
  PRINTD (DBG_RX, "RX queue pairs at %p", memory);
 
  for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
    rx_in * in = memory;
    rx_out * out;
    amb_rxq * rxq = &dev->rxq[pool];
 
    rxq->buffer_size = rx_buffer_sizes[pool];
    rxq->buffers_wanted = 0;
 
    rxq->pending = 0;
    rxq->low = rxs[pool] - 1;
    rxq->emptied = 0;
    rxq->maximum = rxs[pool] - 1;
 
    rxq->in.start = in;
    rxq->in.ptr = in;
    rxq->in.limit = in + rxs[pool];
 
    memory = rxq->in.limit;
    out = memory;
 
    rxq->out.start = out;
    rxq->out.ptr = out;
    rxq->out.limit = out + rxs[pool];
 
    memory = rxq->out.limit;
  }
 
  if (memory == limit) {
    return 0;
  } else {
    PRINTK (KERN_ERR, "bad queue alloc %p != %p (tell maintainer)", memory, limit);
    kfree (limit - total);
    return -ENOMEM;
  }
 
}
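
/* The queues live in a single kmalloc'd block; create_queues lays it out in
   order, one region per queue, with sizes taken from the module parameters:

     | cmds * command | txs * tx_in | txs * tx_out |
     | rxs[0] * rx_in | rxs[0] * rx_out | ... per RX pool ... |

   destroy_queues below frees the whole block via the command queue start
   pointer, which is why the command queue must come first. */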
 
/********** destruction of communication queues **********/
 
static void destroy_queues (amb_dev * dev) {
  // all queues assumed empty
  void * memory = dev->cq.ptrs.start;
  // includes txq.in, txq.out, rxq[].in and rxq[].out
 
  PRINTD (DBG_FLOW, "destroy_queues %p", dev);
 
  PRINTD (DBG_INIT, "freeing queues at %p", memory);
  kfree (memory);
 
  return;
}
 
/********** basic loader commands and error handling **********/
 
static int __init do_loader_command (volatile loader_block * lb,
                                     const amb_dev * dev, loader_command cmd) {
  // centisecond timeouts - guessing away here
  unsigned int command_timeouts [] = {
    [host_memory_test]     = 15,
    [read_adapter_memory]  = 2,
    [write_adapter_memory] = 2,
    [adapter_start]        = 50,
    [get_version_number]   = 10,
    [interrupt_host]       = 1,
    [flash_erase_sector]   = 1,
    [adap_download_block]  = 1,
    [adap_erase_flash]     = 1,
    [adap_run_in_iram]     = 1,
    [adap_end_download]    = 1
  };
 
  unsigned int command_successes [] = {
    [host_memory_test]     = COMMAND_PASSED_TEST,
    [read_adapter_memory]  = COMMAND_READ_DATA_OK,
    [write_adapter_memory] = COMMAND_WRITE_DATA_OK,
    [adapter_start]        = COMMAND_COMPLETE,
    [get_version_number]   = COMMAND_COMPLETE,
    [interrupt_host]       = COMMAND_COMPLETE,
    [flash_erase_sector]   = COMMAND_COMPLETE,
    [adap_download_block]  = COMMAND_COMPLETE,
    [adap_erase_flash]     = COMMAND_COMPLETE,
    [adap_run_in_iram]     = COMMAND_COMPLETE,
    [adap_end_download]    = COMMAND_COMPLETE
  };
 
  int decode_loader_result (loader_command cmd, u32 result) {
    int res;
    const char * msg;
 
    if (result == command_successes[cmd])
      return 0;
 
    switch (result) {
      case BAD_COMMAND:
        res = -EINVAL;
        msg = "bad command";
        break;
      case COMMAND_IN_PROGRESS:
        res = -ETIMEDOUT;
        msg = "command in progress";
        break;
      case COMMAND_PASSED_TEST:
        res = 0;
        msg = "command passed test";
        break;
      case COMMAND_FAILED_TEST:
        res = -EIO;
        msg = "command failed test";
        break;
      case COMMAND_READ_DATA_OK:
        res = 0;
        msg = "command read data ok";
        break;
      case COMMAND_READ_BAD_ADDRESS:
        res = -EINVAL;
        msg = "command read bad address";
        break;
      case COMMAND_WRITE_DATA_OK:
        res = 0;
        msg = "command write data ok";
        break;
      case COMMAND_WRITE_BAD_ADDRESS:
        res = -EINVAL;
        msg = "command write bad address";
        break;
      case COMMAND_WRITE_FLASH_FAILURE:
        res = -EIO;
        msg = "command write flash failure";
        break;
      case COMMAND_COMPLETE:
        res = 0;
        msg = "command complete";
        break;
      case COMMAND_FLASH_ERASE_FAILURE:
        res = -EIO;
        msg = "command flash erase failure";
        break;
      case COMMAND_WRITE_BAD_DATA:
        res = -EINVAL;
        msg = "command write bad data";
        break;
      default:
        res = -EINVAL;
        msg = "unknown error";
        PRINTD (DBG_LOAD|DBG_ERR, "decode_loader_result got %d=%x !",
                result, result);
        break;
    }
 
    PRINTK (KERN_ERR, "%s", msg);
    return res;
  }
 
  unsigned long timeout;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "do_loader_command");
 
  /* do a command
 
     Set the return value to zero, set the command type and set the
     valid entry to the right magic value. The payload is already
     correctly byte-ordered so we leave it alone. Hit the doorbell
     with the bus address of this structure.
 
  */
 
  lb->result = 0;
  lb->command = cpu_to_be32 (cmd);
  lb->valid = cpu_to_be32 (DMA_VALID);
  // dump_registers (dev);
  // dump_loader_block (lb);
  wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (lb) & ~onegigmask);
 
  timeout = command_timeouts[cmd] * HZ/100;
 
  while (!lb->result || lb->result == cpu_to_be32 (COMMAND_IN_PROGRESS))
    if (timeout) {
      set_current_state(TASK_UNINTERRUPTIBLE);
      timeout = schedule_timeout (timeout);
    } else {
      PRINTD (DBG_LOAD|DBG_ERR, "command %d timed out", cmd);
      dump_registers (dev);
      dump_loader_block (lb);
      return -ETIMEDOUT;
    }
 
  if (cmd == adapter_start) {
    // wait for the start command to be acknowledged...
    timeout = HZ/10;
    while (rd_plain (dev, offsetof(amb_mem, doorbell)))
      if (timeout) {
        timeout = schedule_timeout (timeout);
      } else {
        PRINTD (DBG_LOAD|DBG_ERR, "start command did not clear doorbell, res=%08x",
                be32_to_cpu (lb->result));
        dump_registers (dev);
        return -ETIMEDOUT;
      }
    return 0;
  } else {
    return decode_loader_result (cmd, be32_to_cpu (lb->result));
  }
 
}
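
/* Loader handshake, as implemented above: the host fills in a loader_block,
   writes its bus address (offset within the PLX-mapped window) to the
   doorbell, then polls lb->result until the adapter overwrites it; for
   adapter_start the doorbell register itself is also polled until the
   adapter clears it.  The helpers below (get_loader_version, loader_write,
   loader_verify, loader_start) just fill in the payload and call this. */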
 
/* loader: determine loader version */
 
static int __init get_loader_version (loader_block * lb,
                                      const amb_dev * dev, u32 * version) {
  int res;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "get_loader_version");
 
  res = do_loader_command (lb, dev, get_version_number);
  if (res)
    return res;
  if (version)
    *version = be32_to_cpu (lb->payload.version);
  return 0;
}
 
/* loader: write memory data blocks */
 
static int __init loader_write (loader_block * lb,
                                const amb_dev * dev, const u32 * data,
                                u32 address, unsigned int count) {
  unsigned int i;
  transfer_block * tb = &lb->payload.transfer;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "loader_write");
 
  if (count > MAX_TRANSFER_DATA)
    return -EINVAL;
  tb->address = cpu_to_be32 (address);
  tb->count = cpu_to_be32 (count);
  for (i = 0; i < count; ++i)
    tb->data[i] = cpu_to_be32 (data[i]);
  return do_loader_command (lb, dev, write_adapter_memory);
}
 
/* loader: verify memory data blocks */
 
static int __init loader_verify (loader_block * lb,
                                 const amb_dev * dev, const u32 * data,
                                 u32 address, unsigned int count) {
  unsigned int i;
  transfer_block * tb = &lb->payload.transfer;
  int res;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "loader_verify");
 
  if (count > MAX_TRANSFER_DATA)
    return -EINVAL;
  tb->address = cpu_to_be32 (address);
  tb->count = cpu_to_be32 (count);
  res = do_loader_command (lb, dev, read_adapter_memory);
  if (!res)
    for (i = 0; i < count; ++i)
      if (tb->data[i] != cpu_to_be32 (data[i])) {
        res = -EINVAL;
        break;
      }
  return res;
}
 
/* loader: start microcode */
 
static int __init loader_start (loader_block * lb,
                                const amb_dev * dev, u32 address) {
  PRINTD (DBG_FLOW|DBG_LOAD, "loader_start");
 
  lb->payload.start = cpu_to_be32 (address);
  return do_loader_command (lb, dev, adapter_start);
}
 
/********** reset card **********/
 
static int amb_reset (amb_dev * dev, int diags) {
  u32 word;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "amb_reset");
 
  word = rd_plain (dev, offsetof(amb_mem, reset_control));
  // put card into reset state
  wr_plain (dev, offsetof(amb_mem, reset_control), word | AMB_RESET_BITS);
  // wait a short while
  udelay (10);
#if 1
  // put card into known good state
  wr_plain (dev, offsetof(amb_mem, interrupt_control), AMB_DOORBELL_BITS);
  // clear all interrupts just in case
  wr_plain (dev, offsetof(amb_mem, interrupt), -1);
#endif
  // clear self-test done flag
  wr_plain (dev, offsetof(amb_mem, mb.loader.ready), 0);
  // take card out of reset state
  wr_plain (dev, offsetof(amb_mem, reset_control), word &~ AMB_RESET_BITS);
 
  if (diags) {
    unsigned long timeout;
    // 4.2 second wait
    timeout = HZ*42/10;
    while (timeout) {
      set_current_state(TASK_UNINTERRUPTIBLE);
      timeout = schedule_timeout (timeout);
    }
    // half second time-out
    timeout = HZ/2;
    while (!rd_plain (dev, offsetof(amb_mem, mb.loader.ready)))
      if (timeout) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        timeout = schedule_timeout (timeout);
      } else {
        PRINTD (DBG_LOAD|DBG_ERR, "reset timed out");
        return -ETIMEDOUT;
      }
 
    // get results of self-test
    // XXX double check byte-order
    word = rd_mem (dev, offsetof(amb_mem, mb.loader.result));
    if (word & SELF_TEST_FAILURE) {
      void sf (const char * msg) {
        PRINTK (KERN_ERR, "self-test failed: %s", msg);
      }
      if (word & GPINT_TST_FAILURE)
        sf ("interrupt");
      if (word & SUNI_DATA_PATTERN_FAILURE)
        sf ("SUNI data pattern");
      if (word & SUNI_DATA_BITS_FAILURE)
        sf ("SUNI data bits");
      if (word & SUNI_UTOPIA_FAILURE)
        sf ("SUNI UTOPIA interface");
      if (word & SUNI_FIFO_FAILURE)
        sf ("SUNI cell buffer FIFO");
      if (word & SRAM_FAILURE)
        sf ("bad SRAM");
      // better return value?
      return -EIO;
    }
 
  }
  return 0;
}
 
/********** transfer and start the microcode **********/
 
static int __init ucode_init (loader_block * lb, amb_dev * dev) {
  unsigned int i = 0;
  unsigned int total = 0;
  const u32 * pointer = ucode_data;
  u32 address;
  unsigned int count;
  int res;
 
  PRINTD (DBG_FLOW|DBG_LOAD, "ucode_init");
 
  while (address = ucode_regions[i].start,
         count = ucode_regions[i].count) {
    PRINTD (DBG_LOAD, "starting region (%x, %u)", address, count);
    while (count) {
      unsigned int words;
      if (count <= MAX_TRANSFER_DATA)
        words = count;
      else
        words = MAX_TRANSFER_DATA;
      total += words;
      res = loader_write (lb, dev, pointer, address, words);
      if (res)
        return res;
      res = loader_verify (lb, dev, pointer, address, words);
      if (res)
        return res;
      count -= words;
      address += sizeof(u32) * words;
      pointer += words;
    }
    i += 1;
  }
  if (*pointer == 0xdeadbeef) {
    return loader_start (lb, dev, ucode_start);
  } else {
    // cast needed as there is no %? for pointer differences
    PRINTD (DBG_LOAD|DBG_ERR,
            "offset=%li, *pointer=%x, address=%x, total=%u",
            (long) (pointer - ucode_data), *pointer, address, total);
    PRINTK (KERN_ERR, "incorrect microcode data");
    return -ENOMEM;
  }
}
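
/* ucode_init relies on two sentinels in the microcode tables (ucode_regions,
   ucode_data and ucode_start are defined elsewhere in the driver sources):
   the region list is terminated by an entry with a zero count, and the data
   array is expected to end with the word 0xdeadbeef immediately after the
   last region's data - hence the check above before starting the adapter. */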
 
/********** give adapter parameters **********/
 
static int __init amb_talk (amb_dev * dev) {
  adap_talk_block a;
  unsigned char pool;
  unsigned long timeout;
 
  u32 x (void * addr) {
    return cpu_to_be32 (virt_to_bus (addr));
  }
 
  PRINTD (DBG_FLOW, "amb_talk %p", dev);
 
  a.command_start = x (dev->cq.ptrs.start);
  a.command_end   = x (dev->cq.ptrs.limit);
  a.tx_start      = x (dev->txq.in.start);
  a.tx_end        = x (dev->txq.in.limit);
  a.txcom_start   = x (dev->txq.out.start);
  a.txcom_end     = x (dev->txq.out.limit);
 
  for (pool = 0; pool < NUM_RX_POOLS; ++pool) {
    // the other "a" items are set up by the adapter
    a.rec_struct[pool].buffer_start = x (dev->rxq[pool].in.start);
    a.rec_struct[pool].buffer_end   = x (dev->rxq[pool].in.limit);
    a.rec_struct[pool].rx_start     = x (dev->rxq[pool].out.start);
    a.rec_struct[pool].rx_end       = x (dev->rxq[pool].out.limit);
    a.rec_struct[pool].buffer_size = cpu_to_be32 (dev->rxq[pool].buffer_size);
  }
 
#ifdef AMB_NEW_MICROCODE
  // disable fast PLX prefetching
  a.init_flags = 0;
#endif
 
  // pass the structure
  wr_mem (dev, offsetof(amb_mem, doorbell), virt_to_bus (&a));
 
  // 2.2 second wait (must not touch doorbell during 2 second DMA test)
  timeout = HZ*22/10;
  while (timeout)
    timeout = schedule_timeout (timeout);
  // give the adapter another half second?
  timeout = HZ/2;
  while (rd_plain (dev, offsetof(amb_mem, doorbell)))
    if (timeout) {
      timeout = schedule_timeout (timeout);
    } else {
      PRINTD (DBG_INIT|DBG_ERR, "adapter init timed out");
      return -ETIMEDOUT;
    }
 
  return 0;
}
 
// get microcode version
static void __init amb_ucode_version (amb_dev * dev) {
  u32 major;
  u32 minor;
  command cmd;
  cmd.request = cpu_to_be32 (SRB_GET_VERSION);
  while (command_do (dev, &cmd)) {
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule();
  }
  major = be32_to_cpu (cmd.args.version.major);
  minor = be32_to_cpu (cmd.args.version.minor);
  PRINTK (KERN_INFO, "microcode version is %u.%u", major, minor);
}
 
// get end station address
static void __init amb_esi (amb_dev * dev, u8 * esi) {
  u32 lower4;
  u16 upper2;
  command cmd;
 
  // swap bits within byte to get Ethernet ordering
  u8 bit_swap (u8 byte) {
    const u8 swap[] = {
      0x0, 0x8, 0x4, 0xc,
      0x2, 0xa, 0x6, 0xe,
      0x1, 0x9, 0x5, 0xd,
      0x3, 0xb, 0x7, 0xf
    };
    return ((swap[byte & 0xf]<<4) | swap[byte>>4]);
  }
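  /* Worked example: bit_swap reverses the bit order within a byte using the
     nibble table above, so bit_swap (0x12) == 0x48, since 0001 0010
     reversed is 0100 1000. */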
 
  cmd.request = cpu_to_be32 (SRB_GET_BIA);
  while (command_do (dev, &cmd)) {
    set_current_state(TASK_UNINTERRUPTIBLE);
    schedule();
  }
  lower4 = be32_to_cpu (cmd.args.bia.lower4);
  upper2 = be32_to_cpu (cmd.args.bia.upper2);
  PRINTD (DBG_LOAD, "BIA: lower4: %08x, upper2 %04x", lower4, upper2);
 
  if (esi) {
    unsigned int i;
 
    PRINTDB (DBG_INIT, "ESI:");
    for (i = 0; i < ESI_LEN; ++i) {
      if (i < 4)
          esi[i] = bit_swap (lower4>>(8*i));
      else
          esi[i] = bit_swap (upper2>>(8*(i-4)));
      PRINTDM (DBG_INIT, " %02x", esi[i]);
    }
 
    PRINTDE (DBG_INIT, "");
  }
 
  return;
}
 
static int __init amb_init (amb_dev * dev) {
  loader_block lb;
 
  void fixup_plx_window (void) {
    // fix up the PLX-mapped window base address to match the block
    unsigned long blb;
    u32 mapreg;
    blb = virt_to_bus (&lb);
    // the kernel stack had better not ever cross a 1Gb boundary!
    mapreg = rd_plain (dev, offsetof(amb_mem, stuff[10]));
    mapreg &= ~onegigmask;
    mapreg |= blb & onegigmask;
    wr_plain (dev, offsetof(amb_mem, stuff[10]), mapreg);
    return;
  }
 
  u32 version;
 
  if (amb_reset (dev, 1)) {
    PRINTK (KERN_ERR, "card reset failed!");
  } else {
    fixup_plx_window ();
 
    if (get_loader_version (&lb, dev, &version)) {
      PRINTK (KERN_INFO, "failed to get loader version");
    } else {
      PRINTK (KERN_INFO, "loader version is %08x", version);
 
      if (ucode_init (&lb, dev)) {
        PRINTK (KERN_ERR, "microcode failure");
      } else if (create_queues (dev, cmds, txs, rxs, rxs_bs)) {
        PRINTK (KERN_ERR, "failed to get memory for queues");
      } else {
 
        if (amb_talk (dev)) {
          PRINTK (KERN_ERR, "adapter did not accept queues");
        } else {
 
          amb_ucode_version (dev);
          return 0;
 
        } /* amb_talk */
 
        destroy_queues (dev);
      } /* create_queues, ucode_init */
 
      amb_reset (dev, 0);
    } /* get_loader_version */
 
  } /* amb_reset */
 
  return -1;
}
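
/* amb_init is the bring-up order used at probe time: reset with diagnostics,
   point the PLX window at the loader block, query the loader version,
   download and verify the microcode, allocate the host queues, hand their
   addresses to the adapter (amb_talk) and finally read back the microcode
   version.  Any failure unwinds the steps already taken. */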
 
static int __init amb_probe (void) {
  struct pci_dev * pci_dev;
  int devs;
 
  void __init do_pci_device (void) {
    amb_dev * dev;
 
    // read resources from PCI configuration space
    u8 irq = pci_dev->irq;
    u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 0));
    u32 iobase = pci_resource_start (pci_dev, 1);
 
    void setup_dev (void) {
      unsigned char pool;
      memset (dev, 0, sizeof(amb_dev));
 
      // set up known dev items straight away
      dev->pci_dev = pci_dev;
 
      dev->iobase = iobase;
      dev->irq = irq;
      dev->membase = membase;
 
      // flags (currently only dead)
      dev->flags = 0;
 
      // Allocate cell rates (fibre)
      // ATM_OC3_PCR = 1555200000/8/270*260/53 - 29/53
      // to be really pedantic, this should be ATM_OC3c_PCR
      dev->tx_avail = ATM_OC3_PCR;
      dev->rx_avail = ATM_OC3_PCR;
 
#ifdef FILL_RX_POOLS_IN_BH
      // initialise bottom half
      INIT_LIST_HEAD(&dev->bh.list);
      dev->bh.sync = 0;
      dev->bh.routine = (void (*)(void *)) fill_rx_pools;
      dev->bh.data = dev;
#endif
 
      // semaphore for txer/rxer modifications - we cannot use a
      // spinlock as the critical region needs to switch processes
      init_MUTEX (&dev->vcc_sf);
      // queue manipulation spinlocks; we want atomic reads and
      // writes to the queue descriptors (handles IRQ and SMP)
      // consider replacing "int pending" -> "atomic_t available"
      // => problem related to who gets to move queue pointers
      spin_lock_init (&dev->cq.lock);
      spin_lock_init (&dev->txq.lock);
      for (pool = 0; pool < NUM_RX_POOLS; ++pool)
        spin_lock_init (&dev->rxq[pool].lock);
    }
 
    void setup_pci_dev (void) {
      unsigned char lat;
 
      /* XXX check return value */
      pci_enable_device (pci_dev);
 
      // enable bus master accesses
      pci_set_master (pci_dev);
 
      // frobnicate latency (upwards, usually)
      pci_read_config_byte (pci_dev, PCI_LATENCY_TIMER, &lat);
      if (pci_lat) {
        PRINTD (DBG_INIT, "%s PCI latency timer from %hu to %hu",
                "changing", lat, pci_lat);
        pci_write_config_byte (pci_dev, PCI_LATENCY_TIMER, pci_lat);
      } else if (lat < MIN_PCI_LATENCY) {
        PRINTK (KERN_INFO, "%s PCI latency timer from %hu to %hu",
                "increasing", lat, MIN_PCI_LATENCY);
        pci_write_config_byte (pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
      }
    }
 
    PRINTD (DBG_INFO, "found Madge ATM adapter (amb) at"
            " IO %x, IRQ %u, MEM %p", iobase, irq, membase);
 
    // check IO region
    if (check_region (iobase, AMB_EXTENT)) {
      PRINTK (KERN_ERR, "IO range already in use!");
      return;
    }
 
    dev = kmalloc (sizeof(amb_dev), GFP_KERNEL);
    if (!dev) {
      // perhaps we should be nice: deregister all adapters and abort?
      PRINTK (KERN_ERR, "out of memory!");
      return;
    }
 
    setup_dev();
 
    if (amb_init (dev)) {
      PRINTK (KERN_ERR, "adapter initialisation failure");
    } else {
 
      setup_pci_dev();
 
      // grab (but share) IRQ and install handler
      if (request_irq (irq, interrupt_handler, SA_SHIRQ, DEV_LABEL, dev)) {
        PRINTK (KERN_ERR, "request IRQ failed!");
        // free_irq is at "endif"
      } else {
 
        // reserve IO region
        request_region (iobase, AMB_EXTENT, DEV_LABEL);
 
        dev->atm_dev = atm_dev_register (DEV_LABEL, &amb_ops, -1, NULL);
        if (!dev->atm_dev) {
          PRINTD (DBG_ERR, "failed to register Madge ATM adapter");
        } else {
 
          PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
                  dev->atm_dev->number, dev, dev->atm_dev);
          dev->atm_dev->dev_data = (void *) dev;
 
          // register our address
          amb_esi (dev, dev->atm_dev->esi);
 
          // 0 bits for vpi, 10 bits for vci
          dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
          dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
 
          // update count and linked list
          ++devs;
          dev->prev = amb_devs;
          amb_devs = dev;
 
          // enable host interrupts
          interrupts_on (dev);
 
          // success
          return;
 
          // not currently reached
          atm_dev_deregister (dev->atm_dev);
        } /* atm_dev_register */
 
        release_region (iobase, AMB_EXTENT);
        free_irq (irq, dev);
      } /* request_region, request_irq */
 
      amb_reset (dev, 0);
    } /* amb_init */
 
    kfree (dev);
  } /* kmalloc, end-of-fn */
 
  PRINTD (DBG_FLOW, "amb_probe");
 
  if (!pci_present())
    return 0;
 
  devs = 0;
  pci_dev = NULL;
  while ((pci_dev = pci_find_device
          (PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR, pci_dev)
          ))
    do_pci_device();
 
  pci_dev = NULL;
  while ((pci_dev = pci_find_device
          (PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD, pci_dev)
          ))
    PRINTK (KERN_ERR, "skipped broken (PLX rev 2) card");
 
  return devs;
}
 
static void __init amb_check_args (void) {
  unsigned char pool;
  unsigned int max_rx_size;
 
#ifdef DEBUG_AMBASSADOR
  PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
#else
  if (debug)
    PRINTK (KERN_NOTICE, "no debugging support");
#endif
 
  if (cmds < MIN_QUEUE_SIZE)
    PRINTK (KERN_NOTICE, "cmds has been raised to %u",
            cmds = MIN_QUEUE_SIZE);
 
  if (txs < MIN_QUEUE_SIZE)
    PRINTK (KERN_NOTICE, "txs has been raised to %u",
            txs = MIN_QUEUE_SIZE);
 
  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    if (rxs[pool] < MIN_QUEUE_SIZE)
      PRINTK (KERN_NOTICE, "rxs[%hu] has been raised to %u",
              pool, rxs[pool] = MIN_QUEUE_SIZE);
 
  // buffer sizes should be greater than zero and strictly increasing
  max_rx_size = 0;
  for (pool = 0; pool < NUM_RX_POOLS; ++pool)
    if (rxs_bs[pool] <= max_rx_size)
      PRINTK (KERN_NOTICE, "useless pool (rxs_bs[%hu] = %u)",
              pool, rxs_bs[pool]);
    else
      max_rx_size = rxs_bs[pool];
 
  if (rx_lats < MIN_RX_BUFFERS)
    PRINTK (KERN_NOTICE, "rx_lats has been raised to %u",
            rx_lats = MIN_RX_BUFFERS);
 
  return;
}
 
/********** module stuff **********/
 
#ifdef MODULE
EXPORT_NO_SYMBOLS;
 
MODULE_AUTHOR(maintainer_string);
MODULE_DESCRIPTION(description_string);
MODULE_LICENSE("GPL");
MODULE_PARM(debug,   "h");
MODULE_PARM(cmds,    "i");
MODULE_PARM(txs,     "i");
MODULE_PARM(rxs,     __MODULE_STRING(NUM_RX_POOLS) "i");
MODULE_PARM(rxs_bs,  __MODULE_STRING(NUM_RX_POOLS) "i");
MODULE_PARM(rx_lats, "i");
MODULE_PARM(pci_lat, "b");
MODULE_PARM_DESC(debug,   "debug bitmap, see .h file");
MODULE_PARM_DESC(cmds,    "number of command queue entries");
MODULE_PARM_DESC(txs,     "number of TX queue entries");
MODULE_PARM_DESC(rxs,     "number of RX queue entries [" __MODULE_STRING(NUM_RX_POOLS) "]");
MODULE_PARM_DESC(rxs_bs,  "size of RX buffers [" __MODULE_STRING(NUM_RX_POOLS) "]");
MODULE_PARM_DESC(rx_lats, "number of extra buffers to cope with RX latencies");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
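
/* Illustrative module load (parameter values are examples only; rxs and
   rxs_bs each take NUM_RX_POOLS comma-separated values, omitted here, and
   the module object is assumed to be called ambassador.o):

     insmod ambassador.o debug=0 cmds=8 txs=32 rx_lats=7 pci_lat=64
*/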
 
/********** module entry **********/
 
int init_module (void) {
  int devs;
 
  PRINTD (DBG_FLOW|DBG_INIT, "init_module");
 
  // sanity check - cast needed as printk does not support %Zu
  if (sizeof(amb_mem) != 4*16 + 4*12) {
    PRINTK (KERN_ERR, "Fix amb_mem (is %lu words).",
            (unsigned long) sizeof(amb_mem));
    return -ENOMEM;
  }
 
  show_version();
 
  amb_check_args();
 
  // get the juice
  devs = amb_probe();
 
  if (devs) {
    init_timer (&housekeeping);
    housekeeping.function = do_housekeeping;
    // paranoia
    housekeeping.data = 1;
    set_timer (&housekeeping, 0);
  } else {
    PRINTK (KERN_INFO, "no (usable) adapters found");
  }
 
  return devs ? 0 : -ENODEV;
}
 
/********** module exit **********/
 
void cleanup_module (void) {
  amb_dev * dev;
 
  PRINTD (DBG_FLOW|DBG_INIT, "cleanup_module");
 
  // paranoia
  housekeeping.data = 0;
  del_timer (&housekeeping);
 
  while (amb_devs) {
    dev = amb_devs;
    amb_devs = dev->prev;
 
    PRINTD (DBG_INFO|DBG_INIT, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
    // the drain should not be necessary
    drain_rx_pools (dev);
    interrupts_off (dev);
    amb_reset (dev, 0);
    destroy_queues (dev);
    atm_dev_deregister (dev->atm_dev);
    free_irq (dev->irq, dev);
    release_region (dev->iobase, AMB_EXTENT);
    kfree (dev);
  }
 
  return;
}
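
/* Teardown mirrors probe in reverse: stop the housekeeping timer, then for
   each device drain the RX pools, mask interrupts, reset the card, free the
   queue block, deregister from the ATM layer and release the IRQ and IO
   region before freeing the device structure. */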
 
#else
 
/********** monolithic entry **********/
 
int __init amb_detect (void) {
  int devs;
 
  // sanity check - cast needed as printk does not support %Zu
  if (sizeof(amb_mem) != 4*16 + 4*12) {
    PRINTK (KERN_ERR, "Fix amb_mem (is %lu words).",
            (unsigned long) sizeof(amb_mem));
    return 0;
  }
 
  show_version();
 
  amb_check_args();
 
  // get the juice
  devs = amb_probe();
 
  if (devs) {
    init_timer (&housekeeping);
    housekeeping.function = do_housekeeping;
    // paranoia
    housekeeping.data = 1;
    set_timer (&housekeeping, 0);
  } else {
    PRINTK (KERN_INFO, "no (usable) adapters found");
  }
 
  return devs;
}
 
#endif
