/*
  Madge Horizon ATM Adapter driver.
  Copyright (C) 1995-1999  Madge Networks Ltd.

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

  The GNU GPL is contained in /usr/doc/copyright/GPL on a Debian
  system and in the file COPYING in the Linux kernel source.
*/

/*
  IMPORTANT NOTE: Madge Networks no longer makes the adapters
  supported by this driver and makes no commitment to maintain it.
*/
 
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
#include <linux/sonet.h>
#include <linux/skbuff.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/uio.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include <asm/system.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <asm/string.h>
#include <asm/byteorder.h>

#include "horizon.h"

#define maintainer_string "Giuliano Procida at Madge Networks <gprocida@madge.com>"
#define description_string "Madge ATM Horizon [Ultra] driver"
#define version_string "1.2.1"

static inline void __init show_version (void) {
  printk ("%s version %s\n", description_string, version_string);
}

/*

  CREDITS

  Driver and documentation by:

  Chris Aston        Madge Networks
  Giuliano Procida   Madge Networks
  Simon Benham       Madge Networks
  Simon Johnson      Madge Networks
  Various Others     Madge Networks

  Some inspiration taken from other drivers by:

  Alexandru Cucos    UTBv
  Kari Mettinen      University of Helsinki
  Werner Almesberger EPFL LRC

  Theory of Operation

  I Hardware, detection, initialisation and shutdown.

  1. Supported Hardware

  This driver should handle all variants of the PCI Madge ATM adapters
  with the Horizon chipset. These are all PCI cards supporting PIO, BM
  DMA and a form of MMIO (registers only, not internal RAM).

  The driver is only known to work with SONET and UTP Horizon Ultra
  cards at 155Mb/s. However, code is in place to deal with both the
  original Horizon and 25Mb/s operation.

  There are two revisions of the Horizon ASIC: the original and the
  Ultra. Details of hardware bugs are in section III.

  The ASIC version can be distinguished by chip markings but is NOT
  indicated by the PCI revision (all adapters seem to have PCI rev 1).

  I believe that:

  Horizon       => Collage  25 PCI Adapter (UTP and STP)
  Horizon Ultra => Collage 155 PCI Client (UTP or SONET)
  Ambassador x  => Collage 155 PCI Server (completely different)

  Horizon (25Mb/s) is fitted with UTP and STP connectors. It seems to
  have a Madge B154 plus glue logic serializer. I have also found a
  really ancient version of this with slightly different glue. It
  comes with the revision 0 (140-025-01) ASIC.

  Horizon Ultra (155Mb/s) is fitted with either a Pulse Medialink
  output (UTP) or an HP HFBR 5205 output (SONET). It has either
  Madge's SAMBA framer or a SUNI-lite device (early versions). It
  comes with the revision 1 (140-027-01) ASIC.

  2. Detection

  All Horizon-based cards present with the same PCI Vendor and Device
  IDs. The standard Linux 2.2 PCI API is used to locate any cards and
  to enable bus-mastering (with appropriate latency).

  ATM_LAYER_STATUS in the control register distinguishes between the
  two possible physical layers (25 and 155). It is not clear whether
  the 155 cards can also operate at 25Mb/s. We rely on the fact that a
  card operates at 155 if and only if it has the newer Horizon Ultra
  ASIC.

  For 155 cards the two possible framers are probed for and then set
  up for loop-timing.

  3. Initialisation

  The card is reset and then put into a known state. The physical
  layer is configured for normal operation at the appropriate speed;
  in the case of the 155 cards, the framer is initialised with
  line-based timing; the internal RAM is zeroed and the allocation of
  buffers for RX and TX is made; the Burnt In Address is read and
  copied to the ATM ESI; various policy settings for RX (VPI bits,
  unknown VCs, OAM cells) are made. Ideally all policy items should be
  configurable at module load (if not actually on-demand); however,
  only the vpi vs vci bit allocation can be specified at insmod.

  4. Shutdown

  This is in response to module_cleanup. No VCs are in use and the card
  should be idle; it is reset.

  II Driver software (as it should be)

  0. Traffic Parameters

  The traffic classes (not an enumeration) are currently: ATM_NONE (no
  traffic), ATM_UBR, ATM_CBR, ATM_VBR, ATM_ABR and ATM_ANYCLASS
  (compatible with everything). Together with (perhaps only some of)
  the following items they make up the traffic specification.

  struct atm_trafprm {
    unsigned char traffic_class; traffic class (ATM_UBR, ...)
    int           max_pcr;       maximum PCR in cells per second
    int           pcr;           desired PCR in cells per second
    int           min_pcr;       minimum PCR in cells per second
    int           max_cdv;       maximum CDV in microseconds
    int           max_sdu;       maximum SDU in bytes
  };

  Note that these denote bandwidth available, not bandwidth used; the
  possibilities according to ATMF are:

  Real Time (cdv and max CDT given)

  CBR(pcr)             pcr bandwidth always available
  rtVBR(pcr,scr,mbs)   scr bandwidth always available, up to pcr at mbs too

  Non Real Time

  nrtVBR(pcr,scr,mbs)  scr bandwidth always available, up to pcr at mbs too
  UBR()
  ABR(mcr,pcr)         mcr bandwidth always available, up to pcr (depending) too

  mbs is max burst size (bucket)
  pcr and scr have associated cdvt values
  mcr is like scr but has no cdvt
  cdvt may differ at each hop

  Some of the above items are qos items (as opposed to traffic
  parameters). We have nothing to do with qos. All except ABR can have
  their traffic parameters converted to GCRA parameters. The GCRA may
  be implemented as a (real-number) leaky bucket. The GCRA can be used
  in complicated ways by switches and in simpler ways by end-stations.
  It can be used both to filter incoming cells and shape out-going
  cells.

  ATM Linux actually supports:

  ATM_NONE() (no traffic in this direction)
  ATM_UBR(max_frame_size)
  ATM_CBR(max/min_pcr, max_cdv, max_frame_size)

  A traffic specification consists of the AAL type and separate
  traffic specifications for either direction. In ATM Linux it is:

  struct atm_qos {
    struct atm_trafprm txtp;
    struct atm_trafprm rxtp;
    unsigned char aal;
  };

  AAL types are:

  ATM_NO_AAL    AAL not specified
  ATM_AAL0      "raw" ATM cells
  ATM_AAL1      AAL1 (CBR)
  ATM_AAL2      AAL2 (VBR)
  ATM_AAL34     AAL3/4 (data)
  ATM_AAL5      AAL5 (data)
  ATM_SAAL      signaling AAL
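
  As an illustration (a sketch of a request as a user might make it, not
  code from this driver), a CBR connection using AAL5 in both directions
  could be described with the structures above roughly as follows:

  struct atm_qos qos;
  memset (&qos, 0, sizeof(qos));
  qos.aal                = ATM_AAL5;
  qos.txtp.traffic_class = ATM_CBR;
  qos.txtp.max_pcr       = 10000;    (cells per second)
  qos.txtp.max_sdu       = 9000;     (bytes; cf. max_tx_size below)
  qos.rxtp               = qos.txtp;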

  The Horizon has support for AAL frame types: 0, 3/4 and 5. However,
  it does not implement AAL 3/4 SAR and it has a different notion of
  "raw cell" to ATM Linux's (48 bytes vs. 52 bytes) so neither is
  supported by this driver.

  The Horizon has limited support for ABR (including UBR), VBR and
  CBR. Each TX channel has a bucket (containing up to 31 cell units)
  and two timers (PCR and SCR) associated with it that can be used to
  govern cell emissions and host notification (in the case of ABR this
  is presumably so that RM cells may be emitted at appropriate times).
  The timers may either be disabled or may be set to any of 240 values
  (determined by the clock crystal, a fixed (?) per-device divider, a
  configurable divider and a configurable timer preload value).

  At the moment only UBR and CBR are supported by the driver. VBR will
  be supported as soon as ATM for Linux supports it. ABR support is
  very unlikely as RM cell handling is completely up to the driver.

  1. TX (TX channel setup and TX transfer)

  The TX half of the driver owns the TX Horizon registers. The TX
  component in the IRQ handler is the BM completion handler. This can
  only be entered when tx_busy is true (enforced by hardware). The
  other TX component can only be entered when tx_busy is false
  (enforced by driver). So TX is single-threaded.

  Apart from a minor optimisation to not re-select the last channel,
  the TX send component works as follows:

  Atomic test and set tx_busy until we succeed; we should implement
  some sort of timeout so that tx_busy will never be stuck at true.

  If no TX channel is set up for this VC we wait for an idle one (if
  necessary) and set it up.

  At this point we have a TX channel ready for use. We wait for enough
  buffers to become available, then start a TX transmit (set the TX
  descriptor, schedule transfer, exit).

  The IRQ component handles TX completion (stats, free buffer, tx_busy
  unset, exit). We also re-schedule further transfers for the same
  frame if needed.

  TX setup in more detail:

  TX open is a nop; the relevant information is held in the hrz_vcc
  (vcc->dev_data) structure and is "cached" on the card.

  TX close gets the TX lock and clears the channel from the "cache".
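
  In outline (a sketch of the sequence just described; the helpers named
  here appear later in this file, with error handling omitted):

    hrz_send:
      if (tx_hold (dev)) give up        (interrupted while waiting for tx_busy)
      if (no TX channel is cached for this VC)
        tx_channel = setup_idle_tx_channel (dev, vcc);
      wait for enough free buffers on the card;
      write the TX descriptor and start the transfer: tx_schedule (dev, 0);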

  2. RX (Data Available and RX transfer)

  The RX half of the driver owns the RX registers. There are two RX
  components in the IRQ handler: the data available handler deals with
  fresh data that has arrived on the card; the BM completion handler
  is very similar to the TX completion handler. The data available
  handler grabs the rx_lock, which is released only once the data has
  been discarded or completely transferred to the host. The BM
  completion handler only runs when the lock is held; the data
  available handler is locked out over the same period.

  Data available on the card triggers an interrupt. If the data is not
  suitable for our existing RX channels or we cannot allocate a buffer,
  it is flushed. Otherwise an RX receive is scheduled. Multiple RX
  transfers may be scheduled for the same frame.
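
  In outline (a sketch of the path just described; these helpers appear
  later in this file):

    rx_data_av_handler:
      if (test_and_set_bit (rx_busy, ...)) return      (transfer in progress)
      entry = rx_queue_entry_next (dev);               (length, channel, flags)
      look up the VCC listening on that channel;
      skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
      if (all is well) rx_schedule (dev, 0); otherwise flush the frame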

  RX setup in more detail:

  RX open...
  RX close...

  III Hardware Bugs

  0. Byte vs Word addressing of adapter RAM.

  A design feature; see the .h file (especially the memory map).

  1. Bus Master Data Transfers (original Horizon only, fixed in Ultra)

  The host must not start a transmit direction transfer at a
  non-four-byte boundary in host memory. Instead, the host should first
  perform a one-byte, a two-byte, or a one-byte-followed-by-two-byte
  transfer so that the rest of the transfer starts on a four-byte
  boundary. RX is OK.
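
  For example, the fix-up described above would look roughly like this
  (a sketch, not code from this driver):

    head = (4 - ((unsigned long) addr & 3)) & 3;    0 to 3 bytes
    PIO the first head bytes, then bus-master the remaining len - head
    bytes, which now start on a four-byte boundary.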

  Simultaneous transmit and receive direction bus master transfers are
  not allowed.

  The simplest solution to these two is to always do PIO (never DMA)
  in the TX direction on the original Horizon. More complicated
  solutions are likely to hurt my brain.

  2. Loss of buffer on close VC

  When a VC is being closed, the buffer associated with it is not
  returned to the pool. The host must store the reference to this
  buffer and, when opening a new VC, give it to that new VC.

  The host intervention currently consists of stacking such a buffer
  pointer at VC close and checking the stack at VC open.
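
  Concretely (this is what hrz_close_rx and hrz_open_rx below do):

    at close:  dev->spare_buffers[dev->noof_spare_buffers++] = buf;
    at open:   if (dev->noof_spare_buffers)
                 buf = dev->spare_buffers[--dev->noof_spare_buffers];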

  3. Failure to close a VC

  If a VC is currently receiving a frame then closing the VC may fail
  and the frame continues to be received.

  The solution is to make sure any received frames are flushed when
  ready. This is currently done just before the solution to 2.

  4. PCI bus (original Horizon only, fixed in Ultra)

  Reading from the data port prior to initialisation will hang the PCI
  bus. Just don't do that then! We don't.

  IV To Do List

  . Timer code may be broken.

  . Allow users to specify buffer allocation split for TX and RX.

  . Deal once and for all with buggy VC close.

  . Handle interrupted and/or non-blocking operations.

  . Change some macros to functions and move from .h to .c.

  . Try to limit the number of TX frames each VC may have queued, in
    order to reduce the chances of TX buffer exhaustion.

  . Implement VBR (bucket and timers not understood) and ABR (need to
    do RM cells manually); also no Linux support for either.

  . Implement QoS changes on open VCs (involves extracting parts of VC open
    and close into separate functions and using them to make changes).

*/
355
 
356
/********** globals **********/
357
 
358
static void do_housekeeping (unsigned long arg);
359
 
360
static unsigned short debug = 0;
361
static unsigned short vpi_bits = 0;
362
static int max_tx_size = 9000;
363
static int max_rx_size = 9000;
364
static unsigned char pci_lat = 0;
365
 
366
/********** access functions **********/
367
 
368
/* Read / Write Horizon registers */
369
static inline void wr_regl (const hrz_dev * dev, unsigned char reg, u32 data) {
370
  outl (cpu_to_le32 (data), dev->iobase + reg);
371
}
372
 
373
static inline u32 rd_regl (const hrz_dev * dev, unsigned char reg) {
374
  return le32_to_cpu (inl (dev->iobase + reg));
375
}
376
 
377
static inline void wr_regw (const hrz_dev * dev, unsigned char reg, u16 data) {
378
  outw (cpu_to_le16 (data), dev->iobase + reg);
379
}
380
 
381
static inline u16 rd_regw (const hrz_dev * dev, unsigned char reg) {
382
  return le16_to_cpu (inw (dev->iobase + reg));
383
}
384
 
385
static inline void wrs_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
386
  outsb (dev->iobase + reg, addr, len);
387
}
388
 
389
static inline void rds_regb (const hrz_dev * dev, unsigned char reg, void * addr, u32 len) {
390
  insb (dev->iobase + reg, addr, len);
391
}
392
 
393
/* Read / Write to a given address in Horizon buffer memory.
394
   Interrupts must be disabled between the address register and data
395
   port accesses as these must form an atomic operation. */
396
static inline void wr_mem (const hrz_dev * dev, HDW * addr, u32 data) {
397
  // wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr);
398
  wr_regl (dev, MEM_WR_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
399
  wr_regl (dev, MEMORY_PORT_OFF, data);
400
}
401
 
402
static inline u32 rd_mem (const hrz_dev * dev, HDW * addr) {
403
  // wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr);
404
  wr_regl (dev, MEM_RD_ADDR_REG_OFF, (addr - (HDW *) 0) * sizeof(HDW));
405
  return rd_regl (dev, MEMORY_PORT_OFF);
406
}
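
/* For example, the locked access pattern used elsewhere in this driver
   (hrz_open_rx and hrz_close_rx below) looks like:

     unsigned long flags;
     spin_lock_irqsave (&dev->mem_lock, flags);
     channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
     spin_unlock_irqrestore (&dev->mem_lock, flags);
*/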
407
 
408
static inline void wr_framer (const hrz_dev * dev, u32 addr, u32 data) {
409
  wr_regl (dev, MEM_WR_ADDR_REG_OFF, (u32) addr | 0x80000000);
410
  wr_regl (dev, MEMORY_PORT_OFF, data);
411
}
412
 
413
static inline u32 rd_framer (const hrz_dev * dev, u32 addr) {
414
  wr_regl (dev, MEM_RD_ADDR_REG_OFF, (u32) addr | 0x80000000);
415
  return rd_regl (dev, MEMORY_PORT_OFF);
416
}
417
 
418
/********** specialised access functions **********/
419
 
420
/* RX */
421
 
422
static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
423
  wr_regw (dev, RX_CHANNEL_PORT_OFF, FLUSH_CHANNEL | channel);
424
  return;
425
}
426
 
427
static inline void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
428
  while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
429
    ;
430
  return;
431
}
432
 
433
static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
434
  wr_regw (dev, RX_CHANNEL_PORT_OFF, channel);
435
  return;
436
}
437
 
438
static inline void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
439
  while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
440
    ;
441
  return;
442
}
443
 
444
/* TX */
445
 
446
static inline void SELECT_TX_CHANNEL (hrz_dev * dev, u16 tx_channel) {
447
  wr_regl (dev, TX_CHANNEL_PORT_OFF, tx_channel);
448
  return;
449
}
450
 
451
/* Update or query one configuration parameter of a particular channel. */
452
 
453
static inline void update_tx_channel_config (hrz_dev * dev, short chan, u8 mode, u16 value) {
454
  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
455
           chan * TX_CHANNEL_CONFIG_MULT | mode);
456
  wr_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF, value);
  return;
458
}
459
 
460
static inline u16 query_tx_channel_config (hrz_dev * dev, short chan, u8 mode) {
461
  wr_regw (dev, TX_CHANNEL_CONFIG_COMMAND_OFF,
462
           chan * TX_CHANNEL_CONFIG_MULT | mode);
463
  return rd_regw (dev, TX_CHANNEL_CONFIG_DATA_OFF);
464
}
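
/* For example (a sketch only; setup_idle_tx_channel below is the real
   user), the rate type and PCR timer of TX channel 3 could be set and
   read back with:

     update_tx_channel_config (dev, 3, RATE_TYPE_ACCESS, vcc->tx_xbr_bits);
     update_tx_channel_config (dev, 3, PCR_TIMER_ACCESS, vcc->tx_pcr_bits);
     pcr_bits = query_tx_channel_config (dev, 3, PCR_TIMER_ACCESS);
*/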
465
 
466
/********** dump functions **********/
467
 
468
static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * skb) {
469
#ifdef DEBUG_HORIZON
470
  unsigned int i;
471
  unsigned char * data = skb->data;
472
  PRINTDB (DBG_DATA, "%s(%u) ", prefix, vc);
473
  for (i=0; i<skb->len && i < 256;i++)
474
    PRINTDM (DBG_DATA, "%02x ", data[i]);
475
  PRINTDE (DBG_DATA,"");
476
#else
477
  (void) prefix;
478
  (void) vc;
479
  (void) skb;
480
#endif
481
  return;
482
}
483
 
484
static inline void dump_regs (hrz_dev * dev) {
485
#ifdef DEBUG_HORIZON
486
  PRINTD (DBG_REGS, "CONTROL 0: %#x", rd_regl (dev, CONTROL_0_REG));
487
  PRINTD (DBG_REGS, "RX CONFIG: %#x", rd_regw (dev, RX_CONFIG_OFF));
488
  PRINTD (DBG_REGS, "TX CONFIG: %#x", rd_regw (dev, TX_CONFIG_OFF));
489
  PRINTD (DBG_REGS, "TX STATUS: %#x", rd_regw (dev, TX_STATUS_OFF));
490
  PRINTD (DBG_REGS, "IRQ ENBLE: %#x", rd_regl (dev, INT_ENABLE_REG_OFF));
491
  PRINTD (DBG_REGS, "IRQ SORCE: %#x", rd_regl (dev, INT_SOURCE_REG_OFF));
492
#else
493
  (void) dev;
494
#endif
495
  return;
496
}
497
 
498
static inline void dump_framer (hrz_dev * dev) {
499
#ifdef DEBUG_HORIZON
500
  unsigned int i;
501
  PRINTDB (DBG_REGS, "framer registers:");
502
  for (i = 0; i < 0x10; ++i)
503
    PRINTDM (DBG_REGS, " %02x", rd_framer (dev, i));
504
  PRINTDE (DBG_REGS,"");
505
#else
506
  (void) dev;
507
#endif
508
  return;
509
}
510
 
511
/********** VPI/VCI <-> (RX) channel conversions **********/
512
 
513
/* RX channels are 10 bit integers, these fns are quite paranoid */
514
 
515
static inline int channel_to_vpivci (const u16 channel, short * vpi, int * vci) {
516
  unsigned short vci_bits = 10 - vpi_bits;
517
  if ((channel & RX_CHANNEL_MASK) == channel) {
518
    *vci = channel & ~((~0)<<vci_bits);
519
    *vpi = channel >> vci_bits;
520
    return channel ? 0 : -EINVAL;
521
  }
522
  return -EINVAL;
523
}
524
 
525
static inline int vpivci_to_channel (u16 * channel, const short vpi, const int vci) {
526
  unsigned short vci_bits = 10 - vpi_bits;
527
  if (0 <= vpi && vpi < 1<<vpi_bits && 0 <= vci && vci < 1<<vci_bits) {
528
    *channel = vpi<<vci_bits | vci;
529
    return *channel ? 0 : -EINVAL;
530
  }
531
  return -EINVAL;
532
}
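
/* For example, with the default vpi_bits = 0 the channel number is simply
   the VCI (1 to 1023). With vpi_bits = 2 (so vci_bits = 8), VPI 1 / VCI 42
   maps to channel (1 << 8) | 42 = 298: the low 8 bits carry the VCI and the
   top 2 bits the VPI. Channel 0 is rejected by both functions. */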
533
 
534
/********** decode RX queue entries **********/
535
 
536
static inline u16 rx_q_entry_to_length (u32 x) {
537
  return x & RX_Q_ENTRY_LENGTH_MASK;
538
}
539
 
540
static inline u16 rx_q_entry_to_rx_channel (u32 x) {
541
  return (x>>RX_Q_ENTRY_CHANNEL_SHIFT) & RX_CHANNEL_MASK;
542
}
543
 
544
/* Cell Transmit Rate Values
545
 *
546
 * the cell transmit rate (cells per sec) can be set to a variety of
547
 * different values by specifying two parameters: a timer preload from
548
 * 1 to 16 (stored as 0 to 15) and a clock divider (2 to the power of
549
 * an exponent from 0 to 14; the special value 15 disables the timer).
550
 *
551
 * cellrate = baserate / (preload * 2^divider)
552
 *
553
 * The maximum cell rate that can be specified is therefore just the
554
 * base rate. Halving the preload is equivalent to adding 1 to the
555
 * divider and so values 1 to 8 of the preload are redundant except
556
 * in the case of a maximal divider (14).
557
 *
558
 * Given a desired cell rate, an algorithm to determine the preload
559
 * and divider is:
560
 *
561
 * a) x = baserate / cellrate, want p * 2^d = x (as far as possible)
562
 * b) if x > 16 * 2^14 then set p = 16, d = 14 (min rate), done
563
 *    if x <= 16 then set p = x, d = 0 (high rates), done
564
 * c) now have 16 < x <= 2^18, or 1 < x/16 <= 2^14 and we want to
565
 *    know n such that 2^(n-1) < x/16 <= 2^n, so slide a bit until
566
 *    we find the range (n will be between 1 and 14), set d = n
567
 * d) Also have 8 < x/2^n <= 16, so set p nearest x/2^n
568
 *
569
 * The algorithm used below is a minor variant of the above.
570
 *
571
 * The base rate is derived from the oscillator frequency (Hz) using a
572
 * fixed divider:
573
 *
574
 * baserate = freq / 32 in the case of some Unknown Card
575
 * baserate = freq / 8  in the case of the Horizon        25
576
 * baserate = freq / 8  in the case of the Horizon Ultra 155
577
 *
578
 * The Horizon cards have oscillators and base rates as follows:
579
 *
580
 * Card               Oscillator  Base Rate
581
 * Unknown Card       33 MHz      1.03125 MHz (33 MHz = PCI freq)
582
 * Horizon        25  32 MHz      4       MHz
583
 * Horizon Ultra 155  40 MHz      5       MHz
584
 *
585
 * The following defines give the base rates in Hz. These were
586
 * previously a factor of 100 larger, no doubt someone was using
587
 * cps*100.
588
 */
589
 
590
#define BR_UKN 1031250l
591
#define BR_HRZ 4000000l
592
#define BR_ULT 5000000l
593
 
594
// d is an exponent
595
#define CR_MIND 0
596
#define CR_MAXD 14
597
 
598
// p ranges from 1 to a power of 2
599
#define CR_MAXPEXP 4
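
/* Worked example (a sketch using the algorithm described above): on a
   Horizon Ultra the base rate is 5000000 cells/s, so a requested rate of
   100 cells/s gives x = 50000; x/16 = 3125 lies between 2^11 and 2^12, so
   d = 12, and p nearest x/2^12 (about 12.2) is 12. The achievable rate is
   then 5000000 / (12 * 2^12), about 101.7 cells/s, which make_rate() below
   reports (rounded up) as an actual rate of 102. */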
600
 
601
static int make_rate (const hrz_dev * dev, u32 c, rounding r,
602
                      u16 * bits, unsigned int * actual)
603
{
604
        // note: rounding the rate down means rounding 'p' up
605
        const unsigned long br = test_bit(ultra, &dev->flags) ? BR_ULT : BR_HRZ;
606
 
607
        u32 div = CR_MIND;
608
        u32 pre;
609
 
610
        // br_exp and br_man are used to avoid overflowing (c*maxp*2^d) in
611
        // the tests below. We could think harder about exact possibilities
612
        // of failure...
613
 
614
        unsigned long br_man = br;
615
        unsigned int br_exp = 0;
616
 
617
        PRINTD (DBG_QOS|DBG_FLOW, "make_rate b=%lu, c=%u, %s", br, c,
618
                r == round_up ? "up" : r == round_down ? "down" : "nearest");
619
 
620
        // avoid div by zero
621
        if (!c) {
622
                PRINTD (DBG_QOS|DBG_ERR, "zero rate is not allowed!");
623
                return -EINVAL;
624
        }
625
 
626
        while (br_exp < CR_MAXPEXP + CR_MIND && (br_man % 2 == 0)) {
627
                br_man = br_man >> 1;
628
                ++br_exp;
629
        }
630
        // (br >>br_exp) <<br_exp == br and
631
        // br_exp <= CR_MAXPEXP+CR_MIND
632
 
633
        if (br_man <= (c << (CR_MAXPEXP+CR_MIND-br_exp))) {
634
                // Equivalent to: B <= (c << (MAXPEXP+MIND))
635
                // take care of rounding
636
                switch (r) {
637
                        case round_down:
638
                                pre = (br+(c<<div)-1)/(c<<div);
639
                                // but p must be non-zero
640
                                if (!pre)
641
                                        pre = 1;
642
                                break;
643
                        case round_nearest:
644
                                pre = (br+(c<<div)/2)/(c<<div);
645
                                // but p must be non-zero
646
                                if (!pre)
647
                                        pre = 1;
648
                                break;
649
                        default:        /* round_up */
650
                                pre = br/(c<<div);
651
                                // but p must be non-zero
652
                                if (!pre)
653
                                        return -EINVAL;
654
                }
655
                PRINTD (DBG_QOS, "A: p=%u, d=%u", pre, div);
656
                goto got_it;
657
        }
658
 
659
        // at this point we have
660
        // d == MIND and (c << (MAXPEXP+MIND)) < B
661
        while (div < CR_MAXD) {
662
                div++;
663
                if (br_man <= (c << (CR_MAXPEXP+div-br_exp))) {
664
                        // Equivalent to: B <= (c << (MAXPEXP+d))
665
                        // c << (MAXPEXP+d-1) < B <= c << (MAXPEXP+d)
666
                        // 1 << (MAXPEXP-1) < B/2^d/c <= 1 << MAXPEXP
667
                        // MAXP/2 < B/c2^d <= MAXP
668
                        // take care of rounding
669
                        switch (r) {
670
                                case round_down:
671
                                        pre = (br+(c<<div)-1)/(c<<div);
672
                                        break;
673
                                case round_nearest:
674
                                        pre = (br+(c<<div)/2)/(c<<div);
675
                                        break;
676
                                default: /* round_up */
677
                                        pre = br/(c<<div);
678
                        }
679
                        PRINTD (DBG_QOS, "B: p=%u, d=%u", pre, div);
680
                        goto got_it;
681
                }
682
        }
683
        // at this point we have
684
        // d == MAXD and (c << (MAXPEXP+MAXD)) < B
685
        // but we cannot go any higher
686
        // take care of rounding
687
        if (r == round_down)
688
                return -EINVAL;
689
        pre = 1 << CR_MAXPEXP;
690
        PRINTD (DBG_QOS, "C: p=%u, d=%u", pre, div);
691
got_it:
692
        // paranoia
693
        if (div > CR_MAXD || (!pre) || pre > 1<<CR_MAXPEXP) {
694
                PRINTD (DBG_QOS, "set_cr internal failure: d=%u p=%u",
695
                        div, pre);
696
                return -EINVAL;
697
        } else {
698
                if (bits)
699
                        *bits = (div<<CLOCK_SELECT_SHIFT) | (pre-1);
700
                if (actual) {
701
                        *actual = (br + (pre<<div) - 1) / (pre<<div);
702
                        PRINTD (DBG_QOS, "actual rate: %u", *actual);
703
                }
704
                return 0;
705
        }
706
}
707
 
708
static int make_rate_with_tolerance (const hrz_dev * dev, u32 c, rounding r, unsigned int tol,
709
                                     u16 * bit_pattern, unsigned int * actual) {
710
  unsigned int my_actual;
711
 
712
  PRINTD (DBG_QOS|DBG_FLOW, "make_rate_with_tolerance c=%u, %s, tol=%u",
713
          c, (r == round_up) ? "up" : (r == round_down) ? "down" : "nearest", tol);
714
 
715
  if (!actual)
716
    // actual rate is not returned
717
    actual = &my_actual;
718
 
719
  if (make_rate (dev, c, round_nearest, bit_pattern, actual))
720
    // should never happen as round_nearest always succeeds
721
    return -1;
722
 
723
  if (c - tol <= *actual && *actual <= c + tol)
724
    // within tolerance
725
    return 0;
726
  else
727
    // intolerant, try rounding instead
728
    return make_rate (dev, c, r, bit_pattern, actual);
729
}
730
 
731
/********** Listen on a VC **********/
732
 
733
static int hrz_open_rx (hrz_dev * dev, u16 channel) {
734
  // is there any guarantee that we don't get two simultaneous
735
  // identical calls of this function from different processes? yes
736
  // rate_lock
737
  unsigned long flags;
738
  u32 channel_type; // u16?
739
 
740
  u16 buf_ptr = RX_CHANNEL_IDLE;
741
 
742
  rx_ch_desc * rx_desc = &memmap->rx_descs[channel];
743
 
744
  PRINTD (DBG_FLOW, "hrz_open_rx %x", channel);
745
 
746
  spin_lock_irqsave (&dev->mem_lock, flags);
747
  channel_type = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
748
  spin_unlock_irqrestore (&dev->mem_lock, flags);
749
 
750
  // very serious error, should never occur
751
  if (channel_type != RX_CHANNEL_DISABLED) {
752
    PRINTD (DBG_ERR|DBG_VCC, "RX channel for VC already open");
753
    return -EBUSY; // clean up?
754
  }
755
 
756
  // Give back spare buffer
757
  if (dev->noof_spare_buffers) {
758
    buf_ptr = dev->spare_buffers[--dev->noof_spare_buffers];
759
    PRINTD (DBG_VCC, "using a spare buffer: %u", buf_ptr);
760
    // should never occur
761
    if (buf_ptr == RX_CHANNEL_DISABLED || buf_ptr == RX_CHANNEL_IDLE) {
762
      // but easy to recover from
763
      PRINTD (DBG_ERR|DBG_VCC, "bad spare buffer pointer, using IDLE");
764
      buf_ptr = RX_CHANNEL_IDLE;
765
    }
766
  } else {
767
    PRINTD (DBG_VCC, "using IDLE buffer pointer");
768
  }
769
 
770
  // Channel is currently disabled so change its status to idle
771
 
772
  // do we really need to save the flags again?
773
  spin_lock_irqsave (&dev->mem_lock, flags);
774
 
775
  wr_mem (dev, &rx_desc->wr_buf_type,
776
          buf_ptr | CHANNEL_TYPE_AAL5 | FIRST_CELL_OF_AAL5_FRAME);
777
  if (buf_ptr != RX_CHANNEL_IDLE)
778
    wr_mem (dev, &rx_desc->rd_buf_type, buf_ptr);
779
 
780
  spin_unlock_irqrestore (&dev->mem_lock, flags);
781
 
782
  // rxer->rate = make_rate (qos->peak_cells);
783
 
784
  PRINTD (DBG_FLOW, "hrz_open_rx ok");
785
 
786
  return 0;
787
}
788
 
789
#if 0
790
/********** change vc rate for a given vc **********/
791
 
792
static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
793
  rxer->rate = make_rate (qos->peak_cells);
794
}
795
#endif
796
 
797
/********** free an skb (as per ATM device driver documentation) **********/
798
 
799
static inline void hrz_kfree_skb (struct sk_buff * skb) {
800
  if (ATM_SKB(skb)->vcc->pop) {
801
    ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
802
  } else {
803
    dev_kfree_skb_any (skb);
804
  }
805
}
806
 
807
/********** cancel listen on a VC **********/
808
 
809
static void hrz_close_rx (hrz_dev * dev, u16 vc) {
810
  unsigned long flags;
811
 
812
  u32 value;
813
 
814
  u32 r1, r2;
815
 
816
  rx_ch_desc * rx_desc = &memmap->rx_descs[vc];
817
 
818
  int was_idle = 0;
819
 
820
  spin_lock_irqsave (&dev->mem_lock, flags);
821
  value = rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK;
822
  spin_unlock_irqrestore (&dev->mem_lock, flags);
823
 
824
  if (value == RX_CHANNEL_DISABLED) {
825
    // I suppose this could happen once we deal with _NONE traffic properly
826
    PRINTD (DBG_VCC, "closing VC: RX channel %u already disabled", vc);
827
    return;
828
  }
829
  if (value == RX_CHANNEL_IDLE)
830
    was_idle = 1;
831
 
832
  spin_lock_irqsave (&dev->mem_lock, flags);
833
 
834
  for (;;) {
835
    wr_mem (dev, &rx_desc->wr_buf_type, RX_CHANNEL_DISABLED);
836
 
837
    if ((rd_mem (dev, &rx_desc->wr_buf_type) & BUFFER_PTR_MASK) == RX_CHANNEL_DISABLED)
838
      break;
839
 
840
    was_idle = 0;
841
  }
842
 
843
  if (was_idle) {
844
    spin_unlock_irqrestore (&dev->mem_lock, flags);
845
    return;
846
  }
847
 
848
  WAIT_FLUSH_RX_COMPLETE(dev);
849
 
850
  // XXX Is this all really necessary? We can rely on the rx_data_av
851
  // handler to discard frames that remain queued for delivery. If the
852
  // worry is that immediately reopening the channel (perhaps by a
853
  // different process) may cause some data to be mis-delivered then
854
  // there may still be a simpler solution (such as busy-waiting on
855
  // rx_busy once the channel is disabled or before a new one is
856
  // opened - does this leave any holes?). Arguably setting up and
857
  // tearing down the TX and RX halves of each virtual circuit could
858
  // most safely be done within ?x_busy protected regions.
859
 
860
  // OK, current changes are that Simon's marker is disabled and we DO
861
  // look for NULL rxer elsewhere. The code here seems to flush frames
862
  // and then remember the last dead cell belonging to the channel
863
  // just disabled - the cell gets relinked at the next vc_open.
864
  // However, when all VCs are closed or only a few opened there are a
865
  // handful of buffers that are unusable.
866
 
867
  // Does anyone feel like documenting spare_buffers properly?
868
  // Does anyone feel like fixing this in a nicer way?
869
 
870
  // Flush any data which is left in the channel
871
  for (;;) {
872
    // Change the rx channel port to something different to the RX
873
    // channel we are trying to close to force Horizon to flush the rx
874
    // channel read and write pointers.
875
 
876
    u16 other = vc^(RX_CHANS/2);
877
 
878
    SELECT_RX_CHANNEL (dev, other);
879
    WAIT_UPDATE_COMPLETE (dev);
880
 
881
    r1 = rd_mem (dev, &rx_desc->rd_buf_type);
882
 
883
    // Select this RX channel. Flush doesn't seem to work unless we
884
    // select an RX channel before hand
885
 
886
    SELECT_RX_CHANNEL (dev, vc);
887
    WAIT_UPDATE_COMPLETE (dev);
888
 
889
    // Attempt to flush a frame on this RX channel
890
 
891
    FLUSH_RX_CHANNEL (dev, vc);
892
    WAIT_FLUSH_RX_COMPLETE (dev);
893
 
894
    // Force Horizon to flush rx channel read and write pointers as before
895
 
896
    SELECT_RX_CHANNEL (dev, other);
897
    WAIT_UPDATE_COMPLETE (dev);
898
 
899
    r2 = rd_mem (dev, &rx_desc->rd_buf_type);
900
 
901
    PRINTD (DBG_VCC|DBG_RX, "r1 = %u, r2 = %u", r1, r2);
902
 
903
    if (r1 == r2) {
904
      dev->spare_buffers[dev->noof_spare_buffers++] = (u16)r1;
905
      break;
906
    }
907
  }
908
 
909
#if 0
910
  {
911
    rx_q_entry * wr_ptr = &memmap->rx_q_entries[rd_regw (dev, RX_QUEUE_WR_PTR_OFF)];
912
    rx_q_entry * rd_ptr = dev->rx_q_entry;
913
 
914
    PRINTD (DBG_VCC|DBG_RX, "rd_ptr = %u, wr_ptr = %u", rd_ptr, wr_ptr);
915
 
916
    while (rd_ptr != wr_ptr) {
917
      u32 x = rd_mem (dev, (HDW *) rd_ptr);
918
 
919
      if (vc == rx_q_entry_to_rx_channel (x)) {
920
        x |= SIMONS_DODGEY_MARKER;
921
 
922
        PRINTD (DBG_RX|DBG_VCC|DBG_WARN, "marking a frame as dodgey");
923
 
924
        wr_mem (dev, (HDW *) rd_ptr, x);
925
      }
926
 
927
      if (rd_ptr == dev->rx_q_wrap)
928
        rd_ptr = dev->rx_q_reset;
929
      else
930
        rd_ptr++;
931
    }
932
  }
933
#endif
934
 
935
  spin_unlock_irqrestore (&dev->mem_lock, flags);
936
 
937
  return;
938
}
939
 
940
/********** schedule RX transfers **********/
941
 
942
// Note on tail recursion: a GCC developer said that it is not likely
943
// to be fixed soon, so do not define TAILRECURSIONWORKS unless you
944
// are sure it does as you may otherwise overflow the kernel stack.
945
 
946
// giving this fn a return value would help GCC, allegedly
947
 
948
static void rx_schedule (hrz_dev * dev, int irq) {
949
  unsigned int rx_bytes;
950
 
951
  int pio_instead = 0;
952
#ifndef TAILRECURSIONWORKS
953
  pio_instead = 1;
954
  while (pio_instead) {
955
#endif
956
    // bytes waiting for RX transfer
957
    rx_bytes = dev->rx_bytes;
958
 
959
#if 0
960
    spin_count = 0;
961
    while (rd_regl (dev, MASTER_RX_COUNT_REG_OFF)) {
962
      PRINTD (DBG_RX|DBG_WARN, "RX error: other PCI Bus Master RX still in progress!");
963
      if (++spin_count > 10) {
964
        PRINTD (DBG_RX|DBG_ERR, "spun out waiting PCI Bus Master RX completion");
965
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
966
        clear_bit (rx_busy, &dev->flags);
967
        hrz_kfree_skb (dev->rx_skb);
968
        return;
969
      }
970
    }
971
#endif
972
 
973
    // this code follows the TX code but (at the moment) there is only
974
    // one region - the skb itself. I don't know if this will change,
975
    // but it doesn't hurt to have the code here, disabled.
976
 
977
    if (rx_bytes) {
978
      // start next transfer within same region
979
      if (rx_bytes <= MAX_PIO_COUNT) {
980
        PRINTD (DBG_RX|DBG_BUS, "(pio)");
981
        pio_instead = 1;
982
      }
983
      if (rx_bytes <= MAX_TRANSFER_COUNT) {
984
        PRINTD (DBG_RX|DBG_BUS, "(simple or last multi)");
985
        dev->rx_bytes = 0;
986
      } else {
987
        PRINTD (DBG_RX|DBG_BUS, "(continuing multi)");
988
        dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
989
        rx_bytes = MAX_TRANSFER_COUNT;
990
      }
991
    } else {
992
      // rx_bytes == 0 -- we're between regions
993
      // regions remaining to transfer
994
#if 0
995
      unsigned int rx_regions = dev->rx_regions;
996
#else
997
      unsigned int rx_regions = 0;
998
#endif
999
 
1000
      if (rx_regions) {
1001
#if 0
1002
        // start a new region
1003
        dev->rx_addr = dev->rx_iovec->iov_base;
1004
        rx_bytes = dev->rx_iovec->iov_len;
1005
        ++dev->rx_iovec;
1006
        dev->rx_regions = rx_regions - 1;
1007
 
1008
        if (rx_bytes <= MAX_PIO_COUNT) {
1009
          PRINTD (DBG_RX|DBG_BUS, "(pio)");
1010
          pio_instead = 1;
1011
        }
1012
        if (rx_bytes <= MAX_TRANSFER_COUNT) {
1013
          PRINTD (DBG_RX|DBG_BUS, "(full region)");
1014
          dev->rx_bytes = 0;
1015
        } else {
1016
          PRINTD (DBG_RX|DBG_BUS, "(start multi region)");
1017
          dev->rx_bytes = rx_bytes - MAX_TRANSFER_COUNT;
1018
          rx_bytes = MAX_TRANSFER_COUNT;
1019
        }
1020
#endif
1021
      } else {
1022
        // rx_regions == 0
1023
        // that's all folks - end of frame
1024
        struct sk_buff * skb = dev->rx_skb;
1025
        // dev->rx_iovec = 0;
1026
 
1027
        FLUSH_RX_CHANNEL (dev, dev->rx_channel);
1028
 
1029
        dump_skb ("<<<", dev->rx_channel, skb);
1030
 
1031
        PRINTD (DBG_RX|DBG_SKB, "push %p %u", skb->data, skb->len);
1032
 
1033
        {
1034
          struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
1035
          // VC layer stats
1036
          atomic_inc(&vcc->stats->rx);
1037
          __net_timestamp(skb);
1038
          // end of our responsibility
1039
          vcc->push (vcc, skb);
1040
        }
1041
      }
1042
    }
1043
 
1044
    // note: writing RX_COUNT clears any interrupt condition
1045
    if (rx_bytes) {
1046
      if (pio_instead) {
1047
        if (irq)
1048
          wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
1049
        rds_regb (dev, DATA_PORT_OFF, dev->rx_addr, rx_bytes);
1050
      } else {
1051
        wr_regl (dev, MASTER_RX_ADDR_REG_OFF, virt_to_bus (dev->rx_addr));
1052
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, rx_bytes);
1053
      }
1054
      dev->rx_addr += rx_bytes;
1055
    } else {
1056
      if (irq)
1057
        wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
1058
      // allow another RX thread to start
1059
      YELLOW_LED_ON(dev);
1060
      clear_bit (rx_busy, &dev->flags);
1061
      PRINTD (DBG_RX, "cleared rx_busy for dev %p", dev);
1062
    }
1063
 
1064
#ifdef TAILRECURSIONWORKS
1065
    // and we all bless optimised tail calls
1066
    if (pio_instead)
1067
      return rx_schedule (dev, 0);
1068
    return;
1069
#else
1070
    // grrrrrrr!
1071
    irq = 0;
1072
  }
1073
  return;
1074
#endif
1075
}
1076
 
1077
/********** handle RX bus master complete events **********/
1078
 
1079
static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
1080
  if (test_bit (rx_busy, &dev->flags)) {
1081
    rx_schedule (dev, 1);
1082
  } else {
1083
    PRINTD (DBG_RX|DBG_ERR, "unexpected RX bus master completion");
1084
    // clear interrupt condition on adapter
1085
    wr_regl (dev, MASTER_RX_COUNT_REG_OFF, 0);
1086
  }
1087
  return;
1088
}
1089
 
1090
/********** (queue to) become the next TX thread **********/
1091
 
1092
static inline int tx_hold (hrz_dev * dev) {
1093
  PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
1094
  wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
1095
  PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
1096
  if (signal_pending (current))
1097
    return -1;
1098
  PRINTD (DBG_TX, "set tx_busy for dev %p", dev);
1099
  return 0;
1100
}
1101
 
1102
/********** allow another TX thread to start **********/
1103
 
1104
static inline void tx_release (hrz_dev * dev) {
1105
  clear_bit (tx_busy, &dev->flags);
1106
  PRINTD (DBG_TX, "cleared tx_busy for dev %p", dev);
1107
  wake_up_interruptible (&dev->tx_queue);
1108
}
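
/* A TX caller therefore brackets each frame roughly like this (a sketch;
   hrz_send below adds the real error handling):

     if (tx_hold (dev))
       return -ERESTARTSYS;              interrupted while queueing
     ... select or set up a TX channel, write the descriptor ...
     tx_schedule (dev, 0);

   The matching tx_release is performed by tx_schedule (or the IRQ
   handler) once the last transfer of the frame has finished. */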
1109
 
1110
/********** schedule TX transfers **********/
1111
 
1112
static void tx_schedule (hrz_dev * const dev, int irq) {
1113
  unsigned int tx_bytes;
1114
 
1115
  int append_desc = 0;
1116
 
1117
  int pio_instead = 0;
1118
#ifndef TAILRECURSIONWORKS
1119
  pio_instead = 1;
1120
  while (pio_instead) {
1121
#endif
1122
    // bytes in current region waiting for TX transfer
1123
    tx_bytes = dev->tx_bytes;
1124
 
1125
#if 0
1126
    spin_count = 0;
1127
    while (rd_regl (dev, MASTER_TX_COUNT_REG_OFF)) {
1128
      PRINTD (DBG_TX|DBG_WARN, "TX error: other PCI Bus Master TX still in progress!");
1129
      if (++spin_count > 10) {
1130
        PRINTD (DBG_TX|DBG_ERR, "spun out waiting PCI Bus Master TX completion");
1131
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1132
        tx_release (dev);
1133
        hrz_kfree_skb (dev->tx_skb);
1134
        return;
1135
      }
1136
    }
1137
#endif
1138
 
1139
    if (tx_bytes) {
1140
      // start next transfer within same region
1141
      if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
1142
        PRINTD (DBG_TX|DBG_BUS, "(pio)");
1143
        pio_instead = 1;
1144
      }
1145
      if (tx_bytes <= MAX_TRANSFER_COUNT) {
1146
        PRINTD (DBG_TX|DBG_BUS, "(simple or last multi)");
1147
        if (!dev->tx_iovec) {
1148
          // end of last region
1149
          append_desc = 1;
1150
        }
1151
        dev->tx_bytes = 0;
1152
      } else {
1153
        PRINTD (DBG_TX|DBG_BUS, "(continuing multi)");
1154
        dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
1155
        tx_bytes = MAX_TRANSFER_COUNT;
1156
      }
1157
    } else {
1158
      // tx_bytes == 0 -- we're between regions
1159
      // regions remaining to transfer
1160
      unsigned int tx_regions = dev->tx_regions;
1161
 
1162
      if (tx_regions) {
1163
        // start a new region
1164
        dev->tx_addr = dev->tx_iovec->iov_base;
1165
        tx_bytes = dev->tx_iovec->iov_len;
1166
        ++dev->tx_iovec;
1167
        dev->tx_regions = tx_regions - 1;
1168
 
1169
        if (!test_bit (ultra, &dev->flags) || tx_bytes <= MAX_PIO_COUNT) {
1170
          PRINTD (DBG_TX|DBG_BUS, "(pio)");
1171
          pio_instead = 1;
1172
        }
1173
        if (tx_bytes <= MAX_TRANSFER_COUNT) {
1174
          PRINTD (DBG_TX|DBG_BUS, "(full region)");
1175
          dev->tx_bytes = 0;
1176
        } else {
1177
          PRINTD (DBG_TX|DBG_BUS, "(start multi region)");
1178
          dev->tx_bytes = tx_bytes - MAX_TRANSFER_COUNT;
1179
          tx_bytes = MAX_TRANSFER_COUNT;
1180
        }
1181
      } else {
1182
        // tx_regions == 0
1183
        // that's all folks - end of frame
1184
        struct sk_buff * skb = dev->tx_skb;
1185
        dev->tx_iovec = NULL;
1186
 
1187
        // VC layer stats
1188
        atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
1189
 
1190
        // free the skb
1191
        hrz_kfree_skb (skb);
1192
      }
1193
    }
1194
 
1195
    // note: writing TX_COUNT clears any interrupt condition
1196
    if (tx_bytes) {
1197
      if (pio_instead) {
1198
        if (irq)
1199
          wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1200
        wrs_regb (dev, DATA_PORT_OFF, dev->tx_addr, tx_bytes);
1201
        if (append_desc)
1202
          wr_regl (dev, TX_DESCRIPTOR_PORT_OFF, cpu_to_be32 (dev->tx_skb->len));
1203
      } else {
1204
        wr_regl (dev, MASTER_TX_ADDR_REG_OFF, virt_to_bus (dev->tx_addr));
1205
        if (append_desc)
1206
          wr_regl (dev, TX_DESCRIPTOR_REG_OFF, cpu_to_be32 (dev->tx_skb->len));
1207
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF,
1208
                 append_desc
1209
                 ? tx_bytes | MASTER_TX_AUTO_APPEND_DESC
1210
                 : tx_bytes);
1211
      }
1212
      dev->tx_addr += tx_bytes;
1213
    } else {
1214
      if (irq)
1215
        wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1216
      YELLOW_LED_ON(dev);
1217
      tx_release (dev);
1218
    }
1219
 
1220
#ifdef TAILRECURSIONWORKS
1221
    // and we all bless optimised tail calls
1222
    if (pio_instead)
1223
      return tx_schedule (dev, 0);
1224
    return;
1225
#else
1226
    // grrrrrrr!
1227
    irq = 0;
1228
  }
1229
  return;
1230
#endif
1231
}
1232
 
1233
/********** handle TX bus master complete events **********/
1234
 
1235
static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
1236
  if (test_bit (tx_busy, &dev->flags)) {
1237
    tx_schedule (dev, 1);
1238
  } else {
1239
    PRINTD (DBG_TX|DBG_ERR, "unexpected TX bus master completion");
1240
    // clear interrupt condition on adapter
1241
    wr_regl (dev, MASTER_TX_COUNT_REG_OFF, 0);
1242
  }
1243
  return;
1244
}
1245
 
1246
/********** move RX Q pointer to next item in circular buffer **********/
1247
 
1248
// called only from IRQ sub-handler
1249
static inline u32 rx_queue_entry_next (hrz_dev * dev) {
1250
  u32 rx_queue_entry;
1251
  spin_lock (&dev->mem_lock);
1252
  rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
1253
  if (dev->rx_q_entry == dev->rx_q_wrap)
1254
    dev->rx_q_entry = dev->rx_q_reset;
1255
  else
1256
    dev->rx_q_entry++;
1257
  wr_regw (dev, RX_QUEUE_RD_PTR_OFF, dev->rx_q_entry - dev->rx_q_reset);
1258
  spin_unlock (&dev->mem_lock);
1259
  return rx_queue_entry;
1260
}
1261
 
1262
/********** handle RX disabled by device **********/
1263
 
1264
static inline void rx_disabled_handler (hrz_dev * dev) {
1265
  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
1266
  // count me please
1267
  PRINTK (KERN_WARNING, "RX was disabled!");
1268
}
1269
 
1270
/********** handle RX data received by device **********/
1271
 
1272
// called from IRQ handler
1273
static inline void rx_data_av_handler (hrz_dev * dev) {
1274
  u32 rx_queue_entry;
1275
  u32 rx_queue_entry_flags;
1276
  u16 rx_len;
1277
  u16 rx_channel;
1278
 
1279
  PRINTD (DBG_FLOW, "hrz_data_av_handler");
1280
 
1281
  // try to grab rx lock (not possible during RX bus mastering)
1282
  if (test_and_set_bit (rx_busy, &dev->flags)) {
1283
    PRINTD (DBG_RX, "locked out of rx lock");
1284
    return;
1285
  }
1286
  PRINTD (DBG_RX, "set rx_busy for dev %p", dev);
1287
  // lock is cleared if we fail now, o/w after bus master completion
1288
 
1289
  YELLOW_LED_OFF(dev);
1290
 
1291
  rx_queue_entry = rx_queue_entry_next (dev);
1292
 
1293
  rx_len = rx_q_entry_to_length (rx_queue_entry);
1294
  rx_channel = rx_q_entry_to_rx_channel (rx_queue_entry);
1295
 
1296
  WAIT_FLUSH_RX_COMPLETE (dev);
1297
 
1298
  SELECT_RX_CHANNEL (dev, rx_channel);
1299
 
1300
  PRINTD (DBG_RX, "rx_queue_entry is: %#x", rx_queue_entry);
1301
  rx_queue_entry_flags = rx_queue_entry & (RX_CRC_32_OK|RX_COMPLETE_FRAME|SIMONS_DODGEY_MARKER);
1302
 
1303
  if (!rx_len) {
1304
    // (at least) bus-mastering breaks if we try to handle a
1305
    // zero-length frame, besides AAL5 does not support them
1306
    PRINTK (KERN_ERR, "zero-length frame!");
1307
    rx_queue_entry_flags &= ~RX_COMPLETE_FRAME;
1308
  }
1309
 
1310
  if (rx_queue_entry_flags & SIMONS_DODGEY_MARKER) {
1311
    PRINTD (DBG_RX|DBG_ERR, "Simon's marker detected!");
1312
  }
1313
  if (rx_queue_entry_flags == (RX_CRC_32_OK | RX_COMPLETE_FRAME)) {
1314
    struct atm_vcc * atm_vcc;
1315
 
1316
    PRINTD (DBG_RX, "got a frame on rx_channel %x len %u", rx_channel, rx_len);
1317
 
1318
    atm_vcc = dev->rxer[rx_channel];
1319
    // if no vcc is assigned to this channel, we should drop the frame
1320
    // (is this what SIMONS etc. was trying to achieve?)
1321
 
1322
    if (atm_vcc) {
1323
 
1324
      if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
1325
 
1326
        if (rx_len <= atm_vcc->qos.rxtp.max_sdu) {
1327
 
1328
          struct sk_buff * skb = atm_alloc_charge (atm_vcc, rx_len, GFP_ATOMIC);
1329
          if (skb) {
1330
            // remember this so we can push it later
1331
            dev->rx_skb = skb;
1332
            // remember this so we can flush it later
1333
            dev->rx_channel = rx_channel;
1334
 
1335
            // prepare socket buffer
1336
            skb_put (skb, rx_len);
1337
            ATM_SKB(skb)->vcc = atm_vcc;
1338
 
1339
            // simple transfer
1340
            // dev->rx_regions = 0;
1341
            // dev->rx_iovec = 0;
1342
            dev->rx_bytes = rx_len;
1343
            dev->rx_addr = skb->data;
1344
            PRINTD (DBG_RX, "RX start simple transfer (addr %p, len %d)",
1345
                    skb->data, rx_len);
1346
 
1347
            // do the business
1348
            rx_schedule (dev, 0);
1349
            return;
1350
 
1351
          } else {
1352
            PRINTD (DBG_SKB|DBG_WARN, "failed to get skb");
1353
          }
1354
 
1355
        } else {
1356
          PRINTK (KERN_WARNING, "dropped over-size frame");
1357
          // do we count this?
1358
        }
1359
 
1360
      } else {
1361
        PRINTK (KERN_INFO, "frame received on TX-only VC %x", rx_channel);
1362
        // do we count this?
1363
      }
1364
 
1365
    } else {
1366
      PRINTD (DBG_WARN|DBG_VCC|DBG_RX, "no VCC for this frame (VC closed)");
1367
      // do we count this?
1368
    }
1369
 
1370
  } else {
1371
    // Wait update complete ? SPONG
1372
  }
1373
 
1374
  // RX was aborted
1375
  YELLOW_LED_ON(dev);
1376
 
1377
  FLUSH_RX_CHANNEL (dev,rx_channel);
1378
  clear_bit (rx_busy, &dev->flags);
1379
 
1380
  return;
1381
}
1382
 
1383
/********** interrupt handler **********/
1384
 
1385
static irqreturn_t interrupt_handler(int irq, void *dev_id)
1386
{
1387
  hrz_dev *dev = dev_id;
1388
  u32 int_source;
1389
  unsigned int irq_ok;
1390
 
1391
  PRINTD (DBG_FLOW, "interrupt_handler: %p", dev_id);
1392
 
1393
  // definitely for us
1394
  irq_ok = 0;
1395
  while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
1396
          & INTERESTING_INTERRUPTS)) {
1397
    // In the interests of fairness, the (inline) handlers below are
1398
    // called in sequence and without immediate return to the head of
1399
    // the while loop. This is only of issue for slow hosts (or when
1400
    // debugging messages are on). Really slow hosts may find a fast
1401
    // sender keeps them permanently in the IRQ handler. :(
1402
 
1403
    // (only an issue for slow hosts) RX completion goes before
1404
    // rx_data_av as the former implies rx_busy and so the latter
1405
    // would just abort. If it reschedules another transfer
1406
    // (continuing the same frame) then it will not clear rx_busy.
1407
 
1408
    // (only an issue for slow hosts) TX completion goes before RX
1409
    // data available as it is a much shorter routine - there is the
1410
    // chance that any further transfers it schedules will be complete
1411
    // by the time of the return to the head of the while loop
1412
 
1413
    if (int_source & RX_BUS_MASTER_COMPLETE) {
1414
      ++irq_ok;
1415
      PRINTD (DBG_IRQ|DBG_BUS|DBG_RX, "rx_bus_master_complete asserted");
1416
      rx_bus_master_complete_handler (dev);
1417
    }
1418
    if (int_source & TX_BUS_MASTER_COMPLETE) {
1419
      ++irq_ok;
1420
      PRINTD (DBG_IRQ|DBG_BUS|DBG_TX, "tx_bus_master_complete asserted");
1421
      tx_bus_master_complete_handler (dev);
1422
    }
1423
    if (int_source & RX_DATA_AV) {
1424
      ++irq_ok;
1425
      PRINTD (DBG_IRQ|DBG_RX, "rx_data_av asserted");
1426
      rx_data_av_handler (dev);
1427
    }
1428
  }
1429
  if (irq_ok) {
1430
    PRINTD (DBG_IRQ, "work done: %u", irq_ok);
1431
  } else {
1432
    PRINTD (DBG_IRQ|DBG_WARN, "spurious interrupt source: %#x", int_source);
1433
  }
1434
 
1435
  PRINTD (DBG_IRQ|DBG_FLOW, "interrupt_handler done: %p", dev_id);
1436
  if (irq_ok)
1437
        return IRQ_HANDLED;
1438
  return IRQ_NONE;
1439
}
1440
 
1441
/********** housekeeping **********/
1442
 
1443
static void do_housekeeping (unsigned long arg) {
1444
  // just stats at the moment
1445
  hrz_dev * dev = (hrz_dev *) arg;
1446
 
1447
  // collect device-specific (not driver/atm-linux) stats here
1448
  dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
1449
  dev->rx_cell_count += rd_regw (dev, RX_CELL_COUNT_OFF);
1450
  dev->hec_error_count += rd_regw (dev, HEC_ERROR_COUNT_OFF);
1451
  dev->unassigned_cell_count += rd_regw (dev, UNASSIGNED_CELL_COUNT_OFF);
1452
 
1453
  mod_timer (&dev->housekeeping, jiffies + HZ/10);
1454
 
1455
  return;
1456
}
1457
 
1458
/********** find an idle channel for TX and set it up **********/
1459
 
1460
// called with tx_busy set
1461
static inline short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
1462
  unsigned short idle_channels;
1463
  short tx_channel = -1;
1464
  unsigned int spin_count;
1465
  PRINTD (DBG_FLOW|DBG_TX, "setup_idle_tx_channel %p", dev);
1466
 
1467
  // better would be to fail immediately, the caller can then decide whether
1468
  // to wait or drop (depending on whether this is UBR etc.)
1469
  spin_count = 0;
1470
  while (!(idle_channels = rd_regw (dev, TX_STATUS_OFF) & IDLE_CHANNELS_MASK)) {
1471
    PRINTD (DBG_TX|DBG_WARN, "waiting for idle TX channel");
1472
    // delay a bit here
1473
    if (++spin_count > 100) {
1474
      PRINTD (DBG_TX|DBG_ERR, "spun out waiting for idle TX channel");
1475
      return -EBUSY;
1476
    }
1477
  }
1478
 
1479
  // got an idle channel
1480
  {
1481
    // tx_idle ensures we look for idle channels in RR order
1482
    int chan = dev->tx_idle;
1483
 
1484
    int keep_going = 1;
1485
    while (keep_going) {
1486
      if (idle_channels & (1<<chan)) {
1487
        tx_channel = chan;
1488
        keep_going = 0;
1489
      }
1490
      ++chan;
1491
      if (chan == TX_CHANS)
1492
        chan = 0;
1493
    }
1494
 
1495
    dev->tx_idle = chan;
1496
  }
1497
 
1498
  // set up the channel we found
1499
  {
1500
    // Initialise the cell header in the transmit channel descriptor
1501
    // a.k.a. prepare the channel and remember that we have done so.
1502
 
1503
    tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
1504
    u32 rd_ptr;
1505
    u32 wr_ptr;
1506
    u16 channel = vcc->channel;
1507
 
1508
    unsigned long flags;
1509
    spin_lock_irqsave (&dev->mem_lock, flags);
1510
 
1511
    // Update the transmit channel record.
1512
    dev->tx_channel_record[tx_channel] = channel;
1513
 
1514
    // xBR channel
1515
    update_tx_channel_config (dev, tx_channel, RATE_TYPE_ACCESS,
1516
                              vcc->tx_xbr_bits);
1517
 
1518
    // Update the PCR counter preload value etc.
1519
    update_tx_channel_config (dev, tx_channel, PCR_TIMER_ACCESS,
1520
                              vcc->tx_pcr_bits);
1521
 
1522
#if 0
1523
    if (vcc->tx_xbr_bits == VBR_RATE_TYPE) {
1524
      // SCR timer
1525
      update_tx_channel_config (dev, tx_channel, SCR_TIMER_ACCESS,
1526
                                vcc->tx_scr_bits);
1527
 
1528
      // Bucket size...
1529
      update_tx_channel_config (dev, tx_channel, BUCKET_CAPACITY_ACCESS,
1530
                                vcc->tx_bucket_bits);
1531
 
1532
      // ... and fullness
1533
      update_tx_channel_config (dev, tx_channel, BUCKET_FULLNESS_ACCESS,
1534
                                vcc->tx_bucket_bits);
1535
    }
1536
#endif
1537
 
1538
    // Initialise the read and write buffer pointers
1539
    rd_ptr = rd_mem (dev, &tx_desc->rd_buf_type) & BUFFER_PTR_MASK;
1540
    wr_ptr = rd_mem (dev, &tx_desc->wr_buf_type) & BUFFER_PTR_MASK;
1541
 
1542
    // idle TX channels should have identical pointers
1543
    if (rd_ptr != wr_ptr) {
1544
      PRINTD (DBG_TX|DBG_ERR, "TX buffer pointers are broken!");
1545
      // spin_unlock... return -E...
1546
      // I wonder if gcc would get rid of one of the pointer aliases
1547
    }
1548
    PRINTD (DBG_TX, "TX buffer pointers are: rd %x, wr %x.",
1549
            rd_ptr, wr_ptr);
1550
 
1551
    switch (vcc->aal) {
1552
      case aal0:
1553
        PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal0");
1554
        rd_ptr |= CHANNEL_TYPE_RAW_CELLS;
1555
        wr_ptr |= CHANNEL_TYPE_RAW_CELLS;
1556
        break;
1557
      case aal34:
1558
        PRINTD (DBG_QOS|DBG_TX, "tx_channel: aal34");
1559
        rd_ptr |= CHANNEL_TYPE_AAL3_4;
1560
        wr_ptr |= CHANNEL_TYPE_AAL3_4;
1561
        break;
1562
      case aal5:
1563
        rd_ptr |= CHANNEL_TYPE_AAL5;
1564
        wr_ptr |= CHANNEL_TYPE_AAL5;
1565
        // Initialise the CRC
1566
        wr_mem (dev, &tx_desc->partial_crc, INITIAL_CRC);
1567
        break;
1568
    }
1569
 
1570
    wr_mem (dev, &tx_desc->rd_buf_type, rd_ptr);
1571
    wr_mem (dev, &tx_desc->wr_buf_type, wr_ptr);
1572
 
1573
    // Write the Cell Header
1574
    // Payload Type, CLP and GFC would go here if non-zero
1575
    wr_mem (dev, &tx_desc->cell_header, channel);
1576
 
1577
    spin_unlock_irqrestore (&dev->mem_lock, flags);
1578
  }
1579
 
1580
  return tx_channel;
1581
}
1582
 
1583
/********** send a frame **********/
1584
 
1585
static int hrz_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
1586
  unsigned int spin_count;
1587
  int free_buffers;
1588
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
1589
  hrz_vcc * vcc = HRZ_VCC(atm_vcc);
1590
  u16 channel = vcc->channel;
1591
 
1592
  u32 buffers_required;
1593
 
1594
  /* signed for error return */
1595
  short tx_channel;
1596
 
1597
  PRINTD (DBG_FLOW|DBG_TX, "hrz_send vc %x data %p len %u",
1598
          channel, skb->data, skb->len);
1599
 
1600
  dump_skb (">>>", channel, skb);
1601
 
1602
  if (atm_vcc->qos.txtp.traffic_class == ATM_NONE) {
1603
    PRINTK (KERN_ERR, "attempt to send on RX-only VC %x", channel);
1604
    hrz_kfree_skb (skb);
1605
    return -EIO;
1606
  }
1607
 
1608
  // don't understand this
1609
  ATM_SKB(skb)->vcc = atm_vcc;
1610
 
1611
  if (skb->len > atm_vcc->qos.txtp.max_sdu) {
1612
    PRINTK (KERN_ERR, "sk_buff length greater than agreed max_sdu, dropping...");
1613
    hrz_kfree_skb (skb);
1614
    return -EIO;
1615
  }
1616
 
1617
  if (!channel) {
1618
    PRINTD (DBG_ERR|DBG_TX, "attempt to transmit on zero (rx_)channel");
1619
    hrz_kfree_skb (skb);
1620
    return -EIO;
1621
  }
1622
 
1623
#if 0
1624
  {
1625
    // where would be a better place for this? housekeeping?
1626
    u16 status;
1627
    pci_read_config_word (dev->pci_dev, PCI_STATUS, &status);
1628
    if (status & PCI_STATUS_REC_MASTER_ABORT) {
1629
      PRINTD (DBG_BUS|DBG_ERR, "Clearing PCI Master Abort (and cleaning up)");
1630
      status &= ~PCI_STATUS_REC_MASTER_ABORT;
1631
      pci_write_config_word (dev->pci_dev, PCI_STATUS, status);
1632
      if (test_bit (tx_busy, &dev->flags)) {
1633
        hrz_kfree_skb (dev->tx_skb);
1634
        tx_release (dev);
1635
      }
1636
    }
1637
  }
1638
#endif
1639
 
1640
#ifdef DEBUG_HORIZON
1641
  /* wey-hey! */
1642
  if (channel == 1023) {
1643
    unsigned int i;
1644
    unsigned short d = 0;
1645
    char * s = skb->data;
1646
    if (*s++ == 'D') {
1647
      for (i = 0; i < 4; ++i) {
1648
        d = (d<<4) | ((*s <= '9') ? (*s - '0') : (*s - 'a' + 10));
1649
        ++s;
1650
      }
1651
      PRINTK (KERN_INFO, "debug bitmap is now %hx", debug = d);
1652
    }
1653
  }
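  // For instance, a frame whose payload starts with the five characters
  // "D001f", sent on whatever VPI/VCI maps to channel 1023, sets the debug
  // bitmap to 0x001f (only '0'-'9' and lower-case 'a'-'f' are understood).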
1654
#endif
1655
 
1656
  // wait until TX is free and grab lock
1657
  if (tx_hold (dev)) {
1658
    hrz_kfree_skb (skb);
1659
    return -ERESTARTSYS;
1660
  }
1661
 
1662
  // Wait for enough space to be available in transmit buffer memory.
1663
 
1664
  // should be number of cells needed + 2 (according to hardware docs)
1665
  // = ((framelen+8)+47) / 48 + 2
1666
  // = (framelen+7) / 48 + 3, hmm... faster to put addition inside XXX
1667
  buffers_required = (skb->len+(ATM_AAL5_TRAILER-1)) / ATM_CELL_PAYLOAD + 3;
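  // e.g. a 1500 byte frame needs ceil((1500+8)/48) = 32 cells, hence 32+2 = 34
  // buffers; the shortcut above gives (1500+7)/48 + 3 = 31+3 = 34 as well
  // (taking the usual values ATM_CELL_PAYLOAD = 48 and ATM_AAL5_TRAILER = 8).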
1668
 
1669
  // replace with timer and sleep, add dev->tx_buffers_queue (max 1 entry)
1670
  spin_count = 0;
1671
  while ((free_buffers = rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF)) < buffers_required) {
1672
    PRINTD (DBG_TX, "waiting for free TX buffers, got %d of %d",
1673
            free_buffers, buffers_required);
1674
    // what is the appropriate delay? implement a timeout? (depending on line speed?)
1675
    // mdelay (1);
1676
    // what happens if we kill (current_pid, SIGKILL) ?
1677
    schedule();
1678
    if (++spin_count > 1000) {
1679
      PRINTD (DBG_TX|DBG_ERR, "spun out waiting for tx buffers, got %d of %d",
1680
              free_buffers, buffers_required);
1681
      tx_release (dev);
1682
      hrz_kfree_skb (skb);
1683
      return -ERESTARTSYS;
1684
    }
1685
  }
1686
 
1687
  // Select a channel to transmit the frame on.
1688
  if (channel == dev->last_vc) {
1689
    PRINTD (DBG_TX, "last vc hack: hit");
1690
    tx_channel = dev->tx_last;
1691
  } else {
1692
    PRINTD (DBG_TX, "last vc hack: miss");
1693
    // Are we currently transmitting this VC on one of the channels?
1694
    for (tx_channel = 0; tx_channel < TX_CHANS; ++tx_channel)
1695
      if (dev->tx_channel_record[tx_channel] == channel) {
1696
        PRINTD (DBG_TX, "vc already on channel: hit");
1697
        break;
1698
      }
1699
    if (tx_channel == TX_CHANS) {
1700
      PRINTD (DBG_TX, "vc already on channel: miss");
1701
      // Find and set up an idle channel.
1702
      tx_channel = setup_idle_tx_channel (dev, vcc);
1703
      if (tx_channel < 0) {
1704
        PRINTD (DBG_TX|DBG_ERR, "failed to get channel");
1705
        tx_release (dev);
1706
        return tx_channel;
1707
      }
1708
    }
1709
 
1710
    PRINTD (DBG_TX, "got channel");
1711
    SELECT_TX_CHANNEL(dev, tx_channel);
1712
 
1713
    dev->last_vc = channel;
1714
    dev->tx_last = tx_channel;
1715
  }
1716
 
1717
  PRINTD (DBG_TX, "using channel %u", tx_channel);
1718
 
1719
  YELLOW_LED_OFF(dev);
1720
 
1721
  // TX start transfer
1722
 
1723
  {
1724
    unsigned int tx_len = skb->len;
1725
    unsigned int tx_iovcnt = skb_shinfo(skb)->nr_frags;
1726
    // remember this so we can free it later
1727
    dev->tx_skb = skb;
1728
 
1729
    if (tx_iovcnt) {
1730
      // scatter gather transfer
1731
      dev->tx_regions = tx_iovcnt;
1732
      dev->tx_iovec = NULL;             /* @@@ needs rewritten */
1733
      dev->tx_bytes = 0;
1734
      PRINTD (DBG_TX|DBG_BUS, "TX start scatter-gather transfer (iovec %p, len %d)",
1735
              skb->data, tx_len);
1736
      tx_release (dev);
1737
      hrz_kfree_skb (skb);
1738
      return -EIO;
1739
    } else {
1740
      // simple transfer
1741
      dev->tx_regions = 0;
1742
      dev->tx_iovec = NULL;
1743
      dev->tx_bytes = tx_len;
1744
      dev->tx_addr = skb->data;
1745
      PRINTD (DBG_TX|DBG_BUS, "TX start simple transfer (addr %p, len %d)",
1746
              skb->data, tx_len);
1747
    }
1748
 
1749
    // and do the business
1750
    tx_schedule (dev, 0);
1751
 
1752
  }
1753
 
1754
  return 0;
1755
}
1756
 
1757
/********** reset a card **********/
1758
 
1759
static void hrz_reset (const hrz_dev * dev) {
1760
  u32 control_0_reg = rd_regl (dev, CONTROL_0_REG);
1761
 
1762
  // why not set RESET_HORIZON to one and wait for the card to
1763
  // reassert that bit as zero? Like so:
1764
  control_0_reg = control_0_reg & RESET_HORIZON;
1765
  wr_regl (dev, CONTROL_0_REG, control_0_reg);
1766
  while (control_0_reg & RESET_HORIZON)
1767
    control_0_reg = rd_regl (dev, CONTROL_0_REG);
1768
 
1769
  // old reset code retained:
1770
  wr_regl (dev, CONTROL_0_REG, control_0_reg |
1771
           RESET_ATM | RESET_RX | RESET_TX | RESET_HOST);
1772
  // just guessing here
1773
  udelay (1000);
1774
 
1775
  wr_regl (dev, CONTROL_0_REG, control_0_reg);
1776
}
1777
 
1778
/********** read the burnt in address **********/
1779
 
1780
static inline void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
1781
{
1782
        wr_regl (dev, CONTROL_0_REG, ctrl);
1783
        udelay (5);
1784
}
1785
 
1786
static inline void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
1787
{
1788
        // DI must be valid around rising SK edge
1789
        WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);
1790
        WRITE_IT_WAIT(dev, ctrl | SEEPROM_SK);
1791
}
1792
 
1793
static u16 __devinit read_bia (const hrz_dev * dev, u16 addr)
1794
{
1795
  u32 ctrl = rd_regl (dev, CONTROL_0_REG);
1796
 
1797
  const unsigned int addr_bits = 6;
1798
  const unsigned int data_bits = 16;
1799
 
1800
  unsigned int i;
1801
 
1802
  u16 res;
1803
 
1804
  ctrl &= ~(SEEPROM_CS | SEEPROM_SK | SEEPROM_DI);
1805
  WRITE_IT_WAIT(dev, ctrl);
1806
 
1807
  // wake Serial EEPROM and send 110 (READ) command
1808
  ctrl |=  (SEEPROM_CS | SEEPROM_DI);
1809
  CLOCK_IT(dev, ctrl);
1810
 
1811
  ctrl |= SEEPROM_DI;
1812
  CLOCK_IT(dev, ctrl);
1813
 
1814
  ctrl &= ~SEEPROM_DI;
1815
  CLOCK_IT(dev, ctrl);
1816
 
1817
  for (i=0; i<addr_bits; i++) {
1818
    if (addr & (1 << (addr_bits-1)))
1819
      ctrl |= SEEPROM_DI;
1820
    else
1821
      ctrl &= ~SEEPROM_DI;
1822
 
1823
    CLOCK_IT(dev, ctrl);
1824
 
1825
    addr = addr << 1;
1826
  }
1827
 
1828
  // we could check that we have DO = 0 here
1829
  ctrl &= ~SEEPROM_DI;
1830
 
1831
  res = 0;
1832
  for (i=0;i<data_bits;i++) {
1833
    res = res >> 1;
1834
 
1835
    CLOCK_IT(dev, ctrl);
1836
 
1837
    if (rd_regl (dev, CONTROL_0_REG) & SEEPROM_DO)
1838
      res |= (1 << (data_bits-1));
1839
  }
1840
 
1841
  ctrl &= ~(SEEPROM_SK | SEEPROM_CS);
1842
  WRITE_IT_WAIT(dev, ctrl);
1843
 
1844
  return res;
1845
}
1846
 
1847
/********** initialise a card **********/
1848
 
1849
static int __devinit hrz_init (hrz_dev * dev) {
1850
  int onefivefive;
1851
 
1852
  u16 chan;
1853
 
1854
  int buff_count;
1855
 
1856
  HDW * mem;
1857
 
1858
  cell_buf * tx_desc;
1859
  cell_buf * rx_desc;
1860
 
1861
  u32 ctrl;
1862
 
1863
  ctrl = rd_regl (dev, CONTROL_0_REG);
1864
  PRINTD (DBG_INFO, "ctrl0reg is %#x", ctrl);
1865
  onefivefive = ctrl & ATM_LAYER_STATUS;
1866
 
1867
  if (onefivefive)
1868
    printk (DEV_LABEL ": Horizon Ultra (at 155.52 Mb/s)");
1869
  else
1870
    printk (DEV_LABEL ": Horizon (at 25 Mb/s)");
1871
 
1872
  printk (":");
1873
  // Reset the card to get everything in a known state
1874
 
1875
  printk (" reset");
1876
  hrz_reset (dev);
1877
 
1878
  // Clear all the buffer memory
1879
 
1880
  printk (" clearing memory");
1881
 
1882
  for (mem = (HDW *) memmap; mem < (HDW *) (memmap + 1); ++mem)
1883
    wr_mem (dev, mem, 0);
1884
 
1885
  printk (" tx channels");
1886
 
1887
  // All eight transmit channels are set up as AAL5 ABR channels with
1888
  // a 16us cell spacing. Why?
1889
 
1890
  // Channel 0 gets the free buffer at 100h, channel 1 gets the free
1891
  // buffer at 110h etc.
1892
 
1893
  for (chan = 0; chan < TX_CHANS; ++chan) {
1894
    tx_ch_desc * tx_desc = &memmap->tx_descs[chan];
1895
    cell_buf * buf = &memmap->inittxbufs[chan];
1896
 
1897
    // initialise the read and write buffer pointers
1898
    wr_mem (dev, &tx_desc->rd_buf_type, BUF_PTR(buf));
1899
    wr_mem (dev, &tx_desc->wr_buf_type, BUF_PTR(buf));
1900
 
1901
    // set the status of the initial buffers to empty
1902
    wr_mem (dev, &buf->next, BUFF_STATUS_EMPTY);
1903
  }
1904
 
1905
  // Use space bufn3 at the moment for tx buffers
1906
 
1907
  printk (" tx buffers");
1908
 
1909
  tx_desc = memmap->bufn3;
1910
 
1911
  wr_mem (dev, &memmap->txfreebufstart.next, BUF_PTR(tx_desc) | BUFF_STATUS_EMPTY);
1912
 
1913
  for (buff_count = 0; buff_count < BUFN3_SIZE-1; buff_count++) {
1914
    wr_mem (dev, &tx_desc->next, BUF_PTR(tx_desc+1) | BUFF_STATUS_EMPTY);
1915
    tx_desc++;
1916
  }
1917
 
1918
  wr_mem (dev, &tx_desc->next, BUF_PTR(&memmap->txfreebufend) | BUFF_STATUS_EMPTY);
1919
 
1920
  // Initialise the transmit free buffer count
1921
  wr_regw (dev, TX_FREE_BUFFER_COUNT_OFF, BUFN3_SIZE);
1922
 
1923
  printk (" rx channels");
1924
 
1925
  // Initialise all of the receive channels to be AAL5 disabled with
1926
  // an interrupt threshold of 0
1927
 
1928
  for (chan = 0; chan < RX_CHANS; ++chan) {
1929
    rx_ch_desc * rx_desc = &memmap->rx_descs[chan];
1930
 
1931
    wr_mem (dev, &rx_desc->wr_buf_type, CHANNEL_TYPE_AAL5 | RX_CHANNEL_DISABLED);
1932
  }
1933
 
1934
  printk (" rx buffers");
1935
 
1936
  // Use space bufn4 at the moment for rx buffers
1937
 
1938
  rx_desc = memmap->bufn4;
1939
 
1940
  wr_mem (dev, &memmap->rxfreebufstart.next, BUF_PTR(rx_desc) | BUFF_STATUS_EMPTY);
1941
 
1942
  for (buff_count = 0; buff_count < BUFN4_SIZE-1; buff_count++) {
1943
    wr_mem (dev, &rx_desc->next, BUF_PTR(rx_desc+1) | BUFF_STATUS_EMPTY);
1944
 
1945
    rx_desc++;
1946
  }
1947
 
1948
  wr_mem (dev, &rx_desc->next, BUF_PTR(&memmap->rxfreebufend) | BUFF_STATUS_EMPTY);
1949
 
1950
  // Initialise the receive free buffer count
1951
  wr_regw (dev, RX_FREE_BUFFER_COUNT_OFF, BUFN4_SIZE);
1952
 
1953
  // Initialise the Horizon registers
1954
 
1955
  // TX config
1956
  wr_regw (dev, TX_CONFIG_OFF,
1957
           ABR_ROUND_ROBIN | TX_NORMAL_OPERATION | DRVR_DRVRBAR_ENABLE);
1958
 
1959
  // RX config. Use 10-x VC bits, x VP bits, non user cells in channel 0.
1960
  wr_regw (dev, RX_CONFIG_OFF,
1961
           DISCARD_UNUSED_VPI_VCI_BITS_SET | NON_USER_CELLS_IN_ONE_CHANNEL | vpi_bits);
1962
 
1963
  // RX line config
1964
  wr_regw (dev, RX_LINE_CONFIG_OFF,
1965
           LOCK_DETECT_ENABLE | FREQUENCY_DETECT_ENABLE | GXTALOUT_SELECT_DIV4);
1966
 
1967
  // Set the max AAL5 cell count to be just enough to contain the
1968
  // largest AAL5 frame that the user wants to receive
1969
  wr_regw (dev, MAX_AAL5_CELL_COUNT_OFF,
1970
           (max_rx_size + ATM_AAL5_TRAILER + ATM_CELL_PAYLOAD - 1) / ATM_CELL_PAYLOAD);
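  // e.g. a max_rx_size of 65535 would program ceil((65535+8)/48) = 1366 cells
  // as the largest reassembly the hardware will accept.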
1971
 
1972
  // Enable receive
1973
  wr_regw (dev, RX_CONFIG_OFF, rd_regw (dev, RX_CONFIG_OFF) | RX_ENABLE);
1974
 
1975
  printk (" control");
1976
 
1977
  // Drive the OE of the LEDs then turn the green LED on
1978
  ctrl |= GREEN_LED_OE | YELLOW_LED_OE | GREEN_LED | YELLOW_LED;
1979
  wr_regl (dev, CONTROL_0_REG, ctrl);
1980
 
1981
  // Test for a 155-capable card
1982
 
1983
  if (onefivefive) {
1984
    // Select 155 mode... make this a choice (or: how do we detect
1985
    // external line speed and switch?)
1986
    ctrl |= ATM_LAYER_SELECT;
1987
    wr_regl (dev, CONTROL_0_REG, ctrl);
1988
 
1989
    // test SUNI-lite vs SAMBA
1990
 
1991
    // Register 0x00 in the SUNI will have some of bits 3-7 set, and
1992
    // they will always be zero for the SAMBA.  Ha!  Bloody hardware
1993
    // engineers.  It'll never work.
1994
 
1995
    if (rd_framer (dev, 0) & 0x00f0) {
1996
      // SUNI
1997
      printk (" SUNI");
1998
 
1999
      // Reset, just in case
2000
      wr_framer (dev, 0x00, 0x0080);
2001
      wr_framer (dev, 0x00, 0x0000);
2002
 
2003
      // Configure transmit FIFO
2004
      wr_framer (dev, 0x63, rd_framer (dev, 0x63) | 0x0002);
2005
 
2006
      // Set line timed mode
2007
      wr_framer (dev, 0x05, rd_framer (dev, 0x05) | 0x0001);
2008
    } else {
2009
      // SAMBA
2010
      printk (" SAMBA");
2011
 
2012
      // Reset, just in case
2013
      wr_framer (dev, 0, rd_framer (dev, 0) | 0x0001);
2014
      wr_framer (dev, 0, rd_framer (dev, 0) &~ 0x0001);
2015
 
2016
      // Turn off diagnostic loopback and enable line-timed mode
2017
      wr_framer (dev, 0, 0x0002);
2018
 
2019
      // Turn on transmit outputs
2020
      wr_framer (dev, 2, 0x0B80);
2021
    }
2022
  } else {
2023
    // Select 25 mode
2024
    ctrl &= ~ATM_LAYER_SELECT;
2025
 
2026
    // Madge B154 setup
2027
    // none required?
2028
  }
2029
 
2030
  printk (" LEDs");
2031
 
2032
  GREEN_LED_ON(dev);
2033
  YELLOW_LED_ON(dev);
2034
 
2035
  printk (" ESI=");
2036
 
2037
  {
2038
    u16 b = 0;
2039
    int i;
2040
    u8 * esi = dev->atm_dev->esi;
2041
 
2042
    // in the card I have, EEPROM
2043
    // addresses 0, 1, 2 contain 0
2044
    // addresses 5, 6 etc. contain ffff
2045
    // NB: Madge prefix is 00 00 f6 (which is 00 00 6f in Ethernet bit order)
2046
    // the read_bia routine gets the BIA in Ethernet bit order
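    // For instance, if read_bia (dev, 2) returned 0x0000 and read_bia (dev, 3)
    // returned 0x12f6, the loop below would assemble the ESI as 00 00 f6 12 ...
    // (each 16-bit EEPROM word supplies two ESI bytes, low byte first).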
2047
 
2048
    for (i=0; i < ESI_LEN; ++i) {
2049
      if (i % 2 == 0)
2050
        b = read_bia (dev, i/2 + 2);
2051
      else
2052
        b = b >> 8;
2053
      esi[i] = b & 0xFF;
2054
      printk ("%02x", esi[i]);
2055
    }
2056
  }
2057
 
2058
  // Enable RX_Q and ?X_COMPLETE interrupts only
2059
  wr_regl (dev, INT_ENABLE_REG_OFF, INTERESTING_INTERRUPTS);
2060
  printk (" IRQ on");
2061
 
2062
  printk (".\n");
2063
 
2064
  return onefivefive;
2065
}
2066
 
2067
/********** check max_sdu **********/
2068
 
2069
static int check_max_sdu (hrz_aal aal, struct atm_trafprm * tp, unsigned int max_frame_size) {
2070
  PRINTD (DBG_FLOW|DBG_QOS, "check_max_sdu");
2071
 
2072
  switch (aal) {
2073
    case aal0:
2074
      if (!(tp->max_sdu)) {
2075
        PRINTD (DBG_QOS, "defaulting max_sdu");
2076
        tp->max_sdu = ATM_AAL0_SDU;
2077
      } else if (tp->max_sdu != ATM_AAL0_SDU) {
2078
        PRINTD (DBG_QOS|DBG_ERR, "rejecting max_sdu");
2079
        return -EINVAL;
2080
      }
2081
      break;
2082
    case aal34:
2083
      if (tp->max_sdu == 0 || tp->max_sdu > ATM_MAX_AAL34_PDU) {
2084
        PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
2085
        tp->max_sdu = ATM_MAX_AAL34_PDU;
2086
      }
2087
      break;
2088
    case aal5:
2089
      if (tp->max_sdu == 0 || tp->max_sdu > max_frame_size) {
2090
        PRINTD (DBG_QOS, "%sing max_sdu", tp->max_sdu ? "capp" : "default");
2091
        tp->max_sdu = max_frame_size;
2092
      }
2093
      break;
2094
  }
2095
  return 0;
2096
}
2097
 
2098
/********** check pcr **********/
2099
 
2100
// something like this should be part of ATM Linux
2101
static int atm_pcr_check (struct atm_trafprm * tp, unsigned int pcr) {
2102
  // we are assuming non-UBR, and non-special values of pcr
2103
  if (tp->min_pcr == ATM_MAX_PCR)
2104
    PRINTD (DBG_QOS, "luser gave min_pcr = ATM_MAX_PCR");
2105
  else if (tp->min_pcr < 0)
2106
    PRINTD (DBG_QOS, "luser gave negative min_pcr");
2107
  else if (tp->min_pcr && tp->min_pcr > pcr)
2108
    PRINTD (DBG_QOS, "pcr less than min_pcr");
2109
  else
2110
    // !! max_pcr = UNSPEC (0) is equivalent to max_pcr = MAX (-1)
2111
    // easier to #define ATM_MAX_PCR 0 and have all rates unsigned?
2112
    // [this would get rid of next two conditionals]
2113
    if ((0) && tp->max_pcr == ATM_MAX_PCR)
2114
      PRINTD (DBG_QOS, "luser gave max_pcr = ATM_MAX_PCR");
2115
    else if ((tp->max_pcr != ATM_MAX_PCR) && tp->max_pcr < 0)
2116
      PRINTD (DBG_QOS, "luser gave negative max_pcr");
2117
    else if (tp->max_pcr && tp->max_pcr != ATM_MAX_PCR && tp->max_pcr < pcr)
2118
      PRINTD (DBG_QOS, "pcr greater than max_pcr");
2119
    else {
2120
      // each limit unspecified or not violated
2121
      PRINTD (DBG_QOS, "xBR(pcr) OK");
2122
      return 0;
2123
    }
2124
  PRINTD (DBG_QOS, "pcr=%u, tp: min_pcr=%d, pcr=%d, max_pcr=%d",
2125
          pcr, tp->min_pcr, tp->pcr, tp->max_pcr);
2126
  return -EINVAL;
2127
}
2128
 
2129
/********** open VC **********/
2130
 
2131
static int hrz_open (struct atm_vcc *atm_vcc)
2132
{
2133
  int error;
2134
  u16 channel;
2135
 
2136
  struct atm_qos * qos;
2137
  struct atm_trafprm * txtp;
2138
  struct atm_trafprm * rxtp;
2139
 
2140
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2141
  hrz_vcc vcc;
2142
  hrz_vcc * vccp; // allocated late
2143
  short vpi = atm_vcc->vpi;
2144
  int vci = atm_vcc->vci;
2145
  PRINTD (DBG_FLOW|DBG_VCC, "hrz_open %x %x", vpi, vci);
2146
 
2147
#ifdef ATM_VPI_UNSPEC
2148
  // UNSPEC is deprecated, remove this code eventually
2149
  if (vpi == ATM_VPI_UNSPEC || vci == ATM_VCI_UNSPEC) {
2150
    PRINTK (KERN_WARNING, "rejecting open with unspecified VPI/VCI (deprecated)");
2151
    return -EINVAL;
2152
  }
2153
#endif
2154
 
2155
  error = vpivci_to_channel (&channel, vpi, vci);
2156
  if (error) {
2157
    PRINTD (DBG_WARN|DBG_VCC, "VPI/VCI out of range: %hd/%d", vpi, vci);
2158
    return error;
2159
  }
2160
 
2161
  vcc.channel = channel;
2162
  // max speed for the moment
2163
  vcc.tx_rate = 0x0;
2164
 
2165
  qos = &atm_vcc->qos;
2166
 
2167
  // check AAL and remember it
2168
  switch (qos->aal) {
2169
    case ATM_AAL0:
2170
      // we would if it were 48 bytes and not 52!
2171
      PRINTD (DBG_QOS|DBG_VCC, "AAL0");
2172
      vcc.aal = aal0;
2173
      break;
2174
    case ATM_AAL34:
2175
      // we would if I knew how to do the SAR!
2176
      PRINTD (DBG_QOS|DBG_VCC, "AAL3/4");
2177
      vcc.aal = aal34;
2178
      break;
2179
    case ATM_AAL5:
2180
      PRINTD (DBG_QOS|DBG_VCC, "AAL5");
2181
      vcc.aal = aal5;
2182
      break;
2183
    default:
2184
      PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
2185
      return -EINVAL;
2186
      break;
2187
  }
2188
 
2189
  // TX traffic parameters
2190
 
2191
  // there are two, interrelated problems here: 1. the reservation of
2192
  // PCR is not a binary choice, we are given bounds and/or a
2193
  // desirable value; 2. the device is only capable of certain values,
2194
  // most of which are not integers. It is almost certainly acceptable
2195
  // to be off by a maximum of 1 to 10 cps.
2196
 
2197
  // Pragmatic choice: always store an integral PCR as that which has
2198
  // been allocated, even if we allocate a little (or a lot) less,
2199
  // after rounding. The actual allocation depends on what we can
2200
  // manage with our rate selection algorithm. The rate selection
2201
  // algorithm is given an integral PCR and a tolerance and told
2202
  // whether it should round the value up or down if the tolerance is
2203
  // exceeded; it returns: a) the actual rate selected (rounded up to
2204
  // the nearest integer), b) a bit pattern to feed to the timer
2205
  // register, and c) a failure value if no applicable rate exists.
2206
 
2207
  // Part of the job is done by atm_pcr_goal which gives us a PCR
2208
  // specification which says: EITHER grab the maximum available PCR
2209
  // (and perhaps a lower bound which we mustn't pass), OR grab this
2211
  // amount, rounding down if you have to (and perhaps a lower bound
2212
  // which we mustn't pass) OR grab this amount, rounding up if you
2213
  // have to (and perhaps an upper bound which we mustn't pass). If any
2213
  // bounds ARE passed we fail. Note that rounding is only rounding to
2214
  // match device limitations, we do not round down to satisfy
2215
  // bandwidth availability even if this would not violate any given
2216
  // lower bound.
2217
 
2218
  // Note: telephony = 64kb/s = 48 byte cell payload @ 500/3 cells/s
2219
  // (say) so this is not even a binary fixpoint cell rate (but this
2220
  // device can do it). To avoid this sort of hassle we use a
2221
  // tolerance parameter (currently fixed at 10 cps).
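  // Concretely: that 64kb/s stream wants 64000/8/48 = 166.67 cells/s, so with
  // the 10 cps tolerance any achievable rate from roughly 157 to 177 cells/s
  // can be selected without the rounding direction coming into play.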
2222
 
2223
  PRINTD (DBG_QOS, "TX:");
2224
 
2225
  txtp = &qos->txtp;
2226
 
2227
  // set up defaults for no traffic
2228
  vcc.tx_rate = 0;
2229
  // who knows what would actually happen if you try and send on this?
2230
  vcc.tx_xbr_bits = IDLE_RATE_TYPE;
2231
  vcc.tx_pcr_bits = CLOCK_DISABLE;
2232
#if 0
2233
  vcc.tx_scr_bits = CLOCK_DISABLE;
2234
  vcc.tx_bucket_bits = 0;
2235
#endif
2236
 
2237
  if (txtp->traffic_class != ATM_NONE) {
2238
    error = check_max_sdu (vcc.aal, txtp, max_tx_size);
2239
    if (error) {
2240
      PRINTD (DBG_QOS, "TX max_sdu check failed");
2241
      return error;
2242
    }
2243
 
2244
    switch (txtp->traffic_class) {
2245
      case ATM_UBR: {
2246
        // we take "the PCR" as a rate-cap
2247
        // not reserved
2248
        vcc.tx_rate = 0;
2249
        make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, NULL);
2250
        vcc.tx_xbr_bits = ABR_RATE_TYPE;
2251
        break;
2252
      }
2253
#if 0
2254
      case ATM_ABR: {
2255
        // reserve min, allow up to max
2256
        vcc.tx_rate = 0; // ?
2257
        make_rate (dev, 1<<30, round_nearest, &vcc.tx_pcr_bits, 0);
2258
        vcc.tx_xbr_bits = ABR_RATE_TYPE;
2259
        break;
2260
      }
2261
#endif
2262
      case ATM_CBR: {
2263
        int pcr = atm_pcr_goal (txtp);
2264
        rounding r;
2265
        if (!pcr) {
2266
          // down vs. up, remaining bandwidth vs. unlimited bandwidth!!
2267
          // should really have: once someone gets unlimited bandwidth
2268
          // that no more non-UBR channels can be opened until the
2269
          // unlimited one closes?? For the moment, round_down means
2270
          // greedy people actually get something and not nothing
2271
          r = round_down;
2272
          // slight race (no locking) here so we may get -EAGAIN
2273
          // later; the greedy bastards would deserve it :)
2274
          PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
2275
          pcr = dev->tx_avail;
2276
        } else if (pcr < 0) {
2277
          r = round_down;
2278
          pcr = -pcr;
2279
        } else {
2280
          r = round_up;
2281
        }
2282
        error = make_rate_with_tolerance (dev, pcr, r, 10,
2283
                                          &vcc.tx_pcr_bits, &vcc.tx_rate);
2284
        if (error) {
2285
          PRINTD (DBG_QOS, "could not make rate from TX PCR");
2286
          return error;
2287
        }
2288
        // not really clear what further checking is needed
2289
        error = atm_pcr_check (txtp, vcc.tx_rate);
2290
        if (error) {
2291
          PRINTD (DBG_QOS, "TX PCR failed consistency check");
2292
          return error;
2293
        }
2294
        vcc.tx_xbr_bits = CBR_RATE_TYPE;
2295
        break;
2296
      }
2297
#if 0
2298
      case ATM_VBR: {
2299
        int pcr = atm_pcr_goal (txtp);
2300
        // int scr = atm_scr_goal (txtp);
2301
        int scr = pcr/2; // just for fun
2302
        unsigned int mbs = 60; // just for fun
2303
        rounding pr;
2304
        rounding sr;
2305
        unsigned int bucket;
2306
        if (!pcr) {
2307
          pr = round_nearest;
2308
          pcr = 1<<30;
2309
        } else if (pcr < 0) {
2310
          pr = round_down;
2311
          pcr = -pcr;
2312
        } else {
2313
          pr = round_up;
2314
        }
2315
        error = make_rate_with_tolerance (dev, pcr, pr, 10,
2316
                                          &vcc.tx_pcr_bits, 0);
2317
        if (!scr) {
2318
          // see comments for PCR with CBR above
2319
          sr = round_down;
2320
          // slight race (no locking) here so we may get -EAGAIN
2321
          // later; the greedy bastards would deserve it :)
2322
          PRINTD (DBG_QOS, "snatching all remaining TX bandwidth");
2323
          scr = dev->tx_avail;
2324
        } else if (scr < 0) {
2325
          sr = round_down;
2326
          scr = -scr;
2327
        } else {
2328
          sr = round_up;
2329
        }
2330
        error = make_rate_with_tolerance (dev, scr, sr, 10,
2331
                                          &vcc.tx_scr_bits, &vcc.tx_rate);
2332
        if (error) {
2333
          PRINTD (DBG_QOS, "could not make rate from TX SCR");
2334
          return error;
2335
        }
2336
        // not really clear what further checking is needed
2337
        // error = atm_scr_check (txtp, vcc.tx_rate);
2338
        if (error) {
2339
          PRINTD (DBG_QOS, "TX SCR failed consistency check");
2340
          return error;
2341
        }
2342
        // bucket calculations (from a piece of paper...) cell bucket
2343
        // capacity must be largest integer smaller than m(p-s)/p + 1
2344
        // where m = max burst size, p = pcr, s = scr
2345
        bucket = mbs*(pcr-scr)/pcr;
2346
        if (bucket*pcr != mbs*(pcr-scr))
2347
          bucket += 1;
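        // e.g. with the toy values above (mbs = 60, scr = pcr/2) this works
        // out at a bucket of about 30 cells.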
2348
        if (bucket > BUCKET_MAX_SIZE) {
2349
          PRINTD (DBG_QOS, "shrinking bucket from %u to %u",
2350
                  bucket, BUCKET_MAX_SIZE);
2351
          bucket = BUCKET_MAX_SIZE;
2352
        }
2353
        vcc.tx_xbr_bits = VBR_RATE_TYPE;
2354
        vcc.tx_bucket_bits = bucket;
2355
        break;
2356
      }
2357
#endif
2358
      default: {
2359
        PRINTD (DBG_QOS, "unsupported TX traffic class");
2360
        return -EINVAL;
2361
        break;
2362
      }
2363
    }
2364
  }
2365
 
2366
  // RX traffic parameters
2367
 
2368
  PRINTD (DBG_QOS, "RX:");
2369
 
2370
  rxtp = &qos->rxtp;
2371
 
2372
  // set up defaults for no traffic
2373
  vcc.rx_rate = 0;
2374
 
2375
  if (rxtp->traffic_class != ATM_NONE) {
2376
    error = check_max_sdu (vcc.aal, rxtp, max_rx_size);
2377
    if (error) {
2378
      PRINTD (DBG_QOS, "RX max_sdu check failed");
2379
      return error;
2380
    }
2381
    switch (rxtp->traffic_class) {
2382
      case ATM_UBR: {
2383
        // not reserved
2384
        break;
2385
      }
2386
#if 0
2387
      case ATM_ABR: {
2388
        // reserve min
2389
        vcc.rx_rate = 0; // ?
2390
        break;
2391
      }
2392
#endif
2393
      case ATM_CBR: {
2394
        int pcr = atm_pcr_goal (rxtp);
2395
        if (!pcr) {
2396
          // slight race (no locking) here so we may get -EAGAIN
2397
          // later; the greedy bastards would deserve it :)
2398
          PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
2399
          pcr = dev->rx_avail;
2400
        } else if (pcr < 0) {
2401
          pcr = -pcr;
2402
        }
2403
        vcc.rx_rate = pcr;
2404
        // not really clear what further checking is needed
2405
        error = atm_pcr_check (rxtp, vcc.rx_rate);
2406
        if (error) {
2407
          PRINTD (DBG_QOS, "RX PCR failed consistency check");
2408
          return error;
2409
        }
2410
        break;
2411
      }
2412
#if 0
2413
      case ATM_VBR: {
2414
        // int scr = atm_scr_goal (rxtp);
2415
        int scr = 1<<16; // just for fun
2416
        if (!scr) {
2417
          // slight race (no locking) here so we may get -EAGAIN
2418
          // later; the greedy bastards would deserve it :)
2419
          PRINTD (DBG_QOS, "snatching all remaining RX bandwidth");
2420
          scr = dev->rx_avail;
2421
        } else if (scr < 0) {
2422
          scr = -scr;
2423
        }
2424
        vcc.rx_rate = scr;
2425
        // not really clear what further checking is needed
2426
        // error = atm_scr_check (rxtp, vcc.rx_rate);
2427
        if (error) {
2428
          PRINTD (DBG_QOS, "RX SCR failed consistency check");
2429
          return error;
2430
        }
2431
        break;
2432
      }
2433
#endif
2434
      default: {
2435
        PRINTD (DBG_QOS, "unsupported RX traffic class");
2436
        return -EINVAL;
2437
        break;
2438
      }
2439
    }
2440
  }
2441
 
2442
 
2443
  // late abort useful for diagnostics
2444
  if (vcc.aal != aal5) {
2445
    PRINTD (DBG_QOS, "AAL not supported");
2446
    return -EINVAL;
2447
  }
2448
 
2449
  // get space for our vcc stuff and copy parameters into it
2450
  vccp = kmalloc (sizeof(hrz_vcc), GFP_KERNEL);
2451
  if (!vccp) {
2452
    PRINTK (KERN_ERR, "out of memory!");
2453
    return -ENOMEM;
2454
  }
2455
  *vccp = vcc;
2456
 
2457
  // clear error and grab cell rate resource lock
2458
  error = 0;
2459
  spin_lock (&dev->rate_lock);
2460
 
2461
  if (vcc.tx_rate > dev->tx_avail) {
2462
    PRINTD (DBG_QOS, "not enough TX PCR left");
2463
    error = -EAGAIN;
2464
  }
2465
 
2466
  if (vcc.rx_rate > dev->rx_avail) {
2467
    PRINTD (DBG_QOS, "not enough RX PCR left");
2468
    error = -EAGAIN;
2469
  }
2470
 
2471
  if (!error) {
2472
    // really consume cell rates
2473
    dev->tx_avail -= vcc.tx_rate;
2474
    dev->rx_avail -= vcc.rx_rate;
2475
    PRINTD (DBG_QOS|DBG_VCC, "reserving %u TX PCR and %u RX PCR",
2476
            vcc.tx_rate, vcc.rx_rate);
2477
  }
2478
 
2479
  // release lock and exit on error
2480
  spin_unlock (&dev->rate_lock);
2481
  if (error) {
2482
    PRINTD (DBG_QOS|DBG_VCC, "insufficient cell rate resources");
2483
    kfree (vccp);
2484
    return error;
2485
  }
2486
 
2487
  // this is "immediately before allocating the connection identifier
2488
  // in hardware" - so long as the next call does not fail :)
2489
  set_bit(ATM_VF_ADDR,&atm_vcc->flags);
2490
 
2491
  // any errors here are very serious and should never occur
2492
 
2493
  if (rxtp->traffic_class != ATM_NONE) {
2494
    if (dev->rxer[channel]) {
2495
      PRINTD (DBG_ERR|DBG_VCC, "VC already open for RX");
2496
      error = -EBUSY;
2497
    }
2498
    if (!error)
2499
      error = hrz_open_rx (dev, channel);
2500
    if (error) {
2501
      kfree (vccp);
2502
      return error;
2503
    }
2504
    // this link allows RX frames through
2505
    dev->rxer[channel] = atm_vcc;
2506
  }
2507
 
2508
  // success, set elements of atm_vcc
2509
  atm_vcc->dev_data = (void *) vccp;
2510
 
2511
  // indicate readiness
2512
  set_bit(ATM_VF_READY,&atm_vcc->flags);
2513
 
2514
  return 0;
2515
}
2516
 
2517
/********** close VC **********/
2518
 
2519
static void hrz_close (struct atm_vcc * atm_vcc) {
2520
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2521
  hrz_vcc * vcc = HRZ_VCC(atm_vcc);
2522
  u16 channel = vcc->channel;
2523
  PRINTD (DBG_VCC|DBG_FLOW, "hrz_close");
2524
 
2525
  // indicate unreadiness
2526
  clear_bit(ATM_VF_READY,&atm_vcc->flags);
2527
 
2528
  if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
2529
    unsigned int i;
2530
 
2531
    // let any TX on this channel that has started complete
2532
    // no restart, just keep trying
2533
    while (tx_hold (dev))
2534
      ;
2535
    // remove record of any tx_channel having been setup for this channel
2536
    for (i = 0; i < TX_CHANS; ++i)
2537
      if (dev->tx_channel_record[i] == channel) {
2538
        dev->tx_channel_record[i] = -1;
2539
        break;
2540
      }
2541
    if (dev->last_vc == channel)
2542
      dev->tx_last = -1;
2543
    tx_release (dev);
2544
  }
2545
 
2546
  if (atm_vcc->qos.rxtp.traffic_class != ATM_NONE) {
2547
    // disable RXing - it tries quite hard
2548
    hrz_close_rx (dev, channel);
2549
    // forget the vcc - no more skbs will be pushed
2550
    if (atm_vcc != dev->rxer[channel])
2551
      PRINTK (KERN_ERR, "%s atm_vcc=%p rxer[channel]=%p",
2552
              "arghhh! we're going to die!",
2553
              atm_vcc, dev->rxer[channel]);
2554
    dev->rxer[channel] = NULL;
2555
  }
2556
 
2557
  // atomically release our rate reservation
2558
  spin_lock (&dev->rate_lock);
2559
  PRINTD (DBG_QOS|DBG_VCC, "releasing %u TX PCR and %u RX PCR",
2560
          vcc->tx_rate, vcc->rx_rate);
2561
  dev->tx_avail += vcc->tx_rate;
2562
  dev->rx_avail += vcc->rx_rate;
2563
  spin_unlock (&dev->rate_lock);
2564
 
2565
  // free our structure
2566
  kfree (vcc);
2567
  // say the VPI/VCI is free again
2568
  clear_bit(ATM_VF_ADDR,&atm_vcc->flags);
2569
}
2570
 
2571
#if 0
2572
static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2573
                           void *optval, int optlen) {
2574
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2575
  PRINTD (DBG_FLOW|DBG_VCC, "hrz_getsockopt");
2576
  switch (level) {
2577
    case SOL_SOCKET:
2578
      switch (optname) {
2579
//      case SO_BCTXOPT:
2580
//        break;
2581
//      case SO_BCRXOPT:
2582
//        break;
2583
        default:
2584
          return -ENOPROTOOPT;
2585
          break;
2586
      };
2587
      break;
2588
  }
2589
  return -EINVAL;
2590
}
2591
 
2592
static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2593
                           void *optval, int optlen) {
2594
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2595
  PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
2596
  switch (level) {
2597
    case SOL_SOCKET:
2598
      switch (optname) {
2599
//      case SO_BCTXOPT:
2600
//        break;
2601
//      case SO_BCRXOPT:
2602
//        break;
2603
        default:
2604
          return -ENOPROTOOPT;
2605
          break;
2606
      };
2607
      break;
2608
  }
2609
  return -EINVAL;
2610
}
2611
#endif
2612
 
2613
#if 0
2614
static int hrz_ioctl (struct atm_dev * atm_dev, unsigned int cmd, void *arg) {
2615
  hrz_dev * dev = HRZ_DEV(atm_dev);
2616
  PRINTD (DBG_FLOW, "hrz_ioctl");
2617
  return -1;
2618
}
2619
 
2620
unsigned char hrz_phy_get (struct atm_dev * atm_dev, unsigned long addr) {
2621
  hrz_dev * dev = HRZ_DEV(atm_dev);
2622
  PRINTD (DBG_FLOW, "hrz_phy_get");
2623
  return 0;
2624
}
2625
 
2626
static void hrz_phy_put (struct atm_dev * atm_dev, unsigned char value,
2627
                         unsigned long addr) {
2628
  hrz_dev * dev = HRZ_DEV(atm_dev);
2629
  PRINTD (DBG_FLOW, "hrz_phy_put");
2630
}
2631
 
2632
static int hrz_change_qos (struct atm_vcc * atm_vcc, struct atm_qos *qos, int flgs) {
2633
  hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
2634
  PRINTD (DBG_FLOW, "hrz_change_qos");
2635
  return -1;
2636
}
2637
#endif
2638
 
2639
/********** proc file contents **********/
2640
 
2641
static int hrz_proc_read (struct atm_dev * atm_dev, loff_t * pos, char * page) {
2642
  hrz_dev * dev = HRZ_DEV(atm_dev);
2643
  int left = *pos;
2644
  PRINTD (DBG_FLOW, "hrz_proc_read");
2645
 
2646
  /* more diagnostics here? */
2647
 
2648
#if 0
2649
  if (!left--) {
2650
    unsigned int count = sprintf (page, "vbr buckets:");
2651
    unsigned int i;
2652
    for (i = 0; i < TX_CHANS; ++i)
2653
      count += sprintf (page+count, " %u/%u",
2654
                        query_tx_channel_config (dev, i, BUCKET_FULLNESS_ACCESS),
2655
                        query_tx_channel_config (dev, i, BUCKET_CAPACITY_ACCESS));
2656
    count += sprintf (page+count, ".\n");
2657
    return count;
2658
  }
2659
#endif
2660
 
2661
  if (!left--)
2662
    return sprintf (page,
2663
                    "cells: TX %lu, RX %lu, HEC errors %lu, unassigned %lu.\n",
2664
                    dev->tx_cell_count, dev->rx_cell_count,
2665
                    dev->hec_error_count, dev->unassigned_cell_count);
2666
 
2667
  if (!left--)
2668
    return sprintf (page,
2669
                    "free cell buffers: TX %hu, RX %hu+%hu.\n",
2670
                    rd_regw (dev, TX_FREE_BUFFER_COUNT_OFF),
2671
                    rd_regw (dev, RX_FREE_BUFFER_COUNT_OFF),
2672
                    dev->noof_spare_buffers);
2673
 
2674
  if (!left--)
2675
    return sprintf (page,
2676
                    "cps remaining: TX %u, RX %u\n",
2677
                    dev->tx_avail, dev->rx_avail);
2678
 
2679
  return 0;
2680
}
2681
 
2682
static const struct atmdev_ops hrz_ops = {
2683
  .open = hrz_open,
2684
  .close        = hrz_close,
2685
  .send = hrz_send,
2686
  .proc_read    = hrz_proc_read,
2687
  .owner        = THIS_MODULE,
2688
};
2689
 
2690
static int __devinit hrz_probe(struct pci_dev *pci_dev, const struct pci_device_id *pci_ent)
2691
{
2692
        hrz_dev * dev;
2693
        int err = 0;
2694
 
2695
        // adapter slot free, read resources from PCI configuration space
2696
        u32 iobase = pci_resource_start (pci_dev, 0);
2697
        u32 * membase = bus_to_virt (pci_resource_start (pci_dev, 1));
2698
        unsigned int irq;
2699
        unsigned char lat;
2700
 
2701
        PRINTD (DBG_FLOW, "hrz_probe");
2702
 
2703
        if (pci_enable_device(pci_dev))
2704
                return -EINVAL;
2705
 
2706
        /* XXX DEV_LABEL is a guess */
2707
        if (!request_region(iobase, HRZ_IO_EXTENT, DEV_LABEL)) {
2708
                err = -EINVAL;
2709
                goto out_disable;
2710
        }
2711
 
2712
        dev = kzalloc(sizeof(hrz_dev), GFP_KERNEL);
2713
        if (!dev) {
2714
                // perhaps we should be nice: deregister all adapters and abort?
2715
                PRINTD(DBG_ERR, "out of memory");
2716
                err = -ENOMEM;
2717
                goto out_release;
2718
        }
2719
 
2720
        pci_set_drvdata(pci_dev, dev);
2721
 
2722
        // grab IRQ and install handler - move this someplace more sensible
2723
        irq = pci_dev->irq;
2724
        if (request_irq(irq,
2725
                        interrupt_handler,
2726
                        IRQF_SHARED, /* irqflags guess */
2727
                        DEV_LABEL, /* name guess */
2728
                        dev)) {
2729
                PRINTD(DBG_WARN, "request IRQ failed!");
2730
                err = -EINVAL;
2731
                goto out_free;
2732
        }
2733
 
2734
        PRINTD(DBG_INFO, "found Madge ATM adapter (hrz) at: IO %x, IRQ %u, MEM %p",
2735
               iobase, irq, membase);
2736
 
2737
        dev->atm_dev = atm_dev_register(DEV_LABEL, &hrz_ops, -1, NULL);
2738
        if (!(dev->atm_dev)) {
2739
                PRINTD(DBG_ERR, "failed to register Madge ATM adapter");
2740
                err = -EINVAL;
2741
                goto out_free_irq;
2742
        }
2743
 
2744
        PRINTD(DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
2745
               dev->atm_dev->number, dev, dev->atm_dev);
2746
        dev->atm_dev->dev_data = (void *) dev;
2747
        dev->pci_dev = pci_dev;
2748
 
2749
        // enable bus master accesses
2750
        pci_set_master(pci_dev);
2751
 
2752
        // frobnicate latency (upwards, usually)
2753
        pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &lat);
2754
        if (pci_lat) {
2755
                PRINTD(DBG_INFO, "%s PCI latency timer from %hu to %hu",
2756
                       "changing", lat, pci_lat);
2757
                pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, pci_lat);
2758
        } else if (lat < MIN_PCI_LATENCY) {
2759
                PRINTK(KERN_INFO, "%s PCI latency timer from %hu to %hu",
2760
                       "increasing", lat, MIN_PCI_LATENCY);
2761
                pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, MIN_PCI_LATENCY);
2762
        }
2763
 
2764
        dev->iobase = iobase;
2765
        dev->irq = irq;
2766
        dev->membase = membase;
2767
 
2768
        dev->rx_q_entry = dev->rx_q_reset = &memmap->rx_q_entries[0];
2769
        dev->rx_q_wrap  = &memmap->rx_q_entries[RX_CHANS-1];
2770
 
2771
        // these next three are performance hacks
2772
        dev->last_vc = -1;
2773
        dev->tx_last = -1;
2774
        dev->tx_idle = 0;
2775
 
2776
        dev->tx_regions = 0;
2777
        dev->tx_bytes = 0;
2778
        dev->tx_skb = NULL;
2779
        dev->tx_iovec = NULL;
2780
 
2781
        dev->tx_cell_count = 0;
2782
        dev->rx_cell_count = 0;
2783
        dev->hec_error_count = 0;
2784
        dev->unassigned_cell_count = 0;
2785
 
2786
        dev->noof_spare_buffers = 0;
2787
 
2788
        {
2789
                unsigned int i;
2790
                for (i = 0; i < TX_CHANS; ++i)
2791
                        dev->tx_channel_record[i] = -1;
2792
        }
2793
 
2794
        dev->flags = 0;
2795
 
2796
        // Allocate cell rates and remember ASIC version
2797
        // Fibre: ATM_OC3_PCR = 155520000/8/270*260/53 - 29/53
2798
        // Copper: (WRONG) we want 6 into the above, close to 25Mb/s
2799
        // Copper: (plagiarise!) 25600000/8/270*260/53 - n/53
2800
 
2801
        if (hrz_init(dev)) {
2802
                // to be really pedantic, this should be ATM_OC3c_PCR
2803
                dev->tx_avail = ATM_OC3_PCR;
2804
                dev->rx_avail = ATM_OC3_PCR;
2805
                set_bit(ultra, &dev->flags); // NOT "|= ultra" !
2806
        } else {
2807
                dev->tx_avail = ((25600000/8)*26)/(27*53);
2808
                dev->rx_avail = ((25600000/8)*26)/(27*53);
2809
                PRINTD(DBG_WARN, "Buggy ASIC: no TX bus-mastering.");
2810
        }
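        // (These come out at roughly 353207 cells/s for OC-3 fibre and
        // 83200000/1431 = 58141 cells/s for the 25.6Mb/s copper case.)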
2811
 
2812
        // rate changes spinlock
2813
        spin_lock_init(&dev->rate_lock);
2814
 
2815
        // on-board memory access spinlock; we want atomic reads and
2816
        // writes to adapter memory (handles IRQ and SMP)
2817
        spin_lock_init(&dev->mem_lock);
2818
 
2819
        init_waitqueue_head(&dev->tx_queue);
2820
 
2821
        // vpi in 0..4, vci in 6..10
2822
        dev->atm_dev->ci_range.vpi_bits = vpi_bits;
2823
        dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
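        // e.g. leaving vpi_bits at zero presents the whole 10-bit channel
        // space as VCIs 0..1023 on VPI 0; with vpi_bits = 2 the split becomes
        // 4 VPIs of 256 VCIs each.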
2824
 
2825
        init_timer(&dev->housekeeping);
2826
        dev->housekeeping.function = do_housekeeping;
2827
        dev->housekeeping.data = (unsigned long) dev;
2828
        mod_timer(&dev->housekeeping, jiffies);
2829
 
2830
out:
2831
        return err;
2832
 
2833
out_free_irq:
2834
        free_irq(dev->irq, dev);
2835
out_free:
2836
        kfree(dev);
2837
out_release:
2838
        release_region(iobase, HRZ_IO_EXTENT);
2839
out_disable:
2840
        pci_disable_device(pci_dev);
2841
        goto out;
2842
}
2843
 
2844
static void __devexit hrz_remove_one(struct pci_dev *pci_dev)
2845
{
2846
        hrz_dev *dev;
2847
 
2848
        dev = pci_get_drvdata(pci_dev);
2849
 
2850
        PRINTD(DBG_INFO, "closing %p (atm_dev = %p)", dev, dev->atm_dev);
2851
        del_timer_sync(&dev->housekeeping);
2852
        hrz_reset(dev);
2853
        atm_dev_deregister(dev->atm_dev);
2854
        free_irq(dev->irq, dev);
2855
        release_region(dev->iobase, HRZ_IO_EXTENT);
2856
        kfree(dev);
2857
 
2858
        pci_disable_device(pci_dev);
2859
}
2860
 
2861
static void __init hrz_check_args (void) {
2862
#ifdef DEBUG_HORIZON
2863
  PRINTK (KERN_NOTICE, "debug bitmap is %hx", debug &= DBG_MASK);
2864
#else
2865
  if (debug)
2866
    PRINTK (KERN_NOTICE, "no debug support in this image");
2867
#endif
2868
 
2869
  if (vpi_bits > HRZ_MAX_VPI)
2870
    PRINTK (KERN_ERR, "vpi_bits has been limited to %hu",
2871
            vpi_bits = HRZ_MAX_VPI);
2872
 
2873
  if (max_tx_size < 0 || max_tx_size > TX_AAL5_LIMIT)
2874
    PRINTK (KERN_NOTICE, "max_tx_size has been limited to %hu",
2875
            max_tx_size = TX_AAL5_LIMIT);
2876
 
2877
  if (max_rx_size < 0 || max_rx_size > RX_AAL5_LIMIT)
2878
    PRINTK (KERN_NOTICE, "max_rx_size has been limited to %hu",
2879
            max_rx_size = RX_AAL5_LIMIT);
2880
 
2881
  return;
2882
}
2883
 
2884
MODULE_AUTHOR(maintainer_string);
2885
MODULE_DESCRIPTION(description_string);
2886
MODULE_LICENSE("GPL");
2887
module_param(debug, ushort, 0644);
2888
module_param(vpi_bits, ushort, 0);
2889
module_param(max_tx_size, int, 0);
2890
module_param(max_rx_size, int, 0);
2891
module_param(pci_lat, byte, 0);
2892
MODULE_PARM_DESC(debug, "debug bitmap, see .h file");
2893
MODULE_PARM_DESC(vpi_bits, "number of bits (0..4) to allocate to VPIs");
2894
MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
2895
MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
2896
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
2897
 
2898
static struct pci_device_id hrz_pci_tbl[] = {
2899
        { PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
2900
          0, 0, 0 },
2901
        { 0, }
2902
};
2903
 
2904
MODULE_DEVICE_TABLE(pci, hrz_pci_tbl);
2905
 
2906
static struct pci_driver hrz_driver = {
2907
        .name =         "horizon",
2908
        .probe =        hrz_probe,
2909
        .remove =       __devexit_p(hrz_remove_one),
2910
        .id_table =     hrz_pci_tbl,
2911
};
2912
 
2913
/********** module entry **********/
2914
 
2915
static int __init hrz_module_init (void) {
2916
  // sanity check - cast is needed since printk does not support %Zu
2917
  if (sizeof(struct MEMMAP) != 128*1024/4) {
2918
    PRINTK (KERN_ERR, "Fix struct MEMMAP (is %lu fakewords).",
2919
            (unsigned long) sizeof(struct MEMMAP));
2920
    return -ENOMEM;
2921
  }
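  // (128*1024/4 = 32768: struct MEMMAP is expected to describe the adapter's
  // buffer memory as 32768 words, one "fakeword" per 32-bit word.)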
2922
 
2923
  show_version();
2924
 
2925
  // check arguments
2926
  hrz_check_args();
2927
 
2928
  // get the juice
2929
  return pci_register_driver(&hrz_driver);
2930
}
2931
 
2932
/********** module exit **********/
2933
 
2934
static void __exit hrz_module_exit (void) {
2935
  PRINTD (DBG_FLOW, "cleanup_module");
2936
 
2937
  pci_unregister_driver(&hrz_driver);
2938
}
2939
 
2940
module_init(hrz_module_init);
2941
module_exit(hrz_module_exit);
