1 |
1275 |
phoenix |
/*
|
2 |
|
|
* drivers/net/wan/dscc4/dscc4.c: a DSCC4 HDLC driver for Linux
|
3 |
|
|
*
|
4 |
|
|
* This software may be used and distributed according to the terms of the
|
5 |
|
|
* GNU General Public License.
|
6 |
|
|
*
|
7 |
|
|
* The author may be reached as romieu@cogenit.fr.
|
8 |
|
|
* Specific bug reports/asian food will be welcome.
|
9 |
|
|
*
|
10 |
|
|
* Special thanks to the nice people at CS-Telecom for the hardware and the
|
11 |
|
|
* access to the test/measure tools.
|
12 |
|
|
*
|
13 |
|
|
*
|
14 |
|
|
* Theory of Operation
|
15 |
|
|
*
|
16 |
|
|
* I. Board Compatibility
|
17 |
|
|
*
|
18 |
|
|
* This device driver is designed for the Siemens PEB20534 4 ports serial
|
19 |
|
|
* controller as found on Etinc PCISYNC cards. The documentation for the
|
20 |
|
|
* chipset is available at http://www.infineon.com:
|
21 |
|
|
* - Data Sheet "DSCC4, DMA Supported Serial Communication Controller with
|
22 |
|
|
* 4 Channels, PEB 20534 Version 2.1, PEF 20534 Version 2.1";
|
23 |
|
|
* - Application Hint "Management of DSCC4 on-chip FIFO resources".
|
24 |
|
|
* - Errata sheet DS5 (courtesy of Michael Skerritt).
|
25 |
|
|
* Jens David has built an adapter based on the same chipset. Take a look
|
26 |
|
|
* at http://www.afthd.tu-darmstadt.de/~dg1kjd/pciscc4 for a specific
|
27 |
|
|
* driver.
|
28 |
|
|
* Sample code (2 revisions) is available at Infineon.
|
29 |
|
|
*
|
30 |
|
|
* II. Board-specific settings
|
31 |
|
|
*
|
32 |
|
|
* Pcisync can transmit some clock signal to the outside world on the
|
33 |
|
|
* *first two* ports provided you put a quartz and a line driver on it and
|
34 |
|
|
* remove the jumpers. The operation is described on Etinc web site. If you
|
35 |
|
|
* go DCE on these ports, don't forget to use an adequate cable.
|
36 |
|
|
*
|
37 |
|
|
* Sharing of the PCI interrupt line for this board is possible.
|
38 |
|
|
*
|
39 |
|
|
* III. Driver operation
|
40 |
|
|
*
|
41 |
|
|
* The rx/tx operations are based on a linked list of descriptors. The driver
|
42 |
|
|
* doesn't use HOLD mode any more. HOLD mode is definitely buggy and the more
|
43 |
|
|
* I tried to fix it, the more it started to look like (convoluted) software
|
44 |
|
|
* mutation of LxDA method. Errata sheet DS5 suggests to use LxDA: consider
|
45 |
|
|
* this a rfc2119 MUST.
|
46 |
|
|
*
|
47 |
|
|
* Tx direction
|
48 |
|
|
* When the tx ring is full, the xmit routine issues a call to netdev_stop.
|
49 |
|
|
* The device is supposed to be enabled again during an ALLS irq (we could
|
50 |
|
|
* use HI but as it's easy to lose events, it's fscked).
|
51 |
|
|
*
|
52 |
|
|
* Rx direction
|
53 |
|
|
* The received frames aren't supposed to span over multiple receiving areas.
|
54 |
|
|
* I may implement it some day but it isn't the highest ranked item.
|
55 |
|
|
*
|
56 |
|
|
* IV. Notes
|
57 |
|
|
* The current error (XDU, RFO) recovery code is untested.
|
58 |
|
|
* So far, RDO takes his RX channel down and the right sequence to enable it
|
59 |
|
|
 * again is still a mystery. If RDO happens, plan a reboot. More details
|
60 |
|
|
* in the code (NB: as this happens, TX still works).
|
61 |
|
|
* Don't mess the cables during operation, especially on DTE ports. I don't
|
62 |
|
|
* suggest it for DCE either but at least one can get some messages instead
|
63 |
|
|
* of a complete instant freeze.
|
64 |
|
|
 * Tests are done on Rev. 20 of the silicon. The RDO handling changes with
|
65 |
|
|
* the documentation/chipset releases.
|
66 |
|
|
*
|
67 |
|
|
* TODO:
|
68 |
|
|
* - test X25.
|
69 |
|
|
* - use polling at high irq/s,
|
70 |
|
|
* - performance analysis,
|
71 |
|
|
* - endianness.
|
72 |
|
|
*
|
73 |
|
|
* 2001/12/10 Daniela Squassoni <daniela@cyclades.com>
|
74 |
|
|
* - Contribution to support the new generic HDLC layer.
|
75 |
|
|
*
|
76 |
|
|
* 2002/01 Ueimor
|
77 |
|
|
* - old style interface removal
|
78 |
|
|
* - dscc4_release_ring fix (related to DMA mapping)
|
79 |
|
|
* - hard_start_xmit fix (hint: TxSizeMax)
|
80 |
|
|
* - misc crapectomy.
|
81 |
|
|
*/
|
82 |
|
|
|
83 |
|
|
#include <linux/module.h>
|
84 |
|
|
#include <linux/types.h>
|
85 |
|
|
#include <linux/errno.h>
|
86 |
|
|
#include <linux/list.h>
|
87 |
|
|
#include <linux/ioport.h>
|
88 |
|
|
#include <linux/pci.h>
|
89 |
|
|
#include <linux/kernel.h>
|
90 |
|
|
#include <linux/mm.h>
|
91 |
|
|
|
92 |
|
|
#include <asm/system.h>
|
93 |
|
|
#include <asm/cache.h>
|
94 |
|
|
#include <asm/byteorder.h>
|
95 |
|
|
#include <asm/uaccess.h>
|
96 |
|
|
#include <asm/io.h>
|
97 |
|
|
#include <asm/irq.h>
|
98 |
|
|
|
99 |
|
|
#include <linux/init.h>
|
100 |
|
|
#include <linux/string.h>
|
101 |
|
|
|
102 |
|
|
#include <linux/if_arp.h>
|
103 |
|
|
#include <linux/netdevice.h>
|
104 |
|
|
#include <linux/skbuff.h>
|
105 |
|
|
#include <linux/delay.h>
|
106 |
|
|
#include <net/syncppp.h>
|
107 |
|
|
#include <linux/hdlc.h>
|
108 |
|
|
|
109 |
|
|
/* Version */
|
110 |
|
|
static const char version[] = "$Id: dscc4.c,v 1.1.1.1 2004-04-15 01:45:53 phoenix Exp $ for Linux\n";
|
111 |
|
|
static int debug;
|
112 |
|
|
static int quartz;
|
113 |
|
|
|
114 |
|
|
#ifdef CONFIG_DSCC4_PCI_RST
|
115 |
|
|
static DECLARE_MUTEX(dscc4_sem);
|
116 |
|
|
static u32 dscc4_pci_config_store[16];
|
117 |
|
|
#endif
|
118 |
|
|
|
119 |
|
|
#define DRV_NAME "dscc4"
|
120 |
|
|
|
121 |
|
|
#undef DSCC4_POLLING
|
122 |
|
|
|
123 |
|
|
/* Module parameters */
|
124 |
|
|
|
125 |
|
|
MODULE_AUTHOR("Maintainer: Francois Romieu <romieu@cogenit.fr>");
|
126 |
|
|
MODULE_DESCRIPTION("Siemens PEB20534 PCI Controler");
|
127 |
|
|
MODULE_LICENSE("GPL");
|
128 |
|
|
MODULE_PARM(debug,"i");
|
129 |
|
|
MODULE_PARM_DESC(debug,"Enable/disable extra messages");
|
130 |
|
|
MODULE_PARM(quartz,"i");
|
131 |
|
|
MODULE_PARM_DESC(quartz,"If present, on-board quartz frequency (Hz)");
|
132 |
|
|
|
133 |
|
|
/* Structures */
|
134 |
|
|
|
135 |
|
|
/*
 * Generic { constant, register-bits } pair.
 * NOTE(review): no user is visible in this chunk of the file —
 * presumably used by lookup tables further down; confirm against
 * the rest of the file.
 */
struct thingie {
	int define;
	u32 bits;
};
|
139 |
|
|
|
140 |
|
|
/*
 * Tx frame descriptor — one entry of the tx ring shared with the
 * DSCC4 DMA core (allocated via pci_alloc_consistent, cf TX_TOTAL_SIZE).
 */
struct TxFD {
	u32 state;	/* size/control bits, cf TO_STATE_TX() */
	u32 next;	/* bus address of the next descriptor */
	u32 data;	/* bus address of the frame payload */
	u32 complete;	/* completion status written by the chip */
	u32 jiffies; /* Allows sizeof(TxFD) == sizeof(RxFD) + extra hack */
};
|
147 |
|
|
|
148 |
|
|
/*
 * Rx frame descriptor — one entry of the rx ring shared with the
 * DSCC4 DMA core (cf RX_TOTAL_SIZE). state2 carries the received
 * length (cf TO_SIZE()); end is reset to a magic value after reaping
 * (cf dscc4_rx_skb).
 */
struct RxFD {
	u32 state1;	/* size/control bits, cf TO_STATE_RX() */
	u32 next;	/* bus address of the next descriptor */
	u32 data;	/* bus address of the receive buffer */
	u32 state2;	/* completion status + length, written by the chip */
	u32 end;	/* end-of-frame marker */
};
|
155 |
|
|
|
156 |
|
|
#define DUMMY_SKB_SIZE 64
|
157 |
|
|
#define TX_LOW 8
|
158 |
|
|
#define TX_RING_SIZE 32
|
159 |
|
|
#define RX_RING_SIZE 32
|
160 |
|
|
#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct TxFD)
|
161 |
|
|
#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct RxFD)
|
162 |
|
|
#define IRQ_RING_SIZE 64 /* Keep it a multiple of 32 */
|
163 |
|
|
#define TX_TIMEOUT (HZ/10)
|
164 |
|
|
#define DSCC4_HZ_MAX 33000000
|
165 |
|
|
#define BRR_DIVIDER_MAX 64*0x00004000 /* Cf errata DS5 p.10 */
|
166 |
|
|
#define dev_per_card 4
|
167 |
|
|
#define SCC_REGISTERS_MAX 23 /* Cf errata DS5 p.4 */
|
168 |
|
|
|
169 |
|
|
#define SOURCE_ID(flags) (((flags) >> 28) & 0x03)
|
170 |
|
|
#define TO_SIZE(state) (((state) >> 16) & 0x1fff)
|
171 |
|
|
|
172 |
|
|
/*
|
173 |
|
|
* Given the operating range of Linux HDLC, the 2 defines below could be
|
174 |
|
|
* made simpler. However they are a fine reminder for the limitations of
|
175 |
|
|
* the driver: it's better to stay < TxSizeMax and < RxSizeMax.
|
176 |
|
|
*/
|
177 |
|
|
#define TO_STATE_TX(len) cpu_to_le32(((len) & TxSizeMax) << 16)
|
178 |
|
|
#define TO_STATE_RX(len) cpu_to_le32((RX_MAX(len) % RxSizeMax) << 16)
|
179 |
|
|
#define RX_MAX(len) ((((len) >> 5) + 1) << 5) /* Cf RLCR */
|
180 |
|
|
#define SCC_REG_START(dpriv) (SCC_START+(dpriv->dev_id)*SCC_OFFSET)
|
181 |
|
|
|
182 |
|
|
/*
 * Per-card (PCI function) private data, shared by the four ports.
 * Stored as pci drvdata (cf dscc4_free1/dscc4_init_one).
 */
struct dscc4_pci_priv {
	u32 *iqcfg;		/* global config interrupt queue (cpu view) */
	int cfg_cur;		/* current index into iqcfg */
	spinlock_t lock;
	struct pci_dev *pdev;

	struct dscc4_dev_priv *root;	/* array of dev_per_card port privs */
	dma_addr_t iqcfg_dma;		/* bus address of iqcfg */
	u32 xtal_hz;			/* on-board quartz frequency (Hz) */
};
|
192 |
|
|
|
193 |
|
|
/*
 * Per-port (SCC channel) private data. dev_per_card of these are
 * allocated contiguously per card and anchored at dscc4_pci_priv.root.
 */
struct dscc4_dev_priv {
	struct sk_buff *rx_skbuff[RX_RING_SIZE];	/* skbs backing the rx ring */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];	/* skbs queued for tx */

	struct RxFD *rx_fd;	/* rx descriptor ring (cpu view) */
	struct TxFD *tx_fd;	/* tx descriptor ring (cpu view) */
	u32 *iqrx;		/* per-channel rx interrupt queue */
	u32 *iqtx;		/* per-channel tx interrupt queue */

	/* FIXME: check all the volatile are required */
	volatile u32 tx_current;	/* next tx ring slot to fill */
	u32 rx_current;			/* next rx ring slot to reap */
	u32 iqtx_current;		/* next tx irq queue entry */
	u32 iqrx_current;		/* next rx irq queue entry */

	volatile u32 tx_dirty;		/* oldest not-yet-reclaimed tx slot */
	volatile u32 ltda;		/* last tx descriptor bus address (cf dscc4_do_tx) */
	u32 rx_dirty;			/* next rx slot to refill (cf try_get_rx_skb) */
	u32 lrda;			/* last rx descriptor bus address (cf dscc4_rx_update) */

	dma_addr_t tx_fd_dma;		/* bus address of tx_fd */
	dma_addr_t rx_fd_dma;		/* bus address of rx_fd */
	dma_addr_t iqtx_dma;		/* bus address of iqtx */
	dma_addr_t iqrx_dma;		/* bus address of iqrx */

	u32 scc_regs[SCC_REGISTERS_MAX]; /* Cf errata DS5 p.4 */

	struct timer_list timer;

	struct dscc4_pci_priv *pci_priv;	/* back pointer to the card */
	spinlock_t lock;

	int dev_id;			/* channel index on the card */
	volatile u32 flags;		/* state flags: NeedIDR/NeedIDT/RdoSet/FakeReset */
	u32 timer_help;

	unsigned short encoding;
	unsigned short parity;
	hdlc_device hdlc;		/* embedded generic HDLC device */
	sync_serial_settings settings;
	u32 __pad __attribute__ ((aligned (4)));
};
|
235 |
|
|
|
236 |
|
|
/* GLOBAL registers definitions */
|
237 |
|
|
#define GCMDR 0x00
|
238 |
|
|
#define GSTAR 0x04
|
239 |
|
|
#define GMODE 0x08
|
240 |
|
|
#define IQLENR0 0x0C
|
241 |
|
|
#define IQLENR1 0x10
|
242 |
|
|
#define IQRX0 0x14
|
243 |
|
|
#define IQTX0 0x24
|
244 |
|
|
#define IQCFG 0x3c
|
245 |
|
|
#define FIFOCR1 0x44
|
246 |
|
|
#define FIFOCR2 0x48
|
247 |
|
|
#define FIFOCR3 0x4c
|
248 |
|
|
#define FIFOCR4 0x34
|
249 |
|
|
#define CH0CFG 0x50
|
250 |
|
|
#define CH0BRDA 0x54
|
251 |
|
|
#define CH0BTDA 0x58
|
252 |
|
|
#define CH0FRDA 0x98
|
253 |
|
|
#define CH0FTDA 0xb0
|
254 |
|
|
#define CH0LRDA 0xc8
|
255 |
|
|
#define CH0LTDA 0xe0
|
256 |
|
|
|
257 |
|
|
/* SCC registers definitions */
|
258 |
|
|
#define SCC_START 0x0100
|
259 |
|
|
#define SCC_OFFSET 0x80
|
260 |
|
|
#define CMDR 0x00
|
261 |
|
|
#define STAR 0x04
|
262 |
|
|
#define CCR0 0x08
|
263 |
|
|
#define CCR1 0x0c
|
264 |
|
|
#define CCR2 0x10
|
265 |
|
|
#define BRR 0x2C
|
266 |
|
|
#define RLCR 0x40
|
267 |
|
|
#define IMR 0x54
|
268 |
|
|
#define ISR 0x58
|
269 |
|
|
|
270 |
|
|
#define GPDIR 0x0400
|
271 |
|
|
#define GPDATA 0x0404
|
272 |
|
|
#define GPIM 0x0408
|
273 |
|
|
|
274 |
|
|
/* Bit masks */
|
275 |
|
|
#define EncodingMask 0x00700000
|
276 |
|
|
#define CrcMask 0x00000003
|
277 |
|
|
|
278 |
|
|
#define IntRxScc0 0x10000000
|
279 |
|
|
#define IntTxScc0 0x01000000
|
280 |
|
|
|
281 |
|
|
#define TxPollCmd 0x00000400
|
282 |
|
|
#define RxActivate 0x08000000
|
283 |
|
|
#define MTFi 0x04000000
|
284 |
|
|
#define Rdr 0x00400000
|
285 |
|
|
#define Rdt 0x00200000
|
286 |
|
|
#define Idr 0x00100000
|
287 |
|
|
#define Idt 0x00080000
|
288 |
|
|
#define TxSccRes 0x01000000
|
289 |
|
|
#define RxSccRes 0x00010000
|
290 |
|
|
#define TxSizeMax 0x1fff /* Datasheet DS1 - 11.1.1.1 */
|
291 |
|
|
#define RxSizeMax 0x1ffc /* Datasheet DS1 - 11.1.2.1 */
|
292 |
|
|
|
293 |
|
|
#define Ccr0ClockMask 0x0000003f
|
294 |
|
|
#define Ccr1LoopMask 0x00000200
|
295 |
|
|
#define IsrMask 0x000fffff
|
296 |
|
|
#define BrrExpMask 0x00000f00
|
297 |
|
|
#define BrrMultMask 0x0000003f
|
298 |
|
|
#define EncodingMask 0x00700000
|
299 |
|
|
#define Hold 0x40000000
|
300 |
|
|
#define SccBusy 0x10000000
|
301 |
|
|
#define PowerUp 0x80000000
|
302 |
|
|
#define Vis 0x00001000
|
303 |
|
|
#define FrameOk (FrameVfr | FrameCrc)
|
304 |
|
|
#define FrameVfr 0x80
|
305 |
|
|
#define FrameRdo 0x40
|
306 |
|
|
#define FrameCrc 0x20
|
307 |
|
|
#define FrameRab 0x10
|
308 |
|
|
#define FrameAborted 0x00000200
|
309 |
|
|
#define FrameEnd 0x80000000
|
310 |
|
|
#define DataComplete 0x40000000
|
311 |
|
|
#define LengthCheck 0x00008000
|
312 |
|
|
#define SccEvt 0x02000000
|
313 |
|
|
#define NoAck 0x00000200
|
314 |
|
|
#define Action 0x00000001
|
315 |
|
|
#define HiDesc 0x20000000
|
316 |
|
|
|
317 |
|
|
/* SCC events */
|
318 |
|
|
#define RxEvt 0xf0000000
|
319 |
|
|
#define TxEvt 0x0f000000
|
320 |
|
|
#define Alls 0x00040000
|
321 |
|
|
#define Xdu 0x00010000
|
322 |
|
|
#define Cts 0x00004000
|
323 |
|
|
#define Xmr 0x00002000
|
324 |
|
|
#define Xpr 0x00001000
|
325 |
|
|
#define Rdo 0x00000080
|
326 |
|
|
#define Rfs 0x00000040
|
327 |
|
|
#define Cd 0x00000004
|
328 |
|
|
#define Rfo 0x00000002
|
329 |
|
|
#define Flex 0x00000001
|
330 |
|
|
|
331 |
|
|
/* DMA core events */
|
332 |
|
|
#define Cfg 0x00200000
|
333 |
|
|
#define Hi 0x00040000
|
334 |
|
|
#define Fi 0x00020000
|
335 |
|
|
#define Err 0x00010000
|
336 |
|
|
#define Arf 0x00000002
|
337 |
|
|
#define ArAck 0x00000001
|
338 |
|
|
|
339 |
|
|
/* State flags */
|
340 |
|
|
#define Ready 0x00000000
|
341 |
|
|
#define NeedIDR 0x00000001
|
342 |
|
|
#define NeedIDT 0x00000002
|
343 |
|
|
#define RdoSet 0x00000004
|
344 |
|
|
#define FakeReset 0x00000008
|
345 |
|
|
|
346 |
|
|
/* Don't mask RDO. Ever. */
|
347 |
|
|
#ifdef DSCC4_POLLING
|
348 |
|
|
#define EventsMask 0xfffeef7f
|
349 |
|
|
#else
|
350 |
|
|
#define EventsMask 0xfffa8f7a
|
351 |
|
|
#endif
|
352 |
|
|
|
353 |
|
|
/* Functions prototypes */
|
354 |
|
|
static inline void dscc4_rx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
|
355 |
|
|
static inline void dscc4_tx_irq(struct dscc4_pci_priv *, struct dscc4_dev_priv *);
|
356 |
|
|
static int dscc4_found1(struct pci_dev *, unsigned long ioaddr);
|
357 |
|
|
static int dscc4_init_one(struct pci_dev *, const struct pci_device_id *ent);
|
358 |
|
|
static int dscc4_open(struct net_device *);
|
359 |
|
|
static int dscc4_start_xmit(struct sk_buff *, struct net_device *);
|
360 |
|
|
static int dscc4_close(struct net_device *);
|
361 |
|
|
static int dscc4_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
362 |
|
|
static int dscc4_init_ring(struct net_device *);
|
363 |
|
|
static void dscc4_release_ring(struct dscc4_dev_priv *);
|
364 |
|
|
static void dscc4_timer(unsigned long);
|
365 |
|
|
static void dscc4_tx_timeout(struct net_device *);
|
366 |
|
|
static void dscc4_irq(int irq, void *dev_id, struct pt_regs *ptregs);
|
367 |
|
|
static int dscc4_hdlc_attach(hdlc_device *, unsigned short, unsigned short);
|
368 |
|
|
static int dscc4_set_iface(struct dscc4_dev_priv *, struct net_device *);
|
369 |
|
|
static inline int dscc4_set_quartz(struct dscc4_dev_priv *, int);
|
370 |
|
|
#ifdef DSCC4_POLLING
|
371 |
|
|
static int dscc4_tx_poll(struct dscc4_dev_priv *, struct net_device *);
|
372 |
|
|
#endif
|
373 |
|
|
|
374 |
|
|
/*
 * Recover the enclosing dscc4_dev_priv from the net_device embedded
 * in its hdlc_device (container_of-style list_entry pointer math —
 * depends on dev being &dpriv->hdlc.netdev).
 */
static inline struct dscc4_dev_priv *dscc4_priv(struct net_device *dev)
{
	return list_entry(dev, struct dscc4_dev_priv, hdlc.netdev);
}
|
378 |
|
|
|
379 |
|
|
/*
 * Read-modify-write of an SCC register through its shadow copy:
 * clear @mask bits, set @value bits, store the result in scc_regs[]
 * and push it to the chip. The shadow exists because of the register
 * access errata — cf scc_regs comment (errata DS5 p.4).
 */
static void scc_patchl(u32 mask, u32 value, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	u32 state;

	/* Cf scc_writel for concern regarding thread-safety */
	state = dpriv->scc_regs[offset >> 2];
	state &= ~mask;
	state |= value;
	dpriv->scc_regs[offset >> 2] = state;
	writel(state, dev->base_addr + SCC_REG_START(dpriv) + offset);
}
|
391 |
|
|
|
392 |
|
|
/*
 * Write @bits to an SCC register, keeping the shadow copy in
 * scc_regs[] in sync so later scc_readl/scc_patchl see the value.
 */
static void scc_writel(u32 bits, struct dscc4_dev_priv *dpriv,
		       struct net_device *dev, int offset)
{
	/*
	 * Thread-UNsafe.
	 * As of 2002/02/16, there are no thread racing for access.
	 */
	dpriv->scc_regs[offset >> 2] = bits;
	writel(bits, dev->base_addr + SCC_REG_START(dpriv) + offset);
}
|
402 |
|
|
|
403 |
|
|
/*
 * Return the shadowed (last written) value of an SCC register —
 * no hardware access, cf scc_writel/scc_patchl.
 */
static inline u32 scc_readl(struct dscc4_dev_priv *dpriv, int offset)
{
	return dpriv->scc_regs[offset >> 2];
}
|
407 |
|
|
|
408 |
|
|
/*
 * Read the SCC STAR register from the chip. It is read twice on
 * purpose: the first read is a discarded dummy access required by
 * the silicon errata — do not "optimize" it away.
 */
static u32 scc_readl_star(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* Cf errata DS5 p.4 */
	readl(dev->base_addr + SCC_REG_START(dpriv) + STAR);
	return readl(dev->base_addr + SCC_REG_START(dpriv) + STAR);
}
|
414 |
|
|
|
415 |
|
|
/*
 * Publish the queued tx descriptors to the DMA core by advancing the
 * channel's "last tx descriptor address" (LTDA) register to the slot
 * just before tx_current.
 */
static inline void dscc4_do_tx(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	dpriv->ltda = dpriv->tx_fd_dma +
		      ((dpriv->tx_current-1)%TX_RING_SIZE)*sizeof(struct TxFD);
	writel(dpriv->ltda, dev->base_addr + CH0LTDA + dpriv->dev_id*4);
	/* Flush posted writes *NOW* */
	readl(dev->base_addr + CH0LTDA + dpriv->dev_id*4);
}
|
424 |
|
|
|
425 |
|
|
/*
 * Publish the refilled rx descriptors to the DMA core by advancing
 * the channel's "last rx descriptor address" (LRDA) register to the
 * slot just before rx_dirty. No read-back flush here, unlike
 * dscc4_do_tx.
 */
static inline void dscc4_rx_update(struct dscc4_dev_priv *dpriv,
				   struct net_device *dev)
{
	dpriv->lrda = dpriv->rx_fd_dma +
		      ((dpriv->rx_dirty - 1)%RX_RING_SIZE)*sizeof(struct RxFD);
	writel(dpriv->lrda, dev->base_addr + CH0LRDA + dpriv->dev_id*4);
}
|
432 |
|
|
|
433 |
|
|
static inline unsigned int dscc4_tx_done(struct dscc4_dev_priv *dpriv)
|
434 |
|
|
{
|
435 |
|
|
return dpriv->tx_current == dpriv->tx_dirty;
|
436 |
|
|
}
|
437 |
|
|
|
438 |
|
|
/*
 * True when the chip's current tx descriptor pointer (FTDA) has
 * caught up with the last descriptor we handed it (ltda), i.e. the
 * tx DMA engine has nothing left to fetch.
 */
static inline unsigned int dscc4_tx_quiescent(struct dscc4_dev_priv *dpriv,
					      struct net_device *dev)
{
	return readl(dev->base_addr + CH0FTDA + dpriv->dev_id*4) == dpriv->ltda;
}
|
443 |
|
|
|
444 |
|
|
/*
 * Debug-only sanity check of an interrupt queue entry: verify the
 * event's source id matches this channel and that none of the
 * unexpected status bits (0x0df80c00) are set. Only active when the
 * "debug" module parameter is > 1.
 * Returns 0 when nothing suspicious was found, -1 otherwise.
 */
int state_check(u32 state, struct dscc4_dev_priv *dpriv, struct net_device *dev,
		const char *msg)
{
	int ret = 0;

	if (debug <= 1)
		return ret;

	if (SOURCE_ID(state) != dpriv->dev_id) {
		printk(KERN_DEBUG "%s (%s): Source Id=%d, state=%08x\n",
		       dev->name, msg, SOURCE_ID(state), state );
		ret = -1;
	}
	if (state & 0x0df80c00) {
		printk(KERN_DEBUG "%s (%s): state=%08x (UFO alert)\n",
		       dev->name, msg, state);
		ret = -1;
	}
	return ret;
}
|
463 |
|
|
|
464 |
|
|
/* Debug helper: dump the tx ring indexes with a context tag. */
void dscc4_tx_print(struct net_device *dev, struct dscc4_dev_priv *dpriv,
		    char *msg)
{
	printk(KERN_DEBUG "%s: tx_current=%02d tx_dirty=%02d (%s)\n",
	       dev->name, dpriv->tx_current, dpriv->tx_dirty, msg);
}
|
470 |
|
|
|
471 |
|
|
/*
 * Free everything a port's rx/tx rings own: the two consistent
 * descriptor areas, then each still-mapped skb (streaming DMA
 * unmapped before freeing). Caller must ensure the DMA engine is
 * no longer using the rings.
 */
static void dscc4_release_ring(struct dscc4_dev_priv *dpriv)
{
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd = dpriv->tx_fd;
	struct RxFD *rx_fd = dpriv->rx_fd;
	struct sk_buff **skbuff;
	int i;

	pci_free_consistent(pdev, TX_TOTAL_SIZE, tx_fd, dpriv->tx_fd_dma);
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);

	/* tx ring: descriptors still hold the bus address of each skb */
	skbuff = dpriv->tx_skbuff;
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, tx_fd->data, (*skbuff)->len,
				PCI_DMA_TODEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		tx_fd++;
	}

	/* rx ring: buffers are all mapped at the fixed RX_MAX size */
	skbuff = dpriv->rx_skbuff;
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (*skbuff) {
			pci_unmap_single(pdev, rx_fd->data,
				RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
			dev_kfree_skb(*skbuff);
		}
		skbuff++;
		rx_fd++;
	}
}
|
504 |
|
|
|
505 |
|
|
/*
 * Try to attach a freshly allocated skb to the rx_dirty ring slot and
 * map it for DMA. Returns 0 on success, -1 on allocation failure
 * (the descriptor's data pointer is then cleared). The caller
 * advances rx_dirty on success (cf dscc4_rx_skb).
 */
inline int try_get_rx_skb(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	unsigned int dirty = dpriv->rx_dirty%RX_RING_SIZE;
	struct RxFD *rx_fd = dpriv->rx_fd + dirty;
	const int len = RX_MAX(HDLC_MAX_MRU);
	struct sk_buff *skb;
	int ret = 0;

	skb = dev_alloc_skb(len);
	dpriv->rx_skbuff[dirty] = skb;
	if (skb) {
		skb->dev = dev;
		skb->protocol = hdlc_type_trans(skb, dev);
		skb->mac.raw = skb->data;
		rx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
					     len, PCI_DMA_FROMDEVICE);
	} else {
		rx_fd->data = (u32) NULL;
		ret = -1;
	}
	return ret;
}
|
527 |
|
|
|
528 |
|
|
/*
|
529 |
|
|
* IRQ/thread/whatever safe
|
530 |
|
|
*/
|
531 |
|
|
static int dscc4_wait_ack_cec(struct dscc4_dev_priv *dpriv,
|
532 |
|
|
struct net_device *dev, char *msg)
|
533 |
|
|
{
|
534 |
|
|
s8 i = 0;
|
535 |
|
|
|
536 |
|
|
do {
|
537 |
|
|
if (!(scc_readl_star(dpriv, dev) & SccBusy)) {
|
538 |
|
|
printk(KERN_DEBUG "%s: %s ack (%d try)\n", dev->name,
|
539 |
|
|
msg, i);
|
540 |
|
|
goto done;
|
541 |
|
|
}
|
542 |
|
|
set_current_state(TASK_UNINTERRUPTIBLE);
|
543 |
|
|
schedule_timeout(10);
|
544 |
|
|
rmb();
|
545 |
|
|
} while (++i > 0);
|
546 |
|
|
printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
|
547 |
|
|
done:
|
548 |
|
|
return (i >= 0) ? i : -EAGAIN;
|
549 |
|
|
}
|
550 |
|
|
|
551 |
|
|
static int dscc4_do_action(struct net_device *dev, char *msg)
|
552 |
|
|
{
|
553 |
|
|
unsigned long ioaddr = dev->base_addr;
|
554 |
|
|
s16 i = 0;
|
555 |
|
|
|
556 |
|
|
writel(Action, ioaddr + GCMDR);
|
557 |
|
|
ioaddr += GSTAR;
|
558 |
|
|
do {
|
559 |
|
|
u32 state = readl(ioaddr);
|
560 |
|
|
|
561 |
|
|
if (state & ArAck) {
|
562 |
|
|
printk(KERN_DEBUG "%s: %s ack\n", dev->name, msg);
|
563 |
|
|
writel(ArAck, ioaddr);
|
564 |
|
|
goto done;
|
565 |
|
|
} else if (state & Arf) {
|
566 |
|
|
printk(KERN_ERR "%s: %s failed\n", dev->name, msg);
|
567 |
|
|
writel(Arf, ioaddr);
|
568 |
|
|
i = -1;
|
569 |
|
|
goto done;
|
570 |
|
|
}
|
571 |
|
|
rmb();
|
572 |
|
|
} while (++i > 0);
|
573 |
|
|
printk(KERN_ERR "%s: %s timeout\n", dev->name, msg);
|
574 |
|
|
done:
|
575 |
|
|
return i;
|
576 |
|
|
}
|
577 |
|
|
|
578 |
|
|
static inline int dscc4_xpr_ack(struct dscc4_dev_priv *dpriv)
|
579 |
|
|
{
|
580 |
|
|
int cur = dpriv->iqtx_current%IRQ_RING_SIZE;
|
581 |
|
|
s8 i = 0;
|
582 |
|
|
|
583 |
|
|
do {
|
584 |
|
|
if (!(dpriv->flags & (NeedIDR | NeedIDT)) ||
|
585 |
|
|
(dpriv->iqtx[cur] & Xpr))
|
586 |
|
|
break;
|
587 |
|
|
smp_rmb();
|
588 |
|
|
set_current_state(TASK_UNINTERRUPTIBLE);
|
589 |
|
|
schedule_timeout(10);
|
590 |
|
|
} while (++i > 0);
|
591 |
|
|
|
592 |
|
|
return (i >= 0 ) ? i : -EAGAIN;
|
593 |
|
|
}
|
594 |
|
|
|
595 |
|
|
#if 0 /* dscc4_{rx/tx}_reset are both unreliable - more tweak needed */
/*
 * (disabled) Attempt at an rx channel reset per errata DS5 p.6:
 * clear LRDA, power the SCC core down, then issue MTFi|Rdr through
 * the channel config register. Kept for reference only.
 */
static void dscc4_rx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dpriv->pci_priv->lock, flags);
	/* Cf errata DS5 p.6 */
	writel(0x00000000, dev->base_addr + CH0LRDA + dpriv->dev_id*4);
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	readl(dev->base_addr + CH0LRDA + dpriv->dev_id*4);
	writel(MTFi|Rdr, dev->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	writel(Action, dev->base_addr + GCMDR);
	spin_unlock_irqrestore(&dpriv->pci_priv->lock, flags);
}
|
609 |
|
|
|
610 |
|
|
/*
 * (disabled) Attempt at a tx channel reset per errata DS5 p.7:
 * power the SCC core down, wait (bounded) for the tx DMA engine to
 * go quiescent, then issue MTFi|Rdt. Kept for reference only.
 */
static void dscc4_tx_reset(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	u16 i = 0;

	/* Cf errata DS5 p.7 */
	scc_patchl(PowerUp, 0, dpriv, dev, CCR0);
	scc_writel(0x00050000, dpriv, dev, CCR2);
	/*
	 * Must be longer than the time required to fill the fifo.
	 */
	while (!dscc4_tx_quiescent(dpriv, dev) && ++i) {
		udelay(1);
		wmb();
	}

	writel(MTFi|Rdt, dev->base_addr + dpriv->dev_id*0x0c + CH0CFG);
	if (dscc4_do_action(dev, "Rdt") < 0)
		printk(KERN_ERR "%s: Tx reset failed\n", dev->name);
}
#endif
|
630 |
|
|
|
631 |
|
|
/* TODO: (ab)use this function to refill a completely depleted RX ring. */
|
632 |
|
|
static inline void dscc4_rx_skb(struct dscc4_dev_priv *dpriv,
|
633 |
|
|
struct net_device *dev)
|
634 |
|
|
{
|
635 |
|
|
struct RxFD *rx_fd = dpriv->rx_fd + dpriv->rx_current%RX_RING_SIZE;
|
636 |
|
|
struct net_device_stats *stats = &dpriv->hdlc.stats;
|
637 |
|
|
struct pci_dev *pdev = dpriv->pci_priv->pdev;
|
638 |
|
|
struct sk_buff *skb;
|
639 |
|
|
int pkt_len;
|
640 |
|
|
|
641 |
|
|
skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE];
|
642 |
|
|
if (!skb) {
|
643 |
|
|
printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__);
|
644 |
|
|
goto refill;
|
645 |
|
|
}
|
646 |
|
|
pkt_len = TO_SIZE(rx_fd->state2);
|
647 |
|
|
pci_dma_sync_single(pdev, rx_fd->data, pkt_len, PCI_DMA_FROMDEVICE);
|
648 |
|
|
pci_unmap_single(pdev, rx_fd->data, RX_MAX(HDLC_MAX_MRU), PCI_DMA_FROMDEVICE);
|
649 |
|
|
if ((skb->data[--pkt_len] & FrameOk) == FrameOk) {
|
650 |
|
|
stats->rx_packets++;
|
651 |
|
|
stats->rx_bytes += pkt_len;
|
652 |
|
|
skb_put(skb, pkt_len);
|
653 |
|
|
if (netif_running(dev))
|
654 |
|
|
skb->protocol = hdlc_type_trans(skb, dev);
|
655 |
|
|
skb->dev->last_rx = jiffies;
|
656 |
|
|
netif_rx(skb);
|
657 |
|
|
} else {
|
658 |
|
|
if (skb->data[pkt_len] & FrameRdo)
|
659 |
|
|
stats->rx_fifo_errors++;
|
660 |
|
|
else if (!(skb->data[pkt_len] | ~FrameCrc))
|
661 |
|
|
stats->rx_crc_errors++;
|
662 |
|
|
else if (!(skb->data[pkt_len] | ~(FrameVfr | FrameRab)))
|
663 |
|
|
stats->rx_length_errors++;
|
664 |
|
|
else
|
665 |
|
|
stats->rx_errors++;
|
666 |
|
|
dev_kfree_skb_irq(skb);
|
667 |
|
|
}
|
668 |
|
|
refill:
|
669 |
|
|
while ((dpriv->rx_dirty - dpriv->rx_current) % RX_RING_SIZE) {
|
670 |
|
|
if (try_get_rx_skb(dpriv, dev) < 0)
|
671 |
|
|
break;
|
672 |
|
|
dpriv->rx_dirty++;
|
673 |
|
|
}
|
674 |
|
|
dscc4_rx_update(dpriv, dev);
|
675 |
|
|
rx_fd->state2 = 0x00000000;
|
676 |
|
|
rx_fd->end = 0xbabeface;
|
677 |
|
|
}
|
678 |
|
|
|
679 |
|
|
/*
 * Tear down the per-card software state: unregister the four HDLC
 * devices, detach the drvdata and free both allocations made by
 * dscc4_found1 (root array, then ppriv).
 */
static void dscc4_free1(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	for (i = 0; i < dev_per_card; i++)
		unregister_hdlc_device(&root[i].hdlc);

	pci_set_drvdata(pdev, NULL);

	kfree(root);
	kfree(ppriv);
}
|
696 |
|
|
|
697 |
|
|
/*
 * PCI probe: claim both MMIO BARs (registers + LBI), remap BAR0,
 * apply the latency-timer errata workaround, allocate the software
 * state (dscc4_found1), hook the shared irq, then program the global
 * mode, interrupt queue geometry/addresses and FIFO thresholds.
 * Returns 0 on success, -ENODEV on any failure (full unwind via the
 * goto ladder below).
 */
static int __devinit dscc4_init_one(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	struct dscc4_pci_priv *priv;
	struct dscc4_dev_priv *dpriv;
	static int cards_found = 0;
	unsigned long ioaddr;
	int i;

	printk(KERN_DEBUG "%s", version);

	if (pci_enable_device(pdev))
		goto err_out;
	if (!request_mem_region(pci_resource_start(pdev, 0),
	                        pci_resource_len(pdev, 0), "registers")) {
	        printk(KERN_ERR "%s: can't reserve MMIO region (regs)\n",
			DRV_NAME);
	        goto err_out;
	}
	if (!request_mem_region(pci_resource_start(pdev, 1),
	                        pci_resource_len(pdev, 1), "LBI interface")) {
	        printk(KERN_ERR "%s: can't reserve MMIO region (lbi)\n",
			DRV_NAME);
	        goto err_out_free_mmio_region0;
	}
	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
					pci_resource_len(pdev, 0));
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot remap MMIO region %lx @ %lx\n",
			DRV_NAME, pci_resource_len(pdev, 0),
			pci_resource_start(pdev, 0));
		goto err_out_free_mmio_region;
	}
	printk(KERN_DEBUG "Siemens DSCC4, MMIO at %#lx (regs), %#lx (lbi), IRQ %d\n",
	        pci_resource_start(pdev, 0),
	        pci_resource_start(pdev, 1), pdev->irq);

	/* Cf errata DS5 p.2 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xf8);
	pci_set_master(pdev);

	if (dscc4_found1(pdev, ioaddr))
		goto err_out_iounmap;

	priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);

	if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root)){
		printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
		goto err_out_free1;
	}

	/* power up/little endian/dma core controlled via lrda/ltda */
	writel(0x00000001, ioaddr + GMODE);
	/* Shared interrupt queue */
	{
		u32 bits;

		/* Same (IRQ_RING_SIZE>>5)-1 length field replicated into
		 * every 4-bit slot of IQLENR0. */
		bits = (IRQ_RING_SIZE >> 5) - 1;
		bits |= bits << 4;
		bits |= bits << 8;
		bits |= bits << 16;
		writel(bits, ioaddr + IQLENR0);
	}
	/* Global interrupt queue */
	writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1);
	priv->iqcfg = (u32 *) pci_alloc_consistent(pdev,
		IRQ_RING_SIZE*sizeof(u32), &priv->iqcfg_dma);
	if (!priv->iqcfg)
		goto err_out_free_irq;
	writel(priv->iqcfg_dma, ioaddr + IQCFG);

	/*
	 * SCC 0-3 private rx/tx irq structures
	 * IQRX/TXi needs to be set soon. Learned it the hard way...
	 */
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqtx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqtx_dma);
		if (!dpriv->iqtx)
			goto err_out_free_iqtx;
		writel(dpriv->iqtx_dma, ioaddr + IQTX0 + i*4);
	}
	for (i = 0; i < dev_per_card; i++) {
		dpriv = priv->root + i;
		dpriv->iqrx = (u32 *) pci_alloc_consistent(pdev,
			IRQ_RING_SIZE*sizeof(u32), &dpriv->iqrx_dma);
		if (!dpriv->iqrx)
			goto err_out_free_iqrx;
		writel(dpriv->iqrx_dma, ioaddr + IQRX0 + i*4);
	}

	/* Cf application hint. Beware of hard-lock condition on threshold. */
	writel(0x42104000, ioaddr + FIFOCR1);
	//writel(0x9ce69800, ioaddr + FIFOCR2);
	writel(0xdef6d800, ioaddr + FIFOCR2);
	//writel(0x11111111, ioaddr + FIFOCR4);
	writel(0x18181818, ioaddr + FIFOCR4);
	// FIXME: should depend on the chipset revision
	writel(0x0000000e, ioaddr + FIFOCR3);

	writel(0xff200001, ioaddr + GCMDR);

	cards_found++;
	return 0;

err_out_free_iqrx:
	/* Free the iqrx queues allocated so far, then fall through to
	 * free all dev_per_card iqtx queues. */
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
	}
	i = dev_per_card;
err_out_free_iqtx:
	while (--i >= 0) {
		dpriv = priv->root + i;
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), priv->iqcfg,
			    priv->iqcfg_dma);
err_out_free_irq:
	free_irq(pdev->irq, priv->root);
err_out_free1:
	dscc4_free1(pdev);
err_out_iounmap:
	iounmap ((void *)ioaddr);
err_out_free_mmio_region:
	release_mem_region(pci_resource_start(pdev, 1),
			   pci_resource_len(pdev, 1));
err_out_free_mmio_region0:
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
err_out:
	return -ENODEV;
}; /* NOTE(review): trailing ';' after the function body is stray */
|
833 |
|
|
|
834 |
|
|
/*
|
835 |
|
|
* Let's hope the default values are decent enough to protect my
|
836 |
|
|
* feet from the user's gun - Ueimor
|
837 |
|
|
*/
|
838 |
|
|
/*
 * Program a channel's SCC registers into their quiescent power-on state:
 * interrupts off, SCC core disabled, frame length check armed, and the
 * CCR1/CCR2 values mandated by the DS5 errata sheet. Called once per port
 * from dscc4_found1() before the first dscc4_open().
 */
static void dscc4_init_registers(struct dscc4_dev_priv *dpriv,
				 struct net_device *dev)
{
	/* No interrupts, SCC core disabled. Let's relax */
	scc_writel(0x00000000, dpriv, dev, CCR0);

	/* Receive length check: reject frames longer than HDLC_MAX_MRU
	 * (RLCR counts in 32-byte units, hence the >> 5). */
	scc_writel(LengthCheck | (HDLC_MAX_MRU >> 5), dpriv, dev, RLCR);

	/*
	 * No address recognition/crc-CCITT/cts enabled
	 * Shared flags transmission disabled - cf errata DS5 p.11
	 * Carrier detect disabled - cf errata p.14
	 * FIXME: carrier detection/polarity may be handled more gracefully.
	 */
	scc_writel(0x02408000, dpriv, dev, CCR1);

	/* crc not forwarded - Cf errata DS5 p.11 */
	scc_writel(0x00050008 & ~RxActivate, dpriv, dev, CCR2);
	// crc forwarded
	//scc_writel(0x00250008 & ~RxActivate, dpriv, dev, CCR2);
}
|
859 |
|
|
|
860 |
|
|
/*
 * Second-stage probe: allocate the per-card private structure and one
 * dscc4_dev_priv per port, register each port as a generic-HDLC device,
 * initialize its SCC registers and descriptor rings, then program the
 * quartz frequency. On failure, every port registered so far is torn down
 * in reverse order. Returns 0 on success or a negative errno.
 */
static int dscc4_found1(struct pci_dev *pdev, unsigned long ioaddr)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	int i, ret = -ENOMEM;

	/* One dev_priv per port, zeroed. */
	root = (struct dscc4_dev_priv *)
		kmalloc(dev_per_card*sizeof(*root), GFP_KERNEL);
	if (!root) {
		printk(KERN_ERR "%s: can't allocate data\n", DRV_NAME);
		goto err_out;
	}
	memset(root, 0, dev_per_card*sizeof(*root));

	ppriv = (struct dscc4_pci_priv *) kmalloc(sizeof(*ppriv), GFP_KERNEL);
	if (!ppriv) {
		printk(KERN_ERR "%s: can't allocate private data\n", DRV_NAME);
		goto err_free_dev;
	}
	memset(ppriv, 0, sizeof(struct dscc4_pci_priv));

	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;
		hdlc_device *hdlc = &dpriv->hdlc;
		struct net_device *d = hdlc_to_dev(hdlc);

		/* All ports share the single BAR mapping and irq line. */
		d->base_addr = ioaddr;
		d->init = NULL;
		d->irq = pdev->irq;
		d->open = dscc4_open;
		d->stop = dscc4_close;
		d->set_multicast_list = NULL;
		d->do_ioctl = dscc4_ioctl;
		d->tx_timeout = dscc4_tx_timeout;
		d->watchdog_timeo = TX_TIMEOUT;

		SET_MODULE_OWNER(d);

		dpriv->dev_id = i;
		dpriv->pci_priv = ppriv;
		spin_lock_init(&dpriv->lock);

		hdlc->xmit = dscc4_start_xmit;
		hdlc->attach = dscc4_hdlc_attach;

		ret = register_hdlc_device(hdlc);
		if (ret < 0) {
			printk(KERN_ERR "%s: unable to register\n", DRV_NAME);
			/* Index i was not registered: err_unregister's
			 * --i skips it, which is exactly what we want. */
			goto err_unregister;
		}

		dscc4_init_registers(dpriv, d);
		dpriv->parity = PARITY_CRC16_PR0_CCITT;
		dpriv->encoding = ENCODING_NRZ;
	
		ret = dscc4_init_ring(d);
		if (ret < 0) {
			/* Ring setup failed after a successful register:
			 * undo this port's registration by hand, then let
			 * err_unregister clean up indices < i. */
			unregister_hdlc_device(hdlc);
			goto err_unregister;
		}
	}
	/* If this fails, i == dev_per_card so all ports get unwound. */
	ret = dscc4_set_quartz(root, quartz);
	if (ret < 0)
		goto err_unregister;
	ppriv->root = root;
	spin_lock_init(&ppriv->lock);
	pci_set_drvdata(pdev, ppriv);
	return ret;

err_unregister:
	while (--i >= 0) {
		dscc4_release_ring(root + i);
		unregister_hdlc_device(&root[i].hdlc);
	}
	kfree(ppriv);
err_free_dev:
	kfree(root);
err_out:
	return ret;
};
|
940 |
|
|
|
941 |
|
|
/* FIXME: get rid of the unneeded code */
|
942 |
|
|
static void dscc4_timer(unsigned long data)
|
943 |
|
|
{
|
944 |
|
|
struct net_device *dev = (struct net_device *)data;
|
945 |
|
|
struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
|
946 |
|
|
// struct dscc4_pci_priv *ppriv;
|
947 |
|
|
|
948 |
|
|
goto done;
|
949 |
|
|
done:
|
950 |
|
|
dpriv->timer.expires = jiffies + TX_TIMEOUT;
|
951 |
|
|
add_timer(&dpriv->timer);
|
952 |
|
|
}
|
953 |
|
|
|
954 |
|
|
/*
 * Netdev watchdog callback, fired when the queue has been stopped for more
 * than dev->watchdog_timeo jiffies. Currently a stub: no recovery action
 * is taken (see FIXME below).
 */
static void dscc4_tx_timeout(struct net_device *dev)
{
	/* FIXME: something is missing there */
}
|
958 |
|
|
|
959 |
|
|
static int dscc4_loopback_check(struct dscc4_dev_priv *dpriv)
|
960 |
|
|
{
|
961 |
|
|
sync_serial_settings *settings = &dpriv->settings;
|
962 |
|
|
|
963 |
|
|
if (settings->loopback && (settings->clock_type != CLOCK_INT)) {
|
964 |
|
|
struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
|
965 |
|
|
|
966 |
|
|
printk(KERN_INFO "%s: loopback requires clock\n", dev->name);
|
967 |
|
|
return -1;
|
968 |
|
|
}
|
969 |
|
|
return 0;
|
970 |
|
|
}
|
971 |
|
|
|
972 |
|
|
#ifdef CONFIG_DSCC4_PCI_RST
|
973 |
|
|
/*
|
974 |
|
|
* Some DSCC4-based cards wires the GPIO port and the PCI #RST pin together
|
975 |
|
|
* so as to provide a safe way to reset the asic while not the whole machine
|
976 |
|
|
* rebooting.
|
977 |
|
|
*
|
978 |
|
|
* This code doesn't need to be efficient. Keep It Simple
|
979 |
|
|
*/
|
980 |
|
|
static void dscc4_pci_reset(struct pci_dev *pdev, u32 ioaddr)
{
	int i;

	/* Serialize against other resets; the saved config space is a
	 * single shared buffer (dscc4_pci_config_store). */
	down(&dscc4_sem);
	/* Save the first 16 dwords of PCI config space: pulsing #RST via
	 * the GPIO port below wipes the chip's PCI configuration. */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i);

	/* Maximal LBI clock divider (who cares ?) and whole GPIO range. */
	writel(0x001c0000, ioaddr + GMODE);
	/* Configure GPIO port as output */
	writel(0x0000ffff, ioaddr + GPDIR);
	/* Disable interruption */
	writel(0x0000ffff, ioaddr + GPIM);

	/* Drive the GPIO pins high then low - on these boards the GPIO
	 * port is wired to PCI #RST, so this pulses a reset. */
	writel(0x0000ffff, ioaddr + GPDATA);
	writel(0x00000000, ioaddr + GPDATA);

	/* Flush posted writes */
	readl(ioaddr + GSTAR);

	/* Give the asic time to come out of reset (~10 jiffies). */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(10);

	/* Restore the saved PCI config space. */
	for (i = 0; i < 16; i++)
		pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]);
	up(&dscc4_sem);
}
|
1008 |
|
|
#else
|
1009 |
|
|
#define dscc4_pci_reset(pdev,ioaddr) do {} while (0)
|
1010 |
|
|
#endif /* CONFIG_DSCC4_PCI_RST */
|
1011 |
|
|
|
1012 |
|
|
/*
 * Bring a port up. After a previous dscc4_close() (FakeReset set) the
 * hardware is only re-armed, not re-initialized. On a first open, the SCC
 * is powered up, Tx/Rx are reset and the code waits for the CEC and XPR
 * acknowledgements before declaring victory. Returns 0 or negative errno;
 * all failure paths undo hdlc_open() and the module refcount.
 */
static int dscc4_open(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	hdlc_device *hdlc = &dpriv->hdlc;
	struct dscc4_pci_priv *ppriv;
	int ret = -EAGAIN;

	if ((dscc4_loopback_check(dpriv) < 0) || !dev->hard_start_xmit)
		goto err;

	if ((ret = hdlc_open(hdlc)))
		goto err;

	MOD_INC_USE_COUNT;

	ppriv = dpriv->pci_priv;

	/*
	 * Due to various bugs, there is no way to reliably reset a
	 * specific port (manufacturer's dependent special PCI #RST wiring
	 * apart: it affects all ports). Thus the device goes in the best
	 * silent mode possible at dscc4_close() time and simply claims to
	 * be up if it's opened again. It still isn't possible to change
	 * the HDLC configuration without rebooting but at least the ports
	 * can be up/down ifconfig'ed without killing the host.
	 */
	if (dpriv->flags & FakeReset) {
		dpriv->flags &= ~FakeReset;
		scc_patchl(0, PowerUp, dpriv, dev, CCR0);
		scc_patchl(0, 0x00050000, dpriv, dev, CCR2);
		scc_writel(EventsMask, dpriv, dev, IMR);
		printk(KERN_INFO "%s: up again.\n", dev->name);
		goto done;
	}

	/* IDT+IDR during XPR */
	dpriv->flags = NeedIDR | NeedIDT;

	scc_patchl(0, PowerUp | Vis, dpriv, dev, CCR0);

	/*
	 * The following is a bit paranoid...
	 *
	 * NB: the datasheet "...CEC will stay active if the SCC is in
	 * power-down mode or..." and CCR2.RAC = 1 are two different
	 * situations.
	 */
	if (scc_readl_star(dpriv, dev) & SccBusy) {
		printk(KERN_ERR "%s busy. Try later\n", dev->name);
		ret = -EAGAIN;
		goto err_out;
	} else
		printk(KERN_INFO "%s: available. Good\n", dev->name);

	scc_writel(EventsMask, dpriv, dev, IMR);

	/* Posted write is flushed in the wait_ack loop */
	scc_writel(TxSccRes | RxSccRes, dpriv, dev, CMDR);

	if ((ret = dscc4_wait_ack_cec(dpriv, dev, "Cec")) < 0)
		goto err_disable_scc_events;

	/*
	 * I would expect XPR near CE completion (before ? after ?).
	 * At worst, this code won't see a late XPR and people
	 * will have to re-issue an ifconfig (this is harmless).
	 * WARNING, a really missing XPR usually means a hardware
	 * reset is needed. Suggestions anyone ?
	 */
	if ((ret = dscc4_xpr_ack(dpriv)) < 0) {
		printk(KERN_ERR "%s: %s timeout\n", DRV_NAME, "XPR");
		goto err_disable_scc_events;
	}

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Open");

done:
	netif_start_queue(dev);

	/* Arm the (currently no-op) housekeeping timer. */
	init_timer(&dpriv->timer);
	dpriv->timer.expires = jiffies + 10*HZ;
	dpriv->timer.data = (unsigned long)dev;
	dpriv->timer.function = &dscc4_timer;
	add_timer(&dpriv->timer);
	/* NOTE(review): carrier is forced on unconditionally - CD
	 * detection is disabled per errata (see dscc4_init_registers). */
	netif_carrier_on(dev);

	return 0;

err_disable_scc_events:
	scc_writel(0xffffffff, dpriv, dev, IMR);
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
err_out:
	hdlc_close(hdlc);
	MOD_DEC_USE_COUNT;
err:
	return ret;
}
|
1110 |
|
|
|
1111 |
|
|
#ifdef DSCC4_POLLING
|
1112 |
|
|
static int dscc4_tx_poll(struct dscc4_dev_priv *dpriv, struct net_device *dev)
{
	/* FIXME: it's gonna be easy (TM), for sure */
	/*
	 * NOTE(review): non-void function with no return statement -
	 * undefined behavior if DSCC4_POLLING is ever enabled. Worse, the
	 * two call sites disagree on the return semantics:
	 * dscc4_start_xmit() spins while the result is non-zero, whereas
	 * dscc4_tx_irq() spins while it is zero. Needs a real
	 * implementation and a documented contract before enabling.
	 */
}
|
1116 |
|
|
#endif /* DSCC4_POLLING */
|
1117 |
|
|
|
1118 |
|
|
/*
 * hard_start_xmit hook: claim the next Tx descriptor, DMA-map the skb,
 * publish the descriptor to the chip (mb() orders the descriptor writes
 * before the chip may observe them), stop the queue when the ring is
 * full, and kick the DMA engine if it has gone idle. Always returns 0
 * (the ring-full check stops the queue before overflow can occur).
 * NOTE(review): no lock is taken here - presumably serialized by the
 * networking core's xmit locking; confirm before changing callers.
 */
static int dscc4_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct dscc4_pci_priv *ppriv = dpriv->pci_priv;
	struct TxFD *tx_fd;
	int next;

	next = dpriv->tx_current%TX_RING_SIZE;
	dpriv->tx_skbuff[next] = skb;
	tx_fd = dpriv->tx_fd + next;
	tx_fd->state = FrameEnd | TO_STATE_TX(skb->len);
	tx_fd->data = pci_map_single(ppriv->pdev, skb->data, skb->len,
				     PCI_DMA_TODEVICE);
	tx_fd->complete = 0x00000000;
	tx_fd->jiffies = jiffies;
	/* Descriptor must be fully visible before the chip is kicked. */
	mb();

#ifdef DSCC4_POLLING
	spin_lock(&dpriv->lock);
	while (dscc4_tx_poll(dpriv, dev));
	spin_unlock(&dpriv->lock);
#endif

	dev->trans_start = jiffies;

	if (debug > 2)
		dscc4_tx_print(dev, dpriv, "Xmit");
	/* To be cleaned(unsigned int)/optimized. Later, ok ? */
	/* Ring full (current caught up with dirty modulo ring size):
	 * stop the queue until the Tx irq frees some slots. */
	if (!((++dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE))
		netif_stop_queue(dev);

	if (dscc4_tx_quiescent(dpriv, dev))
		dscc4_do_tx(dpriv, dev);

	return 0;
}
|
1154 |
|
|
|
1155 |
|
|
/*
 * Bring a port down. Since a single port cannot be reliably reset (see the
 * comment in dscc4_open), the SCC is merely silenced: power-down, receiver
 * deactivated, all interrupts masked. FakeReset is set so the next
 * dscc4_open() knows to only re-arm the hardware. Always returns 0.
 */
static int dscc4_close(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	hdlc_device *hdlc = dev_to_hdlc(dev);

	del_timer_sync(&dpriv->timer);
	netif_stop_queue(dev);

	/* Power down the SCC, deactivate the receiver, mask everything. */
	scc_patchl(PowerUp | Vis, 0, dpriv, dev, CCR0);
	scc_patchl(0x00050000, 0, dpriv, dev, CCR2);
	scc_writel(0xffffffff, dpriv, dev, IMR);

	dpriv->flags |= FakeReset;

	hdlc_close(hdlc);

	MOD_DEC_USE_COUNT;
	return 0;
}
|
1174 |
|
|
|
1175 |
|
|
/*
 * Report whether the given port can generate a clock. On Etinc PCISYNC
 * boards only the first two ports are wired for clock output (see the
 * board-specific notes at the top of the file); everywhere else any port
 * is acceptable. Returns 0 if OK, -1 otherwise.
 */
static inline int dscc4_check_clock_ability(int port)
{
#ifdef CONFIG_DSCC4_PCISYNC
	return (port >= 2) ? -1 : 0;
#else
	return 0;
#endif
}
|
1185 |
|
|
|
1186 |
|
|
/*
|
1187 |
|
|
* DS1 p.137: "There are a total of 13 different clocking modes..."
|
1188 |
|
|
* ^^
|
1189 |
|
|
* Design choices:
|
1190 |
|
|
* - by default, assume a clock is provided on pin RxClk/TxClk (clock mode 0a).
|
1191 |
|
|
* Clock mode 3b _should_ work but the testing seems to make this point
|
1192 |
|
|
* dubious (DIY testing requires setting CCR0 at 0x00000033).
|
1193 |
|
|
* This is supposed to provide least surprise "DTE like" behavior.
|
1194 |
|
|
* - if line rate is specified, clocks are assumed to be locally generated.
|
1195 |
|
|
* A quartz must be available (on pin XTAL1). Modes 6b/7b are used. Choosing
|
1196 |
|
|
 * between these is automagically done according to the required frequency
|
1197 |
|
|
* scaling. Of course some rounding may take place.
|
1198 |
|
|
* - no high speed mode (40Mb/s). May be trivial to do but I don't have an
|
1199 |
|
|
* appropriate external clocking device for testing.
|
1200 |
|
|
 * - no time-slot/clock mode 5: shameless laziness.
|
1201 |
|
|
*
|
1202 |
|
|
 * The clock signals wiring can be (is ?) manufacturer dependent. Good luck.
|
1203 |
|
|
*
|
1204 |
|
|
* BIG FAT WARNING: if the device isn't provided enough clocking signal, it
|
1205 |
|
|
* won't pass the init sequence. For example, straight back-to-back DTE without
|
1206 |
|
|
* external clock will fail when dscc4_open() (<- 'ifconfig hdlcx xxx') is
|
1207 |
|
|
* called.
|
1208 |
|
|
*
|
1209 |
|
|
 * Typos lurk in the datasheet (missing divider in clock mode 7a figure 51 p.153
|
1210 |
|
|
* DS0 for example)
|
1211 |
|
|
*
|
1212 |
|
|
* Clock mode related bits of CCR0:
|
1213 |
|
|
* +------------ TOE: output TxClk (0b/2b/3a/3b/6b/7a/7b only)
|
1214 |
|
|
* | +---------- SSEL: sub-mode select 0 -> a, 1 -> b
|
1215 |
|
|
* | | +-------- High Speed: say 0
|
1216 |
|
|
* | | | +-+-+-- Clock Mode: 0..7
|
1217 |
|
|
* | | | | | |
|
1218 |
|
|
* -+-+-+-+-+-+-+-+
|
1219 |
|
|
* x|x|5|4|3|2|1|0| lower bits
|
1220 |
|
|
*
|
1221 |
|
|
* Division factor of BRR: k = (N+1)x2^M (total divider = 16xk in mode 6b)
|
1222 |
|
|
* +-+-+-+------------------ M (0..15)
|
1223 |
|
|
* | | | | +-+-+-+-+-+-- N (0..63)
|
1224 |
|
|
* 0 0 0 0 | | | | 0 0 | | | | | |
|
1225 |
|
|
* ...-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
1226 |
|
|
* f|e|d|c|b|a|9|8|7|6|5|4|3|2|1|0| lower bits
|
1227 |
|
|
*
|
1228 |
|
|
*/
|
1229 |
|
|
/*
 * Select the clock mode and program the baud-rate generator.
 *
 * *bps != 0: clock is generated locally (DCE). The BRG divider
 * k = (N+1)*2^M is derived from the quartz frequency; clock mode 6b
 * (BRG/16) is used when the raw divider exceeds BRR_DIVIDER_MAX, 7b (BRG)
 * otherwise. *bps is rewritten with the actually achievable rate.
 * *bps == 0: external clock (DTE), clock mode 0a, BRR cleared.
 *
 * *state (CCR0 image) has its clock-mode bits updated in place.
 * Returns 0 on success, -1 if no quartz is configured or the port cannot
 * output a clock. See the big comment above for the CCR0/BRR layouts.
 */
static int dscc4_set_clock(struct net_device *dev, u32 *bps, u32 *state)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	int ret = -1;
	u32 brr;

	*state &= ~Ccr0ClockMask;
	if (*bps) { /* Clock generated - required for DCE */
		u32 n = 0, m = 0, divider;
		int xtal;

		xtal = dpriv->pci_priv->xtal_hz;
		if (!xtal)
			goto done;
		if (dscc4_check_clock_ability(dpriv->dev_id) < 0)
			goto done;
		divider = xtal / *bps;
		if (divider > BRR_DIVIDER_MAX) {
			divider >>= 4;
			*state |= 0x00000036; /* Clock mode 6b (BRG/16) */
		} else
			*state |= 0x00000037; /* Clock mode 7b (BRG) */
		if (divider >> 22) {
			/* Too slow for the BRG: clamp to the maximum
			 * divider (N=63, M=15). */
			n = 63;
			m = 15;
		} else if (divider) {
			/* Extraction of the 6 highest weighted bits */
			m = 0;
			while (0xffffffc0 & divider) {
				m++;
				divider >>= 1;
			}
			n = divider;
		}
		brr = (m << 8) | n;
		/* Recompute the effective rate from the rounded divider. */
		divider = n << m;
		if (!(*state & 0x00000001)) /* ?b mode mask => clock mode 6b */
			divider <<= 4;
		*bps = xtal / divider;
	} else {
		/*
		 * External clock - DTE
		 * "state" already reflects Clock mode 0a (CCR0 = 0xzzzzzz00).
		 * Nothing more to be done
		 */
		brr = 0;
	}
	scc_writel(brr, dpriv, dev, BRR);
	ret = 0;
done:
	return ret;
}
|
1281 |
|
|
|
1282 |
|
|
/*
 * SIOCWANDEV ioctl handler. Supports reading (IF_GET_IFACE) and writing
 * (IF_IFACE_SYNC_SERIAL) the synchronous serial settings; anything else
 * is delegated to the generic HDLC layer. The interface must be down
 * (changes take effect at the next open) and setting the configuration
 * requires CAP_NET_ADMIN.
 */
static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	sync_serial_settings *line = ifr->ifr_settings.ifs_ifsu.sync;
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	const size_t size = sizeof(dpriv->settings);
	int ret = 0;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (cmd != SIOCWANDEV)
		return -EOPNOTSUPP;

	switch(ifr->ifr_settings.type) {
	case IF_GET_IFACE:
		ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(line, &dpriv->settings, size))
			return -EFAULT;
		break;

	case IF_IFACE_SYNC_SERIAL:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* Post-close state: HDLC reconfiguration is not possible
		 * without a real reset (see dscc4_open's comment). */
		if (dpriv->flags & FakeReset) {
			printk(KERN_INFO "%s: please reset the device"
			       " before this command\n", dev->name);
			return -EPERM;
		}
		if (copy_from_user(&dpriv->settings, line, size))
			return -EFAULT;
		ret = dscc4_set_iface(dpriv, dev);
		break;

	default:
		ret = hdlc_ioctl(dev, ifr, cmd);
		break;
	}

	return ret;
}
|
1327 |
|
|
|
1328 |
|
|
static inline int dscc4_set_quartz(struct dscc4_dev_priv *dpriv, int hz)
|
1329 |
|
|
{
|
1330 |
|
|
int ret = 0;
|
1331 |
|
|
|
1332 |
|
|
if ((hz < 0) || (hz > DSCC4_HZ_MAX))
|
1333 |
|
|
ret = -EOPNOTSUPP;
|
1334 |
|
|
else
|
1335 |
|
|
dpriv->pci_priv->xtal_hz = hz;
|
1336 |
|
|
|
1337 |
|
|
return ret;
|
1338 |
|
|
}
|
1339 |
|
|
|
1340 |
|
|
static int dscc4_match(struct thingie *p, int value)
|
1341 |
|
|
{
|
1342 |
|
|
int i;
|
1343 |
|
|
|
1344 |
|
|
for (i = 0; p[i].define != -1; i++) {
|
1345 |
|
|
if (value == p[i].define)
|
1346 |
|
|
break;
|
1347 |
|
|
}
|
1348 |
|
|
if (p[i].define == -1)
|
1349 |
|
|
return -1;
|
1350 |
|
|
else
|
1351 |
|
|
return i;
|
1352 |
|
|
}
|
1353 |
|
|
|
1354 |
|
|
/*
 * Apply the user-requested clock rate: compute the clock mode/BRR via
 * dscc4_set_clock(), log whether we end up DCE (clock generated) or DTE
 * (external clock), record the actually achieved rate back into the
 * settings, and write the resulting CCR0. Returns 0 or -EOPNOTSUPP.
 */
static int dscc4_clock_setting(struct dscc4_dev_priv *dpriv,
			       struct net_device *dev)
{
	sync_serial_settings *settings = &dpriv->settings;
	int ret = -EOPNOTSUPP;
	u32 bps, state;

	bps = settings->clock_rate;
	state = scc_readl(dpriv, CCR0);
	if (dscc4_set_clock(dev, &bps, &state) < 0)
		goto done;
	if (bps) { /* DCE */
		printk(KERN_DEBUG "%s: generated RxClk (DCE)\n", dev->name);
		/* The BRG can only approximate most rates: report the
		 * adjusted value back to the user-visible settings. */
		if (settings->clock_rate != bps) {
			printk(KERN_DEBUG "%s: clock adjusted (%08d -> %08d)\n",
			       dev->name, settings->clock_rate, bps);
			settings->clock_rate = bps;
		}
	} else { /* DTE */
		state |= PowerUp | Vis;
		printk(KERN_DEBUG "%s: external RxClk (DTE)\n", dev->name);
	}
	scc_writel(state, dpriv, dev, CCR0);
	ret = 0;
done:
	return ret;
}
|
1381 |
|
|
|
1382 |
|
|
static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
|
1383 |
|
|
struct net_device *dev)
|
1384 |
|
|
{
|
1385 |
|
|
struct thingie encoding[] = {
|
1386 |
|
|
{ ENCODING_NRZ, 0x00000000 },
|
1387 |
|
|
{ ENCODING_NRZI, 0x00200000 },
|
1388 |
|
|
{ ENCODING_FM_MARK, 0x00400000 },
|
1389 |
|
|
{ ENCODING_FM_SPACE, 0x00500000 },
|
1390 |
|
|
{ ENCODING_MANCHESTER, 0x00600000 },
|
1391 |
|
|
{ -1, 0}
|
1392 |
|
|
};
|
1393 |
|
|
int i, ret = 0;
|
1394 |
|
|
|
1395 |
|
|
i = dscc4_match(encoding, dpriv->encoding);
|
1396 |
|
|
if (i >= 0)
|
1397 |
|
|
scc_patchl(EncodingMask, encoding[i].bits, dpriv, dev, CCR0);
|
1398 |
|
|
else
|
1399 |
|
|
ret = -EOPNOTSUPP;
|
1400 |
|
|
return ret;
|
1401 |
|
|
}
|
1402 |
|
|
|
1403 |
|
|
static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
|
1404 |
|
|
struct net_device *dev)
|
1405 |
|
|
{
|
1406 |
|
|
sync_serial_settings *settings = &dpriv->settings;
|
1407 |
|
|
u32 state;
|
1408 |
|
|
|
1409 |
|
|
state = scc_readl(dpriv, CCR1);
|
1410 |
|
|
if (settings->loopback) {
|
1411 |
|
|
printk(KERN_DEBUG "%s: loopback\n", dev->name);
|
1412 |
|
|
state |= 0x00000100;
|
1413 |
|
|
} else {
|
1414 |
|
|
printk(KERN_DEBUG "%s: normal\n", dev->name);
|
1415 |
|
|
state &= ~0x00000100;
|
1416 |
|
|
}
|
1417 |
|
|
scc_writel(state, dpriv, dev, CCR1);
|
1418 |
|
|
return 0;
|
1419 |
|
|
}
|
1420 |
|
|
|
1421 |
|
|
static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
|
1422 |
|
|
struct net_device *dev)
|
1423 |
|
|
{
|
1424 |
|
|
struct thingie crc[] = {
|
1425 |
|
|
{ PARITY_CRC16_PR0_CCITT, 0x00000010 },
|
1426 |
|
|
{ PARITY_CRC16_PR1_CCITT, 0x00000000 },
|
1427 |
|
|
{ PARITY_CRC32_PR0_CCITT, 0x00000011 },
|
1428 |
|
|
{ PARITY_CRC32_PR1_CCITT, 0x00000001 }
|
1429 |
|
|
};
|
1430 |
|
|
int i, ret = 0;
|
1431 |
|
|
|
1432 |
|
|
i = dscc4_match(crc, dpriv->parity);
|
1433 |
|
|
if (i >= 0)
|
1434 |
|
|
scc_patchl(CrcMask, crc[i].bits, dpriv, dev, CCR1);
|
1435 |
|
|
else
|
1436 |
|
|
ret = -EOPNOTSUPP;
|
1437 |
|
|
return ret;
|
1438 |
|
|
}
|
1439 |
|
|
|
1440 |
|
|
static int dscc4_set_iface(struct dscc4_dev_priv *dpriv, struct net_device *dev)
|
1441 |
|
|
{
|
1442 |
|
|
struct {
|
1443 |
|
|
int (*action)(struct dscc4_dev_priv *, struct net_device *);
|
1444 |
|
|
} *p, do_setting[] = {
|
1445 |
|
|
{ dscc4_encoding_setting },
|
1446 |
|
|
{ dscc4_clock_setting },
|
1447 |
|
|
{ dscc4_loopback_setting },
|
1448 |
|
|
{ dscc4_crc_setting },
|
1449 |
|
|
{ NULL }
|
1450 |
|
|
};
|
1451 |
|
|
int ret = 0;
|
1452 |
|
|
|
1453 |
|
|
for (p = do_setting; p->action; p++) {
|
1454 |
|
|
if ((ret = p->action(dpriv, dev)) < 0)
|
1455 |
|
|
break;
|
1456 |
|
|
}
|
1457 |
|
|
return ret;
|
1458 |
|
|
}
|
1459 |
|
|
|
1460 |
|
|
/*
 * Top-level (possibly shared) interrupt handler. Reads and acknowledges
 * GSTAR, then demultiplexes: configuration-command completion (Cfg),
 * then per-port Rx and Tx events - each of which fans out to every port
 * on the card since GSTAR does not say which one fired. The whole handler
 * runs under the card-wide spinlock.
 */
static void dscc4_irq(int irq, void *token, struct pt_regs *ptregs)
{
	struct dscc4_dev_priv *root = token;
	struct dscc4_pci_priv *priv;
	struct net_device *dev;
	u32 ioaddr, state;
	unsigned long flags;
	int i;

	priv = root->pci_priv;
	dev = hdlc_to_dev(&root->hdlc);

	spin_lock_irqsave(&priv->lock, flags);

	ioaddr = dev->base_addr;

	state = readl(ioaddr + GSTAR);
	if (!state)	/* not ours (shared irq line) */
		goto out;
	if (debug > 3)
		printk(KERN_DEBUG "%s: GSTAR = 0x%08x\n", DRV_NAME, state);
	/* Ack everything we have seen. */
	writel(state, ioaddr + GSTAR);

	if (state & Arf) {
		printk(KERN_ERR "%s: failure (Arf). Harass the maintener\n",
		       dev->name);
		goto out;
	}
	state &= ~ArAck;
	if (state & Cfg) {
		if (debug > 0)
			printk(KERN_DEBUG "%s: CfgIV\n", DRV_NAME);
		/* Consume the next configuration interrupt vector. */
		if (priv->iqcfg[priv->cfg_cur++%IRQ_RING_SIZE] & Arf)
			printk(KERN_ERR "%s: %s failed\n", dev->name, "CFG");
		if (!(state &= ~Cfg))
			goto out;
	}
	if (state & RxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_rx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~RxEvt;
	}
	if (state & TxEvt) {
		i = dev_per_card - 1;
		do {
			dscc4_tx_irq(priv, root + i);
		} while (--i >= 0);
		state &= ~TxEvt;
	}
out:
	spin_unlock_irqrestore(&priv->lock, flags);
}
|
1514 |
|
|
|
1515 |
|
|
/*
 * Per-port Tx interrupt service. Drains the port's Tx interrupt queue
 * (iqtx) entry by entry (the `goto try` loop) until an empty slot is
 * found, then wakes the netdev queue if descriptors were freed and
 * re-kicks the DMA engine if it went quiescent with work pending.
 *
 * SCC events handled: Alls (frame fully sent -> unmap/free the skb,
 * account stats, neutralize the descriptor), Xdu (Tx data underrun ->
 * reset the Tx DMA channel), Cts/Xmr/Cd (logged), and Xpr (chip ready for
 * the deferred IDT/IDR descriptor-ring initialization requested by
 * dscc4_open(), in that mandatory order). Non-SCC events: Hi and Err.
 */
static inline void dscc4_tx_irq(struct dscc4_pci_priv *ppriv,
				struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
	u32 state;
	int cur, loop = 0;

try:
	cur = dpriv->iqtx_current%IRQ_RING_SIZE;
	state = dpriv->iqtx[cur];
	if (!state) {
		/* Interrupt queue drained: do the deferred queue/DMA
		 * housekeeping and leave. */
		if (debug > 4)
			printk(KERN_DEBUG "%s: Tx ISR = 0x%08x\n", dev->name,
			       state);
		if ((debug > 1) && (loop > 1))
			printk(KERN_DEBUG "%s: Tx irq loop=%d\n", dev->name, loop);
		/* Wake the queue only if at least one event was processed
		 * and there is room in the Tx ring again. */
		if (loop && netif_queue_stopped(dev))
			if ((dpriv->tx_current - dpriv->tx_dirty)%TX_RING_SIZE)
				netif_wake_queue(dev);

		if (netif_running(dev) && dscc4_tx_quiescent(dpriv, dev) &&
		    !dscc4_tx_done(dpriv))
			dscc4_do_tx(dpriv, dev);
		return;
	}
	loop++;
	/* Consume the interrupt queue entry. */
	dpriv->iqtx[cur] = 0;
	dpriv->iqtx_current++;

	if (state_check(state, dpriv, dev, "Tx") < 0)
		return;

	if (state & SccEvt) {
		if (state & Alls) {
			struct net_device_stats *stats = &dpriv->hdlc.stats;
			struct sk_buff *skb;
			struct TxFD *tx_fd;

			if (debug > 2)
				dscc4_tx_print(dev, dpriv, "Alls");
			/*
			 * DataComplete can't be trusted for Tx completion.
			 * Cf errata DS5 p.8
			 */
			cur = dpriv->tx_dirty%TX_RING_SIZE;
			tx_fd = dpriv->tx_fd + cur;
			skb = dpriv->tx_skbuff[cur];
			if (skb) {
				pci_unmap_single(ppriv->pdev, tx_fd->data,
						 skb->len, PCI_DMA_TODEVICE);
				if (tx_fd->state & FrameEnd) {
					stats->tx_packets++;
					stats->tx_bytes += skb->len;
				}
				dev_kfree_skb_irq(skb);
				dpriv->tx_skbuff[cur] = NULL;
				++dpriv->tx_dirty;
			} else {
				if (debug > 1)
					printk(KERN_ERR "%s Tx: NULL skb %d\n",
					       dev->name, cur);
			}
			/*
			 * If the driver ends sending crap on the wire, it
			 * will be way easier to diagnose than the (not so)
			 * random freeze induced by null sized tx frames.
			 */
			tx_fd->data = tx_fd->next;
			tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
			tx_fd->complete = 0x00000000;
			tx_fd->jiffies = 0;

			if (!(state &= ~Alls))
				goto try;
		}
		/*
		 * Transmit Data Underrun
		 */
		if (state & Xdu) {
			printk(KERN_ERR "%s: XDU. Ask maintainer\n", DRV_NAME);
			dpriv->flags = NeedIDT;
			/* Tx reset */
			writel(MTFi | Rdt,
			       dev->base_addr + 0x0c*dpriv->dev_id + CH0CFG);
			writel(Action, dev->base_addr + GCMDR);
			return;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		if (state & Xmr) {
			/* Frame needs to be sent again - FIXME */
			printk(KERN_ERR "%s: Xmr. Ask maintainer\n", DRV_NAME);
			if (!(state &= ~Xmr)) /* DEBUG */
				goto try;
		}
		if (state & Xpr) {
			u32 scc_addr, ring;
			int i;

			/*
			 * - the busy condition happens (sometimes);
			 * - it doesn't seem to make the handler unreliable.
			 */
			/* Bounded spin: 32 iterations, then give up. */
			for (i = 1; i; i <<= 1) {
				if (!(scc_readl_star(dpriv, dev) & SccBusy))
					break;
			}
			if (!i)
				printk(KERN_INFO "%s busy in irq\n", dev->name);

			scc_addr = dev->base_addr + 0x0c*dpriv->dev_id;
			/* Keep this order: IDT before IDR */
			if (dpriv->flags & NeedIDT) {
				if (debug > 2)
					dscc4_tx_print(dev, dpriv, "Xpr");
				ring = dpriv->tx_fd_dma +
				       (dpriv->tx_dirty%TX_RING_SIZE)*
				       sizeof(struct TxFD);
				writel(ring, scc_addr + CH0BTDA);
				dscc4_do_tx(dpriv, dev);
				writel(MTFi | Idt, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDT") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDT;
			}
			if (dpriv->flags & NeedIDR) {
				ring = dpriv->rx_fd_dma +
				       (dpriv->rx_current%RX_RING_SIZE)*
				       sizeof(struct RxFD);
				writel(ring, scc_addr + CH0BRDA);
				dscc4_rx_update(dpriv, dev);
				writel(MTFi | Idr, scc_addr + CH0CFG);
				if (dscc4_do_action(dev, "IDR") < 0)
					goto err_xpr;
				dpriv->flags &= ~NeedIDR;
				smp_wmb();
				/* Activate receiver and misc */
				scc_writel(0x08050008, dpriv, dev, CCR2);
			}
		err_xpr:
			if (!(state &= ~Xpr))
				goto try;
		}
		if (state & Cd) {
			if (debug > 0)
				printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
	} else { /* ! SccEvt */
		if (state & Hi) {
#ifdef DSCC4_POLLING
			while (!dscc4_tx_poll(dpriv, dev));
#endif
			printk(KERN_INFO "%s: Tx Hi\n", dev->name);
			state &= ~Hi;
		}
		if (state & Err) {
			printk(KERN_INFO "%s: Tx ERR\n", dev->name);
			dev_to_hdlc(dev)->stats.tx_errors++;
			state &= ~Err;
		}
	}
	goto try;
}
|
1683 |
|
|
|
1684 |
|
|
/*
 * Service one pending Rx interrupt indication for a port.
 *
 * Drains the per-port Rx interrupt queue (iqrx): each call consumes one
 * status word and loops back to the 'try' label until the queue entry is
 * empty (state == 0). Non-SCC events cover descriptor completion (Fi),
 * ring exhaustion recovery (Err) and the HI marker; SCC events cover line
 * transitions (CTS/CD), Receive Data Overflow recovery and debug-only
 * event decoding.
 *
 * Called from the hard irq path (via the shared interrupt handler), so no
 * sleeping and all descriptor manipulation races against the DMAC.
 */
static inline void dscc4_rx_irq(struct dscc4_pci_priv *priv,
				struct dscc4_dev_priv *dpriv)
{
	struct net_device *dev = hdlc_to_dev(&dpriv->hdlc);
	u32 state;
	int cur;

try:
	/* Pop the next status word from this port's Rx irq ring. */
	cur = dpriv->iqrx_current%IRQ_RING_SIZE;
	state = dpriv->iqrx[cur];
	if (!state)
		return;
	/* Clear the slot so the chip can reuse it, then advance. */
	dpriv->iqrx[cur] = 0;
	dpriv->iqrx_current++;

	if (state_check(state, dpriv, dev, "Rx") < 0)
		return;

	if (!(state & SccEvt)){
		/* DMAC (descriptor) event, not an SCC core event. */
		struct RxFD *rx_fd;

		if (debug > 4)
			printk(KERN_DEBUG "%s: Rx ISR = 0x%08x\n", dev->name,
			       state);
		state &= 0x00ffffff;
		if (state & Err) { /* Hold or reset */
			printk(KERN_DEBUG "%s: Rx ERR\n", dev->name);
			cur = dpriv->rx_current%RX_RING_SIZE;
			rx_fd = dpriv->rx_fd + cur;
			/*
			 * Presume we're not facing a DMAC receiver reset.
			 * As We use the rx size-filtering feature of the
			 * DSCC4, the beginning of a new frame is waiting in
			 * the rx fifo. I bet a Receive Data Overflow will
			 * happen most of time but let's try and avoid it.
			 * Btw (as for RDO) if one experiences ERR whereas
			 * the system looks rather idle, there may be a
			 * problem with latency. In this case, increasing
			 * RX_RING_SIZE may help.
			 */
			//while (dpriv->rx_needs_refill) {
			/* Walk forward to the descriptor holding the ring. */
			while (!(rx_fd->state1 & Hold)) {
				rx_fd++;
				cur++;
				if (!(cur = cur%RX_RING_SIZE))
					rx_fd = dpriv->rx_fd;
			}
			//dpriv->rx_needs_refill--;
			/* Refill the slot; on failure keep polling the irq ring. */
			try_get_rx_skb(dpriv, dev);
			if (!rx_fd->data)
				goto try;
			/* Re-arm the descriptor for the DMAC. */
			rx_fd->state1 &= ~Hold;
			rx_fd->state2 = 0x00000000;
			rx_fd->end = 0xbabeface;
			//}
			goto try;
		}
		if (state & Fi) {
			/* Frame indication: a complete frame is ready. */
			dscc4_rx_skb(dpriv, dev);
			goto try;
		}
		if (state & Hi ) { /* HI bit */
			printk(KERN_INFO "%s: Rx Hi\n", dev->name);
			state &= ~Hi;
			goto try;
		}
	} else { /* SccEvt */
		if (debug > 1) {
			//FIXME: check that every possible event is decoded here
			static struct {
				u32 mask;
				const char *irq_name;
			} evts[] = {
				{ 0x00008000, "TIN"},
				{ 0x00000020, "RSC"},
				{ 0x00000010, "PCE"},
				{ 0x00000008, "PLLA"},
				{ 0, NULL}
			}, *evt;

			/* Debug-only: name each pending event bit. */
			for (evt = evts; evt->irq_name; evt++) {
				if (state & evt->mask) {
					printk(KERN_DEBUG "%s: %s\n",
						dev->name, evt->irq_name);
					if (!(state &= ~evt->mask))
						goto try;
				}
			}
		} else {
			/* Silently discard the same informational bits. */
			if (!(state &= ~0x0000c03c))
				goto try;
		}
		if (state & Cts) {
			printk(KERN_INFO "%s: CTS transition\n", dev->name);
			if (!(state &= ~Cts)) /* DEBUG */
				goto try;
		}
		/*
		 * Receive Data Overflow (FIXME: fscked)
		 */
		if (state & Rdo) {
			struct RxFD *rx_fd;
			u32 scc_addr;
			int cur;

			//if (debug)
			//	dscc4_rx_dump(dpriv);
			/* Per-channel register window: 0x0c bytes apart. */
			scc_addr = dev->base_addr + 0x0c*dpriv->dev_id;

			/* Deactivate the receiver while we salvage data. */
			scc_patchl(RxActivate, 0, dpriv, dev, CCR2);
			/*
			 * This has no effect. Why ?
			 * ORed with TxSccRes, one sees the CFG ack (for
			 * the TX part only).
			 */
			scc_writel(RxSccRes, dpriv, dev, CMDR);
			dpriv->flags |= RdoSet;

			/*
			 * Let's try and save something in the received data.
			 * rx_current must be incremented at least once to
			 * avoid HOLD in the BRDA-to-be-pointed desc.
			 */
			do {
				cur = dpriv->rx_current++%RX_RING_SIZE;
				rx_fd = dpriv->rx_fd + cur;
				if (!(rx_fd->state2 & DataComplete))
					break;
				if (rx_fd->state2 & FrameAborted) {
					/* Drop aborted frame, re-arm desc. */
					dev_to_hdlc(dev)->stats.rx_over_errors++;
					rx_fd->state1 |= Hold;
					rx_fd->state2 = 0x00000000;
					rx_fd->end = 0xbabeface;
				} else
					dscc4_rx_skb(dpriv, dev);
			} while (1);

			if (debug > 0) {
				/*
				 * NOTE(review): this prints when RdoSet IS
				 * set, yet the message says "no RDO" —
				 * condition or message looks inverted;
				 * confirm intent.
				 */
				if (dpriv->flags & RdoSet)
					printk(KERN_DEBUG
					       "%s: no RDO in Rx data\n", DRV_NAME);
			}
#ifdef DSCC4_RDO_EXPERIMENTAL_RECOVERY
			/*
			 * FIXME: must the reset be this violent ?
			 */
#warning "FIXME: CH0BRDA"
			/* Point the DMAC back at the current descriptor... */
			writel(dpriv->rx_fd_dma +
			       (dpriv->rx_current%RX_RING_SIZE)*
			       sizeof(struct RxFD), scc_addr + CH0BRDA);
			/* ...then issue Receiver/Interrupt Descriptor Reset. */
			writel(MTFi|Rdr|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "RDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "RDR");
				goto rdo_end;
			}
			writel(MTFi|Idr, scc_addr + CH0CFG);
			if (dscc4_do_action(dev, "IDR") < 0) {
				printk(KERN_ERR "%s: RDO recovery failed(%s)\n",
				       dev->name, "IDR");
				goto rdo_end;
			}
		rdo_end:
#endif
			/* Re-enable the receiver and resume irq draining. */
			scc_patchl(0, RxActivate, dpriv, dev, CCR2);
			goto try;
		}
		if (state & Cd) {
			printk(KERN_INFO "%s: CD transition\n", dev->name);
			if (!(state &= ~Cd)) /* DEBUG */
				goto try;
		}
		if (state & Flex) {
			printk(KERN_DEBUG "%s: Flex. Ttttt...\n", DRV_NAME);
			if (!(state &= ~Flex))
				goto try;
		}
	}
}
|
1863 |
|
|
|
1864 |
|
|
/*
 * I had expected the following to work for the first descriptor
 * (tx_fd->state = 0xc0000000)
 * - Hold=1 (don't try and branch to the next descripto);
 * - No=0 (I want an empty data section, i.e. size=0);
 * - Fe=1 (required by No=0 or we got an Err irq and must reset).
 * It failed and locked solid. Thus the introduction of a dummy skb.
 * Problem is acknowledged in errata sheet DS5. Joy :o/
 */
/*
 * Allocate and install the dummy Tx frame that primes the Tx ring
 * (workaround for the DS5 errata described above).
 *
 * The skb is placed in the tx_dirty slot, DMA-mapped and referenced by
 * the matching Tx descriptor. Returns the skb on success, NULL if
 * dev_alloc_skb() failed.
 */
struct sk_buff *dscc4_init_dummy_skb(struct dscc4_dev_priv *dpriv)
{
	struct sk_buff *skb;

	skb = dev_alloc_skb(DUMMY_SKB_SIZE);
	if (skb) {
		int last = dpriv->tx_dirty%TX_RING_SIZE;
		struct TxFD *tx_fd = dpriv->tx_fd + last;

		skb->len = DUMMY_SKB_SIZE;
		/*
		 * Fill the payload with the driver version string.
		 * NOTE(review): the '%' caps the copy length but copies 0
		 * bytes if strlen(version) is an exact multiple of
		 * DUMMY_SKB_SIZE — min() was probably intended; confirm.
		 */
		memcpy(skb->data, version, strlen(version)%DUMMY_SKB_SIZE);
		/* Single-descriptor frame: FrameEnd plus encoded length. */
		tx_fd->state = FrameEnd | TO_STATE_TX(DUMMY_SKB_SIZE);
		tx_fd->data = pci_map_single(dpriv->pci_priv->pdev, skb->data,
					     DUMMY_SKB_SIZE, PCI_DMA_TODEVICE);
		/* Remember the skb so the Tx reclaim path can free it. */
		dpriv->tx_skbuff[last] = skb;
	}
	return skb;
}
|
1891 |
|
|
|
1892 |
|
|
/*
 * Allocate and initialize the Tx and Rx descriptor rings for one port.
 *
 * Both rings are allocated from consistent (coherent) PCI memory and
 * chained circularly via their 'next' fields. The Tx ring is seeded with
 * the dummy skb (DS5 errata workaround); the Rx ring is populated with
 * receive skbs via try_get_rx_skb().
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (previously
 * allocated ring memory is released on the error path).
 */
static int dscc4_init_ring(struct net_device *dev)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(dev);
	struct pci_dev *pdev = dpriv->pci_priv->pdev;
	struct TxFD *tx_fd;
	struct RxFD *rx_fd;
	void *ring;
	int i;

	ring = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &dpriv->rx_fd_dma);
	if (!ring)
		goto err_out;
	dpriv->rx_fd = rx_fd = (struct RxFD *) ring;

	ring = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &dpriv->tx_fd_dma);
	if (!ring)
		goto err_free_dma_rx;
	dpriv->tx_fd = tx_fd = (struct TxFD *) ring;

	memset(dpriv->tx_skbuff, 0, sizeof(struct sk_buff *)*TX_RING_SIZE);
	/* tx_dirty starts at -1 so the dummy skb lands in the last slot. */
	dpriv->tx_dirty = 0xffffffff;
	i = dpriv->tx_current = 0;
	do {
		tx_fd->state = FrameEnd | TO_STATE_TX(2*DUMMY_SKB_SIZE);
		tx_fd->complete = 0x00000000;
		/* FIXME: NULL should be ok - to be tried */
		tx_fd->data = dpriv->tx_fd_dma;
		/* Chain to the following descriptor; last wraps to first. */
		(tx_fd++)->next = (u32)(dpriv->tx_fd_dma +
					(++i%TX_RING_SIZE)*sizeof(*tx_fd));
	} while (i < TX_RING_SIZE);

	if (dscc4_init_dummy_skb(dpriv) == NULL)
		goto err_free_dma_tx;

	memset(dpriv->rx_skbuff, 0, sizeof(struct sk_buff *)*RX_RING_SIZE);
	i = dpriv->rx_dirty = dpriv->rx_current = 0;
	do {
		/* size set by the host. Multiple of 4 bytes please */
		rx_fd->state1 = HiDesc;
		rx_fd->state2 = 0x00000000;
		rx_fd->end = 0xbabeface;
		rx_fd->state1 |= TO_STATE_RX(HDLC_MAX_MRU);
		// FIXME: return value checked but handling looks suspect
		if (try_get_rx_skb(dpriv, dev) >= 0)
			dpriv->rx_dirty++;
		/* Chain to the following descriptor; last wraps to first. */
		(rx_fd++)->next = (u32)(dpriv->rx_fd_dma +
					(++i%RX_RING_SIZE)*sizeof(*rx_fd));
	} while (i < RX_RING_SIZE);

	return 0;

err_free_dma_tx:
	/* 'ring' still points at the Tx ring here. */
	pci_free_consistent(pdev, TX_TOTAL_SIZE, ring, dpriv->tx_fd_dma);
err_free_dma_rx:
	/* rx_fd has not been advanced on either error path. */
	pci_free_consistent(pdev, RX_TOTAL_SIZE, rx_fd, dpriv->rx_fd_dma);
err_out:
	return -ENOMEM;
}
|
1950 |
|
|
|
1951 |
|
|
/*
 * PCI remove callback: tear down one DSCC4 board.
 *
 * Teardown mirrors probe in reverse: quiesce the chip, release the irq,
 * free the shared config irq ring, then the per-port resources
 * (descriptor rings, Rx/Tx irq rings), the hdlc devices, the register
 * mapping and finally the two PCI memory regions.
 */
static void __devexit dscc4_remove_one(struct pci_dev *pdev)
{
	struct dscc4_pci_priv *ppriv;
	struct dscc4_dev_priv *root;
	u32 ioaddr;
	int i;

	ppriv = pci_get_drvdata(pdev);
	root = ppriv->root;

	/* All ports share the same mapped register base. */
	ioaddr = hdlc_to_dev(&root->hdlc)->base_addr;

	/* Stop the chip before freeing anything it may still DMA into. */
	dscc4_pci_reset(pdev, ioaddr);

	free_irq(pdev->irq, root);
	pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32), ppriv->iqcfg,
			    ppriv->iqcfg_dma);
	for (i = 0; i < dev_per_card; i++) {
		struct dscc4_dev_priv *dpriv = root + i;

		/* Per-port descriptor rings and their skbs. */
		dscc4_release_ring(dpriv);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqrx, dpriv->iqrx_dma);
		pci_free_consistent(pdev, IRQ_RING_SIZE*sizeof(u32),
				    dpriv->iqtx, dpriv->iqtx_dma);
	}

	dscc4_free1(pdev);

	iounmap((void *)ioaddr);

	release_mem_region(pci_resource_start(pdev, 1),
			   pci_resource_len(pdev, 1));
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
}
|
1987 |
|
|
|
1988 |
|
|
/*
 * Generic-HDLC attach callback: validate and record the requested line
 * encoding and CRC/parity mode for this port.
 *
 * Returns 0 on success, -EINVAL when either value is not one the DSCC4
 * supports. The settings are applied later by the SCC configuration code.
 */
static int dscc4_hdlc_attach(hdlc_device *hdlc, unsigned short encoding,
			     unsigned short parity)
{
	struct dscc4_dev_priv *dpriv = dscc4_priv(hdlc_to_dev(hdlc));

	switch (encoding) {
	case ENCODING_NRZ:
	case ENCODING_NRZI:
	case ENCODING_FM_MARK:
	case ENCODING_FM_SPACE:
	case ENCODING_MANCHESTER:
		break;
	default:
		return -EINVAL;
	}

	switch (parity) {
	case PARITY_NONE:
	case PARITY_CRC16_PR0_CCITT:
	case PARITY_CRC16_PR1_CCITT:
	case PARITY_CRC32_PR0_CCITT:
	case PARITY_CRC32_PR1_CCITT:
		break;
	default:
		return -EINVAL;
	}

	dpriv->encoding = encoding;
	dpriv->parity = parity;
	return 0;
}
|
2012 |
|
|
|
2013 |
|
|
#ifndef MODULE
|
2014 |
|
|
static int __init dscc4_setup(char *str)
|
2015 |
|
|
{
|
2016 |
|
|
int *args[] = { &debug, &quartz, NULL }, **p = args;
|
2017 |
|
|
|
2018 |
|
|
while (*p && (get_option(&str, *p) == 2))
|
2019 |
|
|
p++;
|
2020 |
|
|
return 1;
|
2021 |
|
|
}
|
2022 |
|
|
|
2023 |
|
|
__setup("dscc4.setup=", dscc4_setup);
|
2024 |
|
|
#endif /* MODULE */
|
2025 |
|
|
|
2026 |
|
|
/*
 * PCI IDs this driver claims: the Siemens/Infineon DSCC4 (PEB20534),
 * matching any subsystem vendor/device.
 */
static struct pci_device_id dscc4_pci_tbl[] = {
	{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}	/* terminator */
};
MODULE_DEVICE_TABLE(pci, dscc4_pci_tbl);
|
2032 |
|
|
|
2033 |
|
|
/* PCI driver glue: probe/remove callbacks for DSCC4 boards. */
static struct pci_driver dscc4_driver = {
	.name = DRV_NAME,
	.id_table = dscc4_pci_tbl,
	.probe = dscc4_init_one,
	.remove = __devexit_p(dscc4_remove_one),
};
|
2039 |
|
|
|
2040 |
|
|
/*
 * Module entry point: register the PCI driver so the PCI core probes
 * any present DSCC4 boards via dscc4_init_one().
 */
static int __init dscc4_init_module(void)
{
	return pci_module_init(&dscc4_driver);
}
|
2044 |
|
|
|
2045 |
|
|
/*
 * Module exit point: unregister the PCI driver, which triggers
 * dscc4_remove_one() for every bound board.
 */
static void __exit dscc4_cleanup_module(void)
{
	pci_unregister_driver(&dscc4_driver);
}
|
2049 |
|
|
|
2050 |
|
|
/* Hook the entry/exit points into the kernel module loader. */
module_init(dscc4_init_module);
module_exit(dscc4_cleanup_module);
|