1 |
1275 |
phoenix |
/*
|
2 |
|
|
* ibm_ocp_enet.c
|
3 |
|
|
*
|
4 |
|
|
* Ethernet driver for the built in ethernet on the IBM 4xx PowerPC
|
5 |
|
|
* processors.
|
6 |
|
|
*
|
7 |
|
|
* (c) 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org>
|
8 |
|
|
*
|
9 |
|
|
* Based on original work by
|
10 |
|
|
*
|
11 |
|
|
* Armin Kuster <akuster@mvista.com>
|
12 |
|
|
* Johnnie Peters <jpeters@mvista.com>
|
13 |
|
|
*
|
14 |
|
|
* This program is free software; you can redistribute it and/or modify it
|
15 |
|
|
* under the terms of the GNU General Public License as published by the
|
16 |
|
|
* Free Software Foundation; either version 2 of the License, or (at your
|
17 |
|
|
* option) any later version.
|
18 |
|
|
*
|
19 |
|
|
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
20 |
|
|
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
21 |
|
|
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
|
22 |
|
|
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
23 |
|
|
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
24 |
|
|
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
|
25 |
|
|
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
26 |
|
|
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
27 |
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
28 |
|
|
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
29 |
|
|
*
|
30 |
|
|
* You should have received a copy of the GNU General Public License along
|
31 |
|
|
* with this program; if not, write to the Free Software Foundation, Inc.,
|
32 |
|
|
* 675 Mass Ave, Cambridge, MA 02139, USA.
|
33 |
|
|
*
|
34 |
|
|
* TODO
|
35 |
|
|
* - Check for races in the "remove" code path
|
36 |
|
|
* - Add some Power Management to the MAC and the PHY
|
37 |
|
|
* - Audit remaining of non-rewritten code (--BenH)
|
38 |
|
|
 * - Cleanup message display using msglevel mechanism
|
39 |
|
|
*
|
40 |
|
|
*/
|
41 |
|
|
#include <linux/module.h>
|
42 |
|
|
#include <linux/kernel.h>
|
43 |
|
|
#include <linux/sched.h>
|
44 |
|
|
#include <linux/string.h>
|
45 |
|
|
#include <linux/timer.h>
|
46 |
|
|
#include <linux/ptrace.h>
|
47 |
|
|
#include <linux/errno.h>
|
48 |
|
|
#include <linux/ioport.h>
|
49 |
|
|
#include <linux/slab.h>
|
50 |
|
|
#include <linux/interrupt.h>
|
51 |
|
|
#include <linux/delay.h>
|
52 |
|
|
#include <linux/init.h>
|
53 |
|
|
#include <linux/types.h>
|
54 |
|
|
#include <linux/pci.h>
|
55 |
|
|
#include <linux/ethtool.h>
|
56 |
|
|
#include <linux/mii.h>
|
57 |
|
|
|
58 |
|
|
#include <asm/processor.h>
|
59 |
|
|
#include <asm/bitops.h>
|
60 |
|
|
#include <asm/io.h>
|
61 |
|
|
#include <asm/dma.h>
|
62 |
|
|
#include <asm/irq.h>
|
63 |
|
|
#include <asm/uaccess.h>
|
64 |
|
|
#include <asm/ocp.h>
|
65 |
|
|
|
66 |
|
|
#include <linux/netdevice.h>
|
67 |
|
|
#include <linux/etherdevice.h>
|
68 |
|
|
#include <linux/skbuff.h>
|
69 |
|
|
#include <linux/crc32.h>
|
70 |
|
|
|
71 |
|
|
#include "ibm_ocp_enet.h"
|
72 |
|
|
|
73 |
|
|
//#define MDIO_DEBUG(fmt) printk fmt
|
74 |
|
|
#define MDIO_DEBUG(fmt)
|
75 |
|
|
|
76 |
|
|
//#define LINK_DEBUG(fmt) printk fmt
|
77 |
|
|
#define LINK_DEBUG(fmt)
|
78 |
|
|
|
79 |
|
|
//#define PKT_DEBUG(fmt) printk fmt
|
80 |
|
|
#define PKT_DEBUG(fmt)
|
81 |
|
|
|
82 |
|
|
#define DRV_NAME "emac"
|
83 |
|
|
#define DRV_VERSION "2.0"
|
84 |
|
|
#define DRV_AUTHOR "Benjamin Herrenschmidt <benh@kernel.crashing.org>"
|
85 |
|
|
#define DRV_DESC "IBM OCP EMAC Ethernet driver"
|
86 |
|
|
|
87 |
|
|
MODULE_AUTHOR(DRV_AUTHOR);
|
88 |
|
|
MODULE_DESCRIPTION(DRV_DESC);
|
89 |
|
|
MODULE_LICENSE("GPL");
|
90 |
|
|
|
91 |
|
|
static int skb_res = SKB_RES;
|
92 |
|
|
MODULE_PARM(skb_res, "i");
|
93 |
|
|
MODULE_PARM_DESC(skb_res, "Amount of data to reserve on skb buffs\n"
|
94 |
|
|
"The 405 handles a misaligned IP header fine but\n"
|
95 |
|
|
"this can help if you are routing to a tunnel or a\n"
|
96 |
|
|
"device that needs aligned data. 0..2");
|
97 |
|
|
|
98 |
|
|
#define ZMII_PRIV(ocpdev) ((struct ibm_ocp_zmii*)ocp_get_drvdata(ocpdev))
|
99 |
|
|
|
100 |
|
|
static unsigned int zmii_enable[][4] = {
|
101 |
|
|
{ZMII_SMII0, ZMII_RMII0, ZMII_MII0,
|
102 |
|
|
~(ZMII_MDI1 | ZMII_MDI2 | ZMII_MDI3)},
|
103 |
|
|
{ZMII_SMII1, ZMII_RMII1, ZMII_MII1,
|
104 |
|
|
~(ZMII_MDI0 | ZMII_MDI2 | ZMII_MDI3)},
|
105 |
|
|
{ZMII_SMII2, ZMII_RMII2, ZMII_MII2,
|
106 |
|
|
~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI3)},
|
107 |
|
|
{ZMII_SMII3, ZMII_RMII3, ZMII_MII3, ~(ZMII_MDI0 | ZMII_MDI1 | ZMII_MDI2)}
|
108 |
|
|
};
|
109 |
|
|
static unsigned int mdi_enable[] =
|
110 |
|
|
{ ZMII_MDI0, ZMII_MDI1, ZMII_MDI2, ZMII_MDI3 };
|
111 |
|
|
|
112 |
|
|
static unsigned int zmii_speed = 0x0;
|
113 |
|
|
static unsigned int zmii_speed100[] = { ZMII_MII0_100MB, ZMII_MII1_100MB };
|
114 |
|
|
|
115 |
|
|
/* Since multiple EMACs share MDIO lines in various ways, we need
|
116 |
|
|
* to avoid re-using the same PHY ID in cases where the arch didn't
|
117 |
|
|
* setup precise emac_phy_map entries
|
118 |
|
|
*/
|
119 |
|
|
static u32 busy_phy_map = 0;
|
120 |
|
|
|
121 |
|
|
static struct net_device_stats *
|
122 |
|
|
emac_stats(struct net_device *dev)
|
123 |
|
|
{
|
124 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
125 |
|
|
return &fep->stats;
|
126 |
|
|
}
|
127 |
|
|
|
128 |
|
|
static int
|
129 |
|
|
emac_init_zmii(struct ocp_device *ocpdev, int mode)
|
130 |
|
|
{
|
131 |
|
|
struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
|
132 |
|
|
struct zmii_regs *zmiip;
|
133 |
|
|
const char *mode_name[] = { "SMII", "RMII", "MII" };
|
134 |
|
|
|
135 |
|
|
if (zmii){
|
136 |
|
|
/* We have already initialized ZMII device,
|
137 |
|
|
so just increment refcount and return */
|
138 |
|
|
zmii->users++;
|
139 |
|
|
return 0;
|
140 |
|
|
}
|
141 |
|
|
|
142 |
|
|
zmii = kmalloc(sizeof(struct ibm_ocp_zmii), GFP_KERNEL);
|
143 |
|
|
if (zmii == NULL) {
|
144 |
|
|
printk(KERN_ERR "zmii%d: Out of memory allocating ZMII structure!\n",
|
145 |
|
|
ocpdev->def->index);
|
146 |
|
|
return -ENOMEM;
|
147 |
|
|
}
|
148 |
|
|
memset(zmii, 0, sizeof(*zmii));
|
149 |
|
|
|
150 |
|
|
zmiip = (struct zmii_regs *)ioremap(ocpdev->def->paddr, sizeof(*zmiip));
|
151 |
|
|
if (zmiip == NULL){
|
152 |
|
|
printk(KERN_ERR "zmii%d: Cannot ioremap bridge registers!\n",
|
153 |
|
|
ocpdev->def->index);
|
154 |
|
|
|
155 |
|
|
kfree(zmii);
|
156 |
|
|
return -ENOMEM;
|
157 |
|
|
}
|
158 |
|
|
|
159 |
|
|
if (mode == ZMII_AUTO) {
|
160 |
|
|
if (zmiip->fer & (ZMII_MII0 | ZMII_MII1 |
|
161 |
|
|
ZMII_MII2 | ZMII_MII3))
|
162 |
|
|
mode = MII;
|
163 |
|
|
if (zmiip->fer & (ZMII_RMII0 | ZMII_RMII1 |
|
164 |
|
|
ZMII_RMII2 | ZMII_RMII3))
|
165 |
|
|
mode = RMII;
|
166 |
|
|
if (zmiip->fer & (ZMII_SMII0 | ZMII_SMII1 |
|
167 |
|
|
ZMII_SMII2 | ZMII_SMII3))
|
168 |
|
|
mode = SMII;
|
169 |
|
|
|
170 |
|
|
/* Failsafe: ZMII_AUTO is invalid index into the arrays,
|
171 |
|
|
so force SMII if all else fails. */
|
172 |
|
|
|
173 |
|
|
if (mode == ZMII_AUTO)
|
174 |
|
|
mode = SMII;
|
175 |
|
|
}
|
176 |
|
|
|
177 |
|
|
zmii->base = zmiip;
|
178 |
|
|
zmii->mode = mode;
|
179 |
|
|
zmii->users++;
|
180 |
|
|
ocp_set_drvdata(ocpdev, zmii);
|
181 |
|
|
|
182 |
|
|
printk(KERN_NOTICE "zmii%d: bridge in %s mode\n", ocpdev->def->index,
|
183 |
|
|
mode_name[mode]);
|
184 |
|
|
return 0;
|
185 |
|
|
}
|
186 |
|
|
|
187 |
|
|
static void
|
188 |
|
|
emac_enable_zmii_port(struct ocp_device *ocpdev, int input)
|
189 |
|
|
{
|
190 |
|
|
u32 mask;
|
191 |
|
|
struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
|
192 |
|
|
|
193 |
|
|
mask = in_be32(&zmii->base->fer);
|
194 |
|
|
mask &= zmii_enable[input][MDI]; /* turn all non enabled MDI's off */
|
195 |
|
|
mask |= zmii_enable[input][zmii->mode] | mdi_enable[input];
|
196 |
|
|
out_be32(&zmii->base->fer, mask);
|
197 |
|
|
}
|
198 |
|
|
|
199 |
|
|
static void
|
200 |
|
|
emac_zmii_port_speed(struct ocp_device *ocpdev, int input, int speed)
|
201 |
|
|
{
|
202 |
|
|
struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
|
203 |
|
|
|
204 |
|
|
if (speed == 100)
|
205 |
|
|
zmii_speed |= zmii_speed100[input];
|
206 |
|
|
else
|
207 |
|
|
zmii_speed &= ~zmii_speed100[input];
|
208 |
|
|
|
209 |
|
|
out_be32(&zmii->base->ssr, zmii_speed);
|
210 |
|
|
}
|
211 |
|
|
|
212 |
|
|
static void
|
213 |
|
|
emac_fini_zmii(struct ocp_device *ocpdev)
|
214 |
|
|
{
|
215 |
|
|
struct ibm_ocp_zmii *zmii = ZMII_PRIV(ocpdev);
|
216 |
|
|
BUG_ON(!zmii || zmii->users == 0);
|
217 |
|
|
|
218 |
|
|
if (!--zmii->users){
|
219 |
|
|
ocp_set_drvdata(ocpdev, NULL);
|
220 |
|
|
iounmap((void*)zmii->base);
|
221 |
|
|
kfree(zmii);
|
222 |
|
|
}
|
223 |
|
|
}
|
224 |
|
|
|
225 |
|
|
int
|
226 |
|
|
emac_phy_read(struct net_device *dev, int mii_id, int reg)
|
227 |
|
|
{
|
228 |
|
|
register int i;
|
229 |
|
|
uint32_t stacr;
|
230 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
231 |
|
|
volatile emac_t *emacp = fep->emacp;
|
232 |
|
|
|
233 |
|
|
MDIO_DEBUG(("%s: phy_read, id: 0x%x, reg: 0x%x\n", dev->name, mii_id, reg));
|
234 |
|
|
|
235 |
|
|
/* Enable proper ZMII port */
|
236 |
|
|
if (fep->zmii_dev)
|
237 |
|
|
emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
|
238 |
|
|
/* Use the EMAC that has the MDIO port */
|
239 |
|
|
if (fep->mdio_dev) {
|
240 |
|
|
dev = fep->mdio_dev;
|
241 |
|
|
fep = dev->priv;
|
242 |
|
|
}
|
243 |
|
|
|
244 |
|
|
/* Wait for data transfer complete bit */
|
245 |
|
|
for (i = 0; i < OCP_RESET_DELAY; ++i) {
|
246 |
|
|
if (emacp->em0stacr & EMAC_STACR_OC)
|
247 |
|
|
break;
|
248 |
|
|
udelay(MDIO_DELAY); /* changed to 2 with new scheme -armin */
|
249 |
|
|
}
|
250 |
|
|
if ((emacp->em0stacr & EMAC_STACR_OC) == 0) {
|
251 |
|
|
printk(KERN_WARNING "%s: PHY read timeout #1!\n", dev->name);
|
252 |
|
|
return -1;
|
253 |
|
|
}
|
254 |
|
|
|
255 |
|
|
/* Clear the speed bits and make a read request to the PHY */
|
256 |
|
|
stacr = ((EMAC_STACR_READ | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
|
257 |
|
|
stacr |= ((mii_id & 0x1F) << 5);
|
258 |
|
|
|
259 |
|
|
out_be32(&emacp->em0stacr, stacr);
|
260 |
|
|
stacr = in_be32(&emacp->em0stacr);
|
261 |
|
|
/* Wait for data transfer complete bit */
|
262 |
|
|
for (i = 0; i < OCP_RESET_DELAY; ++i) {
|
263 |
|
|
if ((stacr = in_be32(&emacp->em0stacr)) & EMAC_STACR_OC)
|
264 |
|
|
break;
|
265 |
|
|
udelay(MDIO_DELAY);
|
266 |
|
|
}
|
267 |
|
|
if ((stacr & EMAC_STACR_OC) == 0) {
|
268 |
|
|
printk(KERN_WARNING "%s: PHY read timeout #2!\n", dev->name);
|
269 |
|
|
return -1;
|
270 |
|
|
}
|
271 |
|
|
|
272 |
|
|
/* Check for a read error */
|
273 |
|
|
if (stacr & EMAC_STACR_PHYE) {
|
274 |
|
|
MDIO_DEBUG(("OCP MDIO PHY error !\n"));
|
275 |
|
|
return -1;
|
276 |
|
|
}
|
277 |
|
|
|
278 |
|
|
MDIO_DEBUG((" -> 0x%x\n", stacr >> 16));
|
279 |
|
|
|
280 |
|
|
return (stacr >> 16);
|
281 |
|
|
}
|
282 |
|
|
|
283 |
|
|
|
284 |
|
|
void
|
285 |
|
|
emac_phy_write(struct net_device *dev, int mii_id, int reg, int data)
|
286 |
|
|
{
|
287 |
|
|
register int i = 0;
|
288 |
|
|
uint32_t stacr;
|
289 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
290 |
|
|
volatile emac_t *emacp = fep->emacp;
|
291 |
|
|
|
292 |
|
|
MDIO_DEBUG(("%s phy_write, id: 0x%x, reg: 0x%x, data: 0x%x\n",
|
293 |
|
|
dev->name, mii_id, reg, data));
|
294 |
|
|
|
295 |
|
|
/* Enable proper ZMII port */
|
296 |
|
|
if (fep->zmii_dev)
|
297 |
|
|
emac_enable_zmii_port(fep->zmii_dev, fep->zmii_input);
|
298 |
|
|
/* Use the EMAC that has the MDIO port */
|
299 |
|
|
if (fep->mdio_dev) {
|
300 |
|
|
dev = fep->mdio_dev;
|
301 |
|
|
fep = dev->priv;
|
302 |
|
|
}
|
303 |
|
|
|
304 |
|
|
/* Wait for data transfer complete bit */
|
305 |
|
|
for (i = 0; i < OCP_RESET_DELAY; ++i) {
|
306 |
|
|
if (emacp->em0stacr & EMAC_STACR_OC)
|
307 |
|
|
break;
|
308 |
|
|
udelay(MDIO_DELAY); /* changed to 2 with new scheme -armin */
|
309 |
|
|
}
|
310 |
|
|
if ((emacp->em0stacr & EMAC_STACR_OC) == 0) {
|
311 |
|
|
printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
|
312 |
|
|
return;
|
313 |
|
|
}
|
314 |
|
|
|
315 |
|
|
/* Clear the speed bits and make a read request to the PHY */
|
316 |
|
|
|
317 |
|
|
stacr = ((EMAC_STACR_WRITE | (reg & 0x1f)) & ~EMAC_STACR_CLK_100MHZ);
|
318 |
|
|
stacr |= ((mii_id & 0x1f) << 5) | ((data & 0xffff) << 16);
|
319 |
|
|
|
320 |
|
|
out_be32(&emacp->em0stacr, stacr);
|
321 |
|
|
|
322 |
|
|
/* Wait for data transfer complete bit */
|
323 |
|
|
for (i = 0; i < OCP_RESET_DELAY; ++i) {
|
324 |
|
|
if ((stacr = emacp->em0stacr) & EMAC_STACR_OC)
|
325 |
|
|
break;
|
326 |
|
|
udelay(MDIO_DELAY);
|
327 |
|
|
}
|
328 |
|
|
if ((emacp->em0stacr & EMAC_STACR_OC) == 0)
|
329 |
|
|
printk(KERN_WARNING "%s: PHY write timeout #2!\n", dev->name);
|
330 |
|
|
|
331 |
|
|
/* Check for a write error */
|
332 |
|
|
if ((stacr & EMAC_STACR_PHYE) != 0) {
|
333 |
|
|
MDIO_DEBUG(("OCP MDIO PHY error !\n"));
|
334 |
|
|
}
|
335 |
|
|
}
|
336 |
|
|
|
337 |
|
|
static void
|
338 |
|
|
emac_wakeup_irq(int irq, void *param, struct pt_regs *regs)
|
339 |
|
|
{
|
340 |
|
|
struct net_device *dev = param;
|
341 |
|
|
|
342 |
|
|
/* On Linux the 405 ethernet will always be active if configured
|
343 |
|
|
* in. This interrupt should never occur.
|
344 |
|
|
*/
|
345 |
|
|
printk(KERN_INFO "%s: WakeUp interrupt !\n", dev->name);
|
346 |
|
|
}
|
347 |
|
|
|
348 |
|
|
/*
 * MAL "TX end of buffer" callback: reap completed transmit descriptors.
 *
 * Walks the TX ring from ack_slot, freeing each skb whose descriptor
 * the MAL has released (READY bit clear), accumulating collision
 * statistics, and finally re-waking the queue if there is room again.
 * Runs with fep->lock held and IRQs saved since it races with
 * emac_start_xmit().
 */
static void
emac_txeob_dev(void *param, u32 chanmask)
{
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&fep->lock, flags);

	PKT_DEBUG(("emac_txeob_dev() entry, tx_cnt: %d\n", fep->tx_cnt));

	/* Reap until the ring is empty or we hit a descriptor the MAL
	 * still owns (READY set). */
	while (fep->tx_cnt &&
	       !(fep->tx_desc[fep->ack_slot].ctrl & MAL_TX_CTRL_READY)) {

		/* Tell the system the transmit completed. */
		dev_kfree_skb_irq(fep->tx_skb[fep->ack_slot]);

		/* Any of excessive/multiple/single collision status bits
		 * counts as a collision for stats purposes. */
		if (fep->tx_desc[fep->ack_slot].ctrl &
		    (EMAC_TX_ST_EC | EMAC_TX_ST_MC | EMAC_TX_ST_SC))
			fep->stats.collisions++;

		fep->tx_skb[fep->ack_slot] = (struct sk_buff *) NULL;
		/* Advance ack_slot with wrap-around */
		if (++fep->ack_slot == NUM_TX_BUFF)
			fep->ack_slot = 0;

		fep->tx_cnt--;
	}
	/* Room freed up: let the stack queue packets again */
	if (fep->tx_cnt < NUM_TX_BUFF)
		netif_wake_queue(dev);

	PKT_DEBUG(("emac_txeob_dev() exit, tx_cnt: %d\n", fep->tx_cnt));

	spin_unlock_irqrestore(&fep->lock, flags);
}
|
382 |
|
|
|
383 |
|
|
/*
|
384 |
|
|
Fill/Re-fill the rx chain with valid ctrl/ptrs.
|
385 |
|
|
This function will fill from rx_slot up to the parm end.
|
386 |
|
|
So to completely fill the chain pre-set rx_slot to 0 and
|
387 |
|
|
pass in an end of 0.
|
388 |
|
|
*/
|
389 |
|
|
/*
 * Fill/Re-fill the rx chain with valid ctrl/ptrs.
 * This function will fill from rx_slot up to the parm end.
 * So to completely fill the chain pre-set rx_slot to 0 and
 * pass in an end of 0.
 */
static void
emac_rx_fill(struct net_device *dev, int end)
{
	int i;
	struct ocp_enet_private *fep = dev->priv;
	unsigned char *ptr;

	i = fep->rx_slot;
	do {
		if (fep->rx_skb[i] != NULL) {
			/*We will trust the skb is still in a good state */
			ptr = (char *) virt_to_phys(fep->rx_skb[i]->data);
		} else {

			/* We don't want the 16 bytes skb_reserve done by dev_alloc_skb,
			 * it breaks our cache line alignment. However, we still allocate
			 * +16 so that we end up allocating the exact same size as
			 * dev_alloc_skb() would do.
			 * Also, because of the skb_res, the max DMA size we give to EMAC
			 * is slightly wrong, causing it to potentially DMA 2 more bytes
			 * from a broken/oversized packet. These 16 bytes will take care
			 * that we don't walk on somebody else toes with that.
			 */
			fep->rx_skb[i] =
			    alloc_skb(DESC_RX_BUF_SIZE + 16, GFP_ATOMIC);

			if (fep->rx_skb[i] == NULL) {
				/* Keep rx_slot here, the next time clean/fill is called
				 * we will try again before the MAL wraps back here
				 * If the MAL tries to use this descriptor with
				 * the EMPTY bit off it will cause the
				 * rxde interrupt. That is where we will
				 * try again to allocate an sk_buff.
				 */
				break;

			}

			/* Optional alignment offset (module parameter) */
			if (skb_res)
				skb_reserve(fep->rx_skb[i], skb_res);

			/* We must NOT consistent_sync the cache line right after the
			 * buffer, so we must crop our sync size to account for the
			 * reserved space
			 */
			consistent_sync((void *) fep->rx_skb[i]->
					data, (DESC_RX_BUF_SIZE-skb_res),
					PCI_DMA_FROMDEVICE);
			ptr = (char *) virt_to_phys(fep->rx_skb[i]->data);
		}
		/* Hand the descriptor back to the MAL: EMPTY so hardware may
		 * use it, INTR so completion raises rxeob, WRAP on the last
		 * ring entry. */
		fep->rx_desc[i].ctrl = MAL_RX_CTRL_EMPTY | MAL_RX_CTRL_INTR |	/*could be smarter about this to avoid ints at high loads */
		    (i == (NUM_RX_BUFF - 1) ? MAL_RX_CTRL_WRAP : 0);

		fep->rx_desc[i].data_ptr = ptr;
		/*
		 * 440GP uses the previously reserved bits in the
		 * data_len to encode the upper 4-bits of the buffer
		 * physical address (ERPN). Initialize these.
		 */
		fep->rx_desc[i].data_len = 0;
	} while ((i = (i + 1) % NUM_RX_BUFF) != end);

	/* Remember where we stopped (may differ from `end` on alloc failure) */
	fep->rx_slot = i;
}
|
453 |
|
|
|
454 |
|
|
/*
 * Harvest received packets from the RX ring.
 *
 * Walks the whole ring starting at rx_slot: skips slots with no skb
 * (previous allocation failure), stops at the first descriptor still
 * EMPTY (owned by the MAL), counts errors for bad packets, and passes
 * good frames up via netif_rx().  When call_rx_fill is nonzero the
 * consumed slots are refilled afterwards by emac_rx_fill().
 * Caller must hold fep->lock.
 */
static void
emac_rx_clean(struct net_device *dev, int call_rx_fill)
{
	int i;
	int error, frame_length;
	struct ocp_enet_private *fep = dev->priv;
	unsigned short ctrl;
	int slots_walked = 0;

	i = fep->rx_slot;

	PKT_DEBUG(("emac_rx_clean() entry, call_rx_fill: %d, rx_slot: %d\n", call_rx_fill, fep->rx_slot));

	do {
		if (fep->rx_skb[i] == NULL)
			goto skip;	/*we have already handled the packet but haved failed to alloc */
		/*
		   since rx_desc is in uncached mem we don't keep reading it directly
		   we pull out a local copy of ctrl and do the checks on the copy.
		 */
		ctrl = fep->rx_desc[i].ctrl;
		if (ctrl & MAL_RX_CTRL_EMPTY)
			break;	/*we don't have any more ready packets */

		if (ctrl & EMAC_BAD_RX_PACKET) {

			fep->stats.rx_errors++;
			fep->stats.rx_dropped++;

			/* Classify the error for the detailed counters */
			if (ctrl & EMAC_RX_ST_OE)
				fep->stats.rx_fifo_errors++;
			if (ctrl & EMAC_RX_ST_AE)
				fep->stats.rx_frame_errors++;
			if (ctrl & EMAC_RX_ST_BFCS)
				fep->stats.rx_crc_errors++;
			if (ctrl & (EMAC_RX_ST_RP | EMAC_RX_ST_PTL |
				    EMAC_RX_ST_ORE | EMAC_RX_ST_IRE))
				fep->stats.rx_length_errors++;
		} else {

			/* Send the skb up the chain. */
			/* -4: strip the trailing FCS from the length */
			frame_length = fep->rx_desc[i].data_len - 4;

			skb_put(fep->rx_skb[i], frame_length);
			fep->rx_skb[i]->dev = dev;
			fep->rx_skb[i]->protocol =
			    eth_type_trans(fep->rx_skb[i], dev);

			error = netif_rx(fep->rx_skb[i]);
			if ((error == NET_RX_DROP) || (error == NET_RX_BAD)) {
				fep->stats.rx_dropped++;
			} else {
				fep->stats.rx_packets++;
				fep->stats.rx_bytes += frame_length;
			}
			/* skb now owned by the stack; slot refilled later */
			fep->rx_skb[i] = NULL;
		}
	      skip:
		slots_walked = 1;

	} while ((i = (i + 1) % NUM_RX_BUFF) != fep->rx_slot);

	PKT_DEBUG(("emac_rx_clean() exit, rx_slot: %d\n", fep->rx_slot));

	/* Re-arm the slots we consumed, starting where the walk stopped */
	if (slots_walked && call_rx_fill)
		emac_rx_fill(dev, i);
}
|
521 |
|
|
|
522 |
|
|
static void
|
523 |
|
|
emac_rxeob_dev(void *param, u32 chanmask)
|
524 |
|
|
{
|
525 |
|
|
struct net_device *dev = param;
|
526 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
527 |
|
|
unsigned long flags;
|
528 |
|
|
|
529 |
|
|
spin_lock_irqsave(&fep->lock, flags);
|
530 |
|
|
emac_rx_clean(dev, 1);
|
531 |
|
|
spin_unlock_irqrestore(&fep->lock, flags);
|
532 |
|
|
}
|
533 |
|
|
|
534 |
|
|
/*
|
535 |
|
|
 * This interrupt should never occur; we don't program
|
536 |
|
|
 * the MAL for continuous mode.
|
537 |
|
|
*/
|
538 |
|
|
static void
|
539 |
|
|
emac_txde_dev(void *param, u32 chanmask)
|
540 |
|
|
{
|
541 |
|
|
struct net_device *dev = param;
|
542 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
543 |
|
|
|
544 |
|
|
printk(KERN_WARNING "%s: transmit descriptor error\n", dev->name);
|
545 |
|
|
|
546 |
|
|
emac_mac_dump(dev);
|
547 |
|
|
emac_mal_dump(dev);
|
548 |
|
|
|
549 |
|
|
/* Reenable the transmit channel */
|
550 |
|
|
mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
|
551 |
|
|
}
|
552 |
|
|
|
553 |
|
|
/*
|
554 |
|
|
* This interrupt should be very rare at best. This occurs when
|
555 |
|
|
* the hardware has a problem with the receive descriptors. The manual
|
556 |
|
|
 * states that it occurs when the hardware finds that the receive descriptor
|
557 |
|
|
* empty bit is not set. The recovery mechanism will be to
|
558 |
|
|
* traverse through the descriptors, handle any that are marked to be
|
559 |
|
|
* handled and reinitialize each along the way. At that point the driver
|
560 |
|
|
* will be restarted.
|
561 |
|
|
*/
|
562 |
|
|
/*
 * MAL receive-descriptor-error callback (should be very rare).
 *
 * Recovery sequence: dump state, disable the RX channel, salvage any
 * packets already completed in the ring (without refilling), rebuild
 * the whole ring from slot 0, acknowledge the pending EOB/DE status,
 * then re-enable the RX channel.  The ordering of these steps matters
 * because the hardware restarts from the first descriptor.
 */
static void
emac_rxde_dev(void *param, u32 chanmask)
{
	struct net_device *dev = param;
	struct ocp_enet_private *fep = dev->priv;
	unsigned long flags;

	printk(KERN_WARNING "%s: receive descriptor error\n", fep->ndev->name);

	emac_mac_dump(dev);
	emac_mal_dump(dev);
	emac_desc_dump(dev);

	/* Disable RX channel */
	spin_lock_irqsave(&fep->lock, flags);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/* For now, charge the error against all emacs */
	fep->stats.rx_errors++;

	/* so do we have any good packets still? */
	emac_rx_clean(dev,0);

	/* When the interface is restarted it resets processing to the
	 * first descriptor in the table.
	 */

	fep->rx_slot = 0;
	emac_rx_fill(dev, 0);

	/* Ack any pending end-of-buffer / descriptor-error status */
	set_mal_dcrn(fep->mal, DCRN_MALRXEOBISR, fep->commac.rx_chan_mask);
	set_mal_dcrn(fep->mal, DCRN_MALRXDEIR, fep->commac.rx_chan_mask);

	/* Reenable the receive channels */
	mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
	spin_unlock_irqrestore(&fep->lock, flags);
}
|
599 |
|
|
|
600 |
|
|
/*
 * EMAC error-interrupt handler: decode the interrupt status register,
 * update statistics, and acknowledge the interrupt by writing the
 * status bits back to EM0ISR.
 */
static void
emac_mac_irq(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct ocp_enet_private *fep = dev->priv;
	volatile emac_t *emacp = fep->emacp;
	unsigned long tmp_em0isr;

	/* EMAC interrupt */
	tmp_em0isr = in_be32(&emacp->em0isr);
	if (tmp_em0isr & (EMAC_ISR_TE0 | EMAC_ISR_TE1)) {
		/* This error is a hard transmit error - could retransmit */
		fep->stats.tx_errors++;

		/* Reenable the transmit channel */
		mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);

	} else {
		/* Any non-TX error interrupt is charged as an RX error */
		fep->stats.rx_errors++;
	}

	/* Break the cause down into the detailed RX/TX counters */
	if (tmp_em0isr & EMAC_ISR_RP)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_ALE)
		fep->stats.rx_frame_errors++;
	if (tmp_em0isr & EMAC_ISR_BFCS)
		fep->stats.rx_crc_errors++;
	if (tmp_em0isr & EMAC_ISR_PTLE)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_ORE)
		fep->stats.rx_length_errors++;
	if (tmp_em0isr & EMAC_ISR_TE0)
		fep->stats.tx_aborted_errors++;

	emac_err_dump(dev, tmp_em0isr);

	/* Write-one-to-clear: ack everything we just handled */
	out_be32(&emacp->em0isr, tmp_em0isr);
}
|
638 |
|
|
|
639 |
|
|
/*
 * hard_start_xmit: queue one skb on the TX ring and kick the EMAC.
 *
 * Takes fep->lock (racing against emac_txeob_dev which reaps the
 * ring).  Stops the netif queue when this packet fills the ring.
 * Returns 0 (packet always accepted; a full queue is a BUG).
 */
static int
emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short ctrl;
	unsigned long flags;
	struct ocp_enet_private *fep = dev->priv;
	volatile emac_t *emacp = fep->emacp;

	spin_lock_irqsave(&fep->lock, flags);

	PKT_DEBUG(("emac_start_xmit() entry, queue stopped: %d, fep->tx_cnt: %d\n",
		   netif_queue_stopped(dev), fep->tx_cnt));

	/* That shouldn't happen... */
	if (netif_queue_stopped(dev) || (fep->tx_cnt == NUM_TX_BUFF)) {
		printk("%s: start_xmit called on full queue !\n", dev->name);
		BUG();
	}

	/* This packet takes the last free slot: stop the stack's queue */
	if (++fep->tx_cnt == NUM_TX_BUFF) {
		PKT_DEBUG(("emac_start_xmit() stopping queue\n"));
		netif_stop_queue(dev);
	}

	/* Store the skb buffer for later ack by the transmit end of buffer
	 * interrupt.
	 */
	fep->tx_skb[fep->tx_slot] = skb;
	/* Flush the payload to memory so the MAL DMA sees it */
	consistent_sync((void *) skb->data, skb->len, PCI_DMA_TODEVICE);

	ctrl = EMAC_TX_CTRL_DFLT;
	/* Last ring entry must carry the WRAP bit */
	if ((NUM_TX_BUFF - 1) == fep->tx_slot)
		ctrl |= MAL_TX_CTRL_WRAP;
	fep->tx_desc[fep->tx_slot].data_ptr = (char *) virt_to_phys(skb->data);
	fep->tx_desc[fep->tx_slot].data_len = (short) skb->len;
	/* ctrl written last: hands the descriptor to the hardware */
	fep->tx_desc[fep->tx_slot].ctrl = ctrl;

	/* Send the packet out. */
	out_be32(&emacp->em0tmr0, EMAC_TMR0_XMIT);

	if (++fep->tx_slot == NUM_TX_BUFF)
		fep->tx_slot = 0;

	fep->stats.tx_packets++;
	fep->stats.tx_bytes += skb->len;

	PKT_DEBUG(("emac_start_xmit() exitn"));

	spin_unlock_irqrestore(&fep->lock, flags);

	return 0;
}
|
691 |
|
|
|
692 |
|
|
/*
 * Reprogram the EMAC mode register 1 (and ZMII speed select) to match
 * the speed/duplex currently reported by the PHY.  Defaults to
 * 10Mb half-duplex when the PHY link read fails.  The mode register is
 * only written while the interface is open.  Always returns 0.
 */
static int
emac_adjust_to_link(struct ocp_enet_private *fep)
{
	volatile emac_t *emacp = fep->emacp;
	unsigned long mode_reg;
	int full_duplex, speed;

	full_duplex = 0;
	speed = SPEED_10;

	/* set mode register 1 defaults */
	mode_reg = EMAC_M1_DEFAULT;

	/* Read link mode on PHY */
	if (fep->phy_mii.def->ops->read_link(&fep->phy_mii) == 0) {
		/* If an error occurred, we don't deal with it yet */
		full_duplex = (fep->phy_mii.duplex == DUPLEX_FULL);
		speed = fep->phy_mii.speed;
	}

	/* set speed (default is 10Mb) */
	if (speed == SPEED_100) {
		mode_reg |= EMAC_M1_MF_100MBPS;
		if (fep->zmii_dev)
			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input, 100);
	} else {
		mode_reg &= ~EMAC_M1_MF_100MBPS;
		if (fep->zmii_dev)
			emac_zmii_port_speed(fep->zmii_dev, fep->zmii_input, 10);
	}

	/* Duplex: full sets FDE/EIFC/IST; half clears FDE/EIFC/ILE */
	if (full_duplex)
		mode_reg |= EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_IST;
	else
		mode_reg &= ~(EMAC_M1_FDE | EMAC_M1_EIFC | EMAC_M1_ILE);

	LINK_DEBUG(("%s: adjust to link, speed: %d, duplex: %d, opened: %d\n",
		    fep->ndev->name, speed, full_duplex, fep->opened));

	printk(KERN_INFO "%s: Speed: %s, %s duplex.\n",
	       fep->ndev->name,
	       speed == SPEED_100 ? "100" : "10",
	       full_duplex ? "Full" : "Half");
	/* Only touch the hardware once the interface has been opened */
	if (fep->opened)
		out_be32(&emacp->em0mr1, mode_reg);

	return 0;
}
|
740 |
|
|
|
741 |
|
|
/*
 * Program the EMAC receive mode for the device's current flags:
 * promiscuous (PME), promiscuous-multicast (PMME, also used when more
 * than 32 multicast addresses are requested), or hash-filtered
 * multicast via the four 16-bit group-address hash tables (GAHT1-4).
 * Caller is responsible for any needed locking.
 */
static void
__emac_set_multicast_list(struct net_device *dev)
{
	struct ocp_enet_private *fep = dev->priv;
	volatile emac_t *emacp = fep->emacp;
	u32 rmr = in_be32(&emacp->em0rmr);

	/* First clear all special bits, they can be set later */
	rmr &= ~(EMAC_RMR_PME | EMAC_RMR_PMME | EMAC_RMR_MAE);

	if (dev->flags & IFF_PROMISC) {
		rmr |= EMAC_RMR_PME;

	} else if (dev->flags & IFF_ALLMULTI || 32 < dev->mc_count) {
		/* Must be setting up to use multicast. Now check for promiscuous
		 * multicast
		 */
		rmr |= EMAC_RMR_PMME;
	} else if (dev->flags & IFF_MULTICAST && 0 < dev->mc_count) {

		unsigned short em0gaht[4] = { 0, 0, 0, 0 };
		struct dev_mc_list *dmi;

		/* Need to hash on the multicast address. */
		for (dmi = dev->mc_list; dmi; dmi = dmi->next) {
			unsigned long mc_crc;
			unsigned int bit_number;

			/* Top 6 bits of the Ethernet CRC select the hash bit */
			mc_crc = ether_crc(6, (char *) dmi->dmi_addr);
			bit_number = 63 - (mc_crc >> 26);	/* MSB: 0 LSB: 63 */
			em0gaht[bit_number >> 4] |=
			    0x8000 >> (bit_number & 0x0f);
		}
		emacp->em0gaht1 = em0gaht[0];
		emacp->em0gaht2 = em0gaht[1];
		emacp->em0gaht3 = em0gaht[2];
		emacp->em0gaht4 = em0gaht[3];

		/* Turn on multicast addressing */
		rmr |= EMAC_RMR_MAE;
	}

	out_be32(&emacp->em0rmr, rmr);
}
|
785 |
|
|
|
786 |
|
|
/*
 * (Re)initialize the TX and RX descriptor rings for this channel.
 *
 * Locates this EMAC's descriptor areas inside the MAL's shared
 * tables, releases any skbs still parked on the TX ring, zeroes the
 * TX descriptors (setting WRAP on the last one), then fills the RX
 * ring with fresh buffers and resets all ring indices.
 */
static void
emac_init_rings(struct net_device *dev)
{
	struct ocp_enet_private *ep = dev->priv;
	int loop;

	/* Per-channel slices of the MAL's descriptor tables */
	ep->tx_desc = (struct mal_descriptor *) ((char *) ep->mal->tx_virt_addr +
						 (ep->mal_tx_chan * MAL_DT_ALIGN));
	ep->rx_desc = (struct mal_descriptor *) ((char *) ep->mal->rx_virt_addr +
						 (ep->mal_rx_chan * MAL_DT_ALIGN));

	/* Fill in the transmit descriptor ring. */
	for (loop = 0; loop < NUM_TX_BUFF; loop++) {
		/* Drop any skb left over from a previous run */
		if (ep->tx_skb[loop])
			dev_kfree_skb_irq(ep->tx_skb[loop]);
		ep->tx_skb[loop] = NULL;
		ep->tx_desc[loop].ctrl = 0;
		ep->tx_desc[loop].data_len = 0;
		ep->tx_desc[loop].data_ptr = NULL;
	}
	/* Mark the end of the ring for the MAL */
	ep->tx_desc[loop - 1].ctrl |= MAL_TX_CTRL_WRAP;

	/* Format the receive descriptor ring. */
	ep->rx_slot = 0;
	emac_rx_fill(dev, 0);
	/* rx_slot != 0 means emac_rx_fill ran out of memory mid-fill */
	if (ep->rx_slot != 0) {
		printk(KERN_ERR
		       "%s: Not enough mem for RxChain durning Open?\n",
		       dev->name);
		/*We couldn't fill the ring at startup?
		 *We could clean up and fail to open but right now we will try to
		 *carry on. It may be a sign of a bad NUM_RX_BUFF value
		 */
	}

	ep->tx_cnt = 0;
	ep->tx_slot = 0;
	ep->ack_slot = 0;
}
|
825 |
|
|
|
826 |
|
|
/*
 * emac_reset_configure - soft-reset the EMAC and reprogram its base config
 * @fep: driver private state for this EMAC instance
 *
 * Disables the MAL channels, soft-resets the MAC, then rewrites the
 * station address, multicast state, FIFO/threshold defaults and the
 * descriptor rings.  The register-write order below is deliberate; do
 * not reorder.  Called under fep->lock (see emac_open / emac_link_timer).
 */
static void
emac_reset_configure(struct ocp_enet_private *fep)
{
	volatile emac_t *emacp = fep->emacp;
	int i;

	/* Quiesce DMA before touching the MAC */
	mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
	mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);

	/* Reset the EMAC */
	out_be32(&emacp->em0mr0, EMAC_M0_SRST);
	udelay(20);
	/* Poll up to ~1ms for the self-clearing reset bit */
	for (i=0; i<100; i++) {
		if ((in_be32(&emacp->em0mr0) & EMAC_M0_SRST) == 0)
			break;
		udelay(10);
	}

	if (i >= 100) {
		printk(KERN_ERR "%s: Cannot reset EMAC\n", fep->ndev->name);
		return;
	}

	/* Switch IRQs off for now */
	out_be32(&emacp->em0iser, 0);

	/* Configure MAL rx channel */
	mal_set_rcbs(fep->mal, fep->mal_rx_chan, DESC_BUF_SIZE_REG);

	/* set the high address */
	out_be32(&emacp->em0iahr, (fep->ndev->dev_addr[0] << 8) | fep->ndev->dev_addr[1]);

	/* set the low address */
	out_be32(&emacp->em0ialr,
		 (fep->ndev->dev_addr[2] << 24) | (fep->ndev->dev_addr[3] << 16)
		 | (fep->ndev->dev_addr[4] << 8) | fep->ndev->dev_addr[5]);

	/* Adjust to link: only meaningful if carrier is already up */
	if (netif_carrier_ok(fep->ndev))
		emac_adjust_to_link(fep);

	/* enable broadcast/individual address and RX FIFO defaults */
	out_be32(&emacp->em0rmr, EMAC_RMR_DEFAULT);

	/* set transmit request threshold register */
	out_be32(&emacp->em0trtr, EMAC_TRTR_DEFAULT);

	/* Reconfigure multicast (lock-free variant: caller holds fep->lock) */
	__emac_set_multicast_list(fep->ndev);

	/* Set receiver/transmitter defaults */
	out_be32(&emacp->em0rwmr, EMAC_RWMR_DEFAULT);
	out_be32(&emacp->em0tmr0, EMAC_TMR0_DEFAULT);
	out_be32(&emacp->em0tmr1, EMAC_TMR1_DEFAULT);

	/* set frame gap */
	out_be32(&emacp->em0ipgvr, CONFIG_IBM_OCP_ENET_GAP);

	/* Init ring buffers */
	emac_init_rings(fep->ndev);
}
|
887 |
|
|
|
888 |
|
|
static void
|
889 |
|
|
emac_kick(struct ocp_enet_private *fep)
|
890 |
|
|
{
|
891 |
|
|
volatile emac_t *emacp = fep->emacp;
|
892 |
|
|
unsigned long emac_ier;
|
893 |
|
|
|
894 |
|
|
emac_ier = EMAC_ISR_PP | EMAC_ISR_BP | EMAC_ISR_RP |
|
895 |
|
|
EMAC_ISR_SE | EMAC_ISR_PTLE | EMAC_ISR_ALE |
|
896 |
|
|
EMAC_ISR_BFCS | EMAC_ISR_ORE | EMAC_ISR_IRE;
|
897 |
|
|
|
898 |
|
|
out_be32(&emacp->em0iser, emac_ier);
|
899 |
|
|
|
900 |
|
|
/* enable all MAL transmit and receive channels */
|
901 |
|
|
mal_enable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
|
902 |
|
|
mal_enable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
|
903 |
|
|
|
904 |
|
|
/* set transmit and receive enable */
|
905 |
|
|
out_be32(&emacp->em0mr0, EMAC_M0_TXE | EMAC_M0_RXE);
|
906 |
|
|
}
|
907 |
|
|
|
908 |
|
|
static void
|
909 |
|
|
emac_start_link(struct ocp_enet_private *fep, struct ethtool_cmd *ep)
|
910 |
|
|
{
|
911 |
|
|
u32 advertise;
|
912 |
|
|
int autoneg;
|
913 |
|
|
int forced_speed;
|
914 |
|
|
int forced_duplex;
|
915 |
|
|
|
916 |
|
|
/* Default advertise */
|
917 |
|
|
advertise = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
|
918 |
|
|
ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
|
919 |
|
|
autoneg = fep->want_autoneg;
|
920 |
|
|
forced_speed = fep->phy_mii.speed;
|
921 |
|
|
forced_duplex = fep->phy_mii.duplex;
|
922 |
|
|
|
923 |
|
|
/* Setup link parameters */
|
924 |
|
|
if (ep) {
|
925 |
|
|
if (ep->autoneg == AUTONEG_ENABLE) {
|
926 |
|
|
advertise = ep->advertising;
|
927 |
|
|
autoneg = 1;
|
928 |
|
|
} else {
|
929 |
|
|
autoneg = 0;
|
930 |
|
|
forced_speed = ep->speed;
|
931 |
|
|
forced_duplex = ep->duplex;
|
932 |
|
|
}
|
933 |
|
|
}
|
934 |
|
|
|
935 |
|
|
/* Configure PHY & start aneg */
|
936 |
|
|
fep->want_autoneg = autoneg;
|
937 |
|
|
if (autoneg) {
|
938 |
|
|
LINK_DEBUG(("%s: start link aneg, advertise: 0x%x\n",
|
939 |
|
|
fep->ndev->name, advertise));
|
940 |
|
|
fep->phy_mii.def->ops->setup_aneg(&fep->phy_mii, advertise);
|
941 |
|
|
} else {
|
942 |
|
|
LINK_DEBUG(("%s: start link forced, speed: %d, duplex: %d\n",
|
943 |
|
|
fep->ndev->name, forced_speed, forced_duplex));
|
944 |
|
|
fep->phy_mii.def->ops->setup_forced(&fep->phy_mii, forced_speed,
|
945 |
|
|
forced_duplex);
|
946 |
|
|
}
|
947 |
|
|
fep->timer_ticks = 0;
|
948 |
|
|
mod_timer(&fep->link_timer, jiffies + HZ);
|
949 |
|
|
}
|
950 |
|
|
|
951 |
|
|
/*
 * emac_link_timer - periodic (1s) link state poll
 * @data: struct ocp_enet_private * cast to unsigned long (timer cookie)
 *
 * Polls the PHY; on a carrier transition, updates netif carrier state
 * and fully resets/reconfigures the chip (the hardware needs that on a
 * link-parameter change).  If the link stays down for >10 ticks with
 * autoneg wanted, autoneg is restarted.  Always re-arms itself.
 */
static void
emac_link_timer(unsigned long data)
{
	struct ocp_enet_private *fep = (struct ocp_enet_private *)data;
	int link;

	/* Device is being torn down (see emac_remove) -- do nothing and
	 * don't re-arm. */
	if (fep->going_away)
		return;

	spin_lock_irq(&fep->lock);

	link = fep->phy_mii.def->ops->poll_link(&fep->phy_mii);
	LINK_DEBUG(("%s: poll_link: %d\n", fep->ndev->name, link));

	/* NOTE(review): this equality compares poll_link() against
	 * netif_carrier_ok(); assumes both are strictly 0/1 -- confirm
	 * against the mii ops implementations. */
	if (link == netif_carrier_ok(fep->ndev)) {
		/* No change; maybe restart aneg after ~10s of no link */
		if (!link && fep->want_autoneg && (++fep->timer_ticks) > 10)
			emac_start_link(fep, NULL);
		goto out;
	}
	printk(KERN_INFO "%s: Link is %s\n", fep->ndev->name, link ? "Up" : "Down");
	if (link) {
		netif_carrier_on(fep->ndev);
		/* Chip needs a full reset on config change. That sucks, so I
		 * should ultimately move that to some tasklet to limit
		 * latency peaks caused by this code
		 */
		emac_reset_configure(fep);
		if (fep->opened)
			emac_kick(fep);
	} else {
		fep->timer_ticks = 0;
		netif_carrier_off(fep->ndev);
	}
out:
	mod_timer(&fep->link_timer, jiffies + HZ);
	spin_unlock_irq(&fep->lock);
}
|
988 |
|
|
|
989 |
|
|
static void
|
990 |
|
|
emac_set_multicast_list(struct net_device *dev)
|
991 |
|
|
{
|
992 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
993 |
|
|
|
994 |
|
|
spin_lock_irq(&fep->lock);
|
995 |
|
|
__emac_set_multicast_list(dev);
|
996 |
|
|
spin_unlock_irq(&fep->lock);
|
997 |
|
|
}
|
998 |
|
|
|
999 |
|
|
static int
|
1000 |
|
|
emac_ethtool(struct net_device *dev, void* ep_user)
|
1001 |
|
|
{
|
1002 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
1003 |
|
|
struct ethtool_cmd ecmd;
|
1004 |
|
|
unsigned long features = fep->phy_mii.def->features;
|
1005 |
|
|
|
1006 |
|
|
if (copy_from_user(&ecmd, ep_user, sizeof(ecmd)))
|
1007 |
|
|
return -EFAULT;
|
1008 |
|
|
|
1009 |
|
|
switch(ecmd.cmd) {
|
1010 |
|
|
case ETHTOOL_GDRVINFO: {
|
1011 |
|
|
struct ethtool_drvinfo info;
|
1012 |
|
|
memset(&info, 0, sizeof(info));
|
1013 |
|
|
info.cmd = ETHTOOL_GDRVINFO;
|
1014 |
|
|
strncpy(info.driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
|
1015 |
|
|
strncpy(info.version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
|
1016 |
|
|
info.fw_version[0] = '\0';
|
1017 |
|
|
sprintf(info.bus_info, "OCP EMAC %d", fep->ocpdev->def->index);
|
1018 |
|
|
info.regdump_len = 0;
|
1019 |
|
|
if (copy_to_user(ep_user, &info, sizeof(info)))
|
1020 |
|
|
return -EFAULT;
|
1021 |
|
|
return 0;
|
1022 |
|
|
}
|
1023 |
|
|
|
1024 |
|
|
case ETHTOOL_GSET:
|
1025 |
|
|
ecmd.supported = features;
|
1026 |
|
|
ecmd.port = PORT_MII;
|
1027 |
|
|
ecmd.transceiver = XCVR_EXTERNAL;
|
1028 |
|
|
ecmd.phy_address = fep->mii_phy_addr;
|
1029 |
|
|
spin_lock_irq(&fep->lock);
|
1030 |
|
|
ecmd.autoneg = fep->want_autoneg;
|
1031 |
|
|
ecmd.speed = fep->phy_mii.speed;
|
1032 |
|
|
ecmd.duplex = fep->phy_mii.duplex;
|
1033 |
|
|
spin_unlock_irq(&fep->lock);
|
1034 |
|
|
if (copy_to_user(ep_user, &ecmd, sizeof(ecmd)))
|
1035 |
|
|
return -EFAULT;
|
1036 |
|
|
return 0;
|
1037 |
|
|
|
1038 |
|
|
case ETHTOOL_SSET:
|
1039 |
|
|
if (!capable(CAP_NET_ADMIN))
|
1040 |
|
|
return -EPERM;
|
1041 |
|
|
|
1042 |
|
|
if (ecmd.autoneg != AUTONEG_ENABLE &&
|
1043 |
|
|
ecmd.autoneg != AUTONEG_DISABLE)
|
1044 |
|
|
return -EINVAL;
|
1045 |
|
|
if (ecmd.autoneg == AUTONEG_ENABLE &&
|
1046 |
|
|
ecmd.advertising == 0)
|
1047 |
|
|
return -EINVAL;
|
1048 |
|
|
if (ecmd.duplex != DUPLEX_HALF && ecmd.duplex != DUPLEX_FULL)
|
1049 |
|
|
return -EINVAL;
|
1050 |
|
|
if (ecmd.autoneg == AUTONEG_DISABLE)
|
1051 |
|
|
switch(ecmd.speed) {
|
1052 |
|
|
case SPEED_10:
|
1053 |
|
|
if (ecmd.duplex == DUPLEX_HALF &&
|
1054 |
|
|
(features & SUPPORTED_10baseT_Half) == 0)
|
1055 |
|
|
return -EINVAL;
|
1056 |
|
|
if (ecmd.duplex == DUPLEX_FULL &&
|
1057 |
|
|
(features & SUPPORTED_10baseT_Full) == 0)
|
1058 |
|
|
return -EINVAL;
|
1059 |
|
|
break;
|
1060 |
|
|
case SPEED_100:
|
1061 |
|
|
if (ecmd.duplex == DUPLEX_HALF &&
|
1062 |
|
|
(features & SUPPORTED_100baseT_Half) == 0)
|
1063 |
|
|
return -EINVAL;
|
1064 |
|
|
if (ecmd.duplex == DUPLEX_FULL &&
|
1065 |
|
|
(features & SUPPORTED_100baseT_Full) == 0)
|
1066 |
|
|
return -EINVAL;
|
1067 |
|
|
break;
|
1068 |
|
|
default:
|
1069 |
|
|
return -EINVAL;
|
1070 |
|
|
}
|
1071 |
|
|
else if ((features & SUPPORTED_Autoneg) == 0)
|
1072 |
|
|
return -EINVAL;
|
1073 |
|
|
spin_lock_irq(&fep->lock);
|
1074 |
|
|
emac_start_link(fep, &ecmd);
|
1075 |
|
|
spin_unlock_irq(&fep->lock);
|
1076 |
|
|
return 0;
|
1077 |
|
|
|
1078 |
|
|
case ETHTOOL_NWAY_RST:
|
1079 |
|
|
if (!fep->want_autoneg)
|
1080 |
|
|
return -EINVAL;
|
1081 |
|
|
spin_lock_irq(&fep->lock);
|
1082 |
|
|
emac_start_link(fep, NULL);
|
1083 |
|
|
spin_unlock_irq(&fep->lock);
|
1084 |
|
|
return 0;
|
1085 |
|
|
|
1086 |
|
|
case ETHTOOL_GLINK: {
|
1087 |
|
|
struct ethtool_value edata;
|
1088 |
|
|
memset(&edata, 0, sizeof(edata));
|
1089 |
|
|
edata.cmd = ETHTOOL_GLINK;
|
1090 |
|
|
edata.data = netif_carrier_ok(dev);
|
1091 |
|
|
if (copy_to_user(ep_user, &edata, sizeof(edata)))
|
1092 |
|
|
return -EFAULT;
|
1093 |
|
|
return 0;
|
1094 |
|
|
}
|
1095 |
|
|
}
|
1096 |
|
|
|
1097 |
|
|
return -EOPNOTSUPP;
|
1098 |
|
|
}
|
1099 |
|
|
|
1100 |
|
|
/*
 * emac_ioctl - net_device do_ioctl hook (ethtool + legacy MII ioctls)
 * @dev: target net device
 * @rq:  ifreq from userspace
 * @cmd: ioctl command code
 *
 * NOTE(review): the MII cases reinterpret the ifr_data member of the
 * ifreq union as an array of uints (legacy mii_ioctl_data layout:
 * [0]=phy id, [1]=reg num, [2]=value in, [3]=value out).  This aliasing
 * is the historical MII ioctl ABI -- confirm the indices against
 * struct mii_ioctl_data before changing anything here.
 */
static int
emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ocp_enet_private *fep = dev->priv;
	uint *data = (uint *) & rq->ifr_data;

	switch (cmd) {
	case SIOCETHTOOL:
		return emac_ethtool(dev, rq->ifr_data);
	case SIOCDEVPRIVATE:
	case SIOCGMIIPHY:
		/* report our PHY address, then fall into the read case */
		data[0] = fep->mii_phy_addr;
		/*FALLTHRU*/
	case SIOCDEVPRIVATE + 1:
	case SIOCGMIIREG:
		/* read register data[1]; always from our own PHY address */
		data[3] = emac_phy_read(dev, fep->mii_phy_addr, data[1]);
		return 0;
	case SIOCDEVPRIVATE + 2:
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		/* write data[2] to register data[1] */
		emac_phy_write(dev, fep->mii_phy_addr, data[1], data[2]);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
|
1128 |
|
|
|
1129 |
|
|
static int
|
1130 |
|
|
emac_open(struct net_device *dev)
|
1131 |
|
|
{
|
1132 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
1133 |
|
|
int rc;
|
1134 |
|
|
|
1135 |
|
|
spin_lock_irq(&fep->lock);
|
1136 |
|
|
|
1137 |
|
|
fep->opened = 1;
|
1138 |
|
|
|
1139 |
|
|
/* Reset & configure the chip */
|
1140 |
|
|
emac_reset_configure(fep);
|
1141 |
|
|
|
1142 |
|
|
spin_unlock_irq(&fep->lock);
|
1143 |
|
|
|
1144 |
|
|
/* Request our interrupt lines */
|
1145 |
|
|
rc = request_irq(dev->irq, emac_mac_irq, 0, "OCP EMAC MAC", dev);
|
1146 |
|
|
if (rc != 0)
|
1147 |
|
|
goto bail;
|
1148 |
|
|
rc = request_irq(fep->wol_irq, emac_wakeup_irq, 0, "OCP EMAC Wakeup", dev);
|
1149 |
|
|
if (rc != 0) {
|
1150 |
|
|
free_irq(dev->irq, dev);
|
1151 |
|
|
goto bail;
|
1152 |
|
|
}
|
1153 |
|
|
/* Kick the chip rx & tx channels into life */
|
1154 |
|
|
spin_lock_irq(&fep->lock);
|
1155 |
|
|
emac_kick(fep);
|
1156 |
|
|
spin_unlock_irq(&fep->lock);
|
1157 |
|
|
|
1158 |
|
|
netif_start_queue(dev);
|
1159 |
|
|
bail:
|
1160 |
|
|
return rc;
|
1161 |
|
|
}
|
1162 |
|
|
|
1163 |
|
|
static int
|
1164 |
|
|
emac_close(struct net_device *dev)
|
1165 |
|
|
{
|
1166 |
|
|
struct ocp_enet_private *fep = dev->priv;
|
1167 |
|
|
volatile emac_t *emacp = fep->emacp;
|
1168 |
|
|
|
1169 |
|
|
/* XXX Stop IRQ emitting here */
|
1170 |
|
|
spin_lock_irq(&fep->lock);
|
1171 |
|
|
fep->opened = 0;
|
1172 |
|
|
mal_disable_tx_channels(fep->mal, fep->commac.tx_chan_mask);
|
1173 |
|
|
mal_disable_rx_channels(fep->mal, fep->commac.rx_chan_mask);
|
1174 |
|
|
netif_stop_queue(dev);
|
1175 |
|
|
|
1176 |
|
|
out_be32(&emacp->em0mr0, EMAC_M0_SRST);
|
1177 |
|
|
udelay(10);
|
1178 |
|
|
|
1179 |
|
|
if (emacp->em0mr0 & EMAC_M0_SRST) {
|
1180 |
|
|
/*not sure what to do here hopefully it clears before another open */
|
1181 |
|
|
printk(KERN_ERR "%s: Phy SoftReset didn't clear, no link?\n",
|
1182 |
|
|
dev->name);
|
1183 |
|
|
}
|
1184 |
|
|
|
1185 |
|
|
/* Free the irq's */
|
1186 |
|
|
free_irq(dev->irq, dev);
|
1187 |
|
|
free_irq(fep->wol_irq, dev);
|
1188 |
|
|
|
1189 |
|
|
spin_unlock_irq(&fep->lock);
|
1190 |
|
|
|
1191 |
|
|
return 0;
|
1192 |
|
|
}
|
1193 |
|
|
|
1194 |
|
|
static void
|
1195 |
|
|
emac_remove(struct ocp_device *ocpdev)
|
1196 |
|
|
{
|
1197 |
|
|
struct net_device *dev = ocp_get_drvdata(ocpdev);
|
1198 |
|
|
struct ocp_enet_private *ep = dev->priv;
|
1199 |
|
|
|
1200 |
|
|
/* FIXME: locking, races, ... */
|
1201 |
|
|
ep->going_away = 1;
|
1202 |
|
|
ocp_set_drvdata(ocpdev, NULL);
|
1203 |
|
|
if (ep->zmii_dev)
|
1204 |
|
|
emac_fini_zmii(ep->zmii_dev);
|
1205 |
|
|
|
1206 |
|
|
unregister_netdev(dev);
|
1207 |
|
|
del_timer_sync(&ep->link_timer);
|
1208 |
|
|
mal_unregister_commac(ep->mal, &ep->commac);
|
1209 |
|
|
iounmap((void *)ep->emacp);
|
1210 |
|
|
kfree(dev);
|
1211 |
|
|
}
|
1212 |
|
|
|
1213 |
|
|
struct mal_commac_ops emac_commac_ops = {
|
1214 |
|
|
.txeob = &emac_txeob_dev,
|
1215 |
|
|
.txde = &emac_txde_dev,
|
1216 |
|
|
.rxeob = &emac_rxeob_dev,
|
1217 |
|
|
.rxde = &emac_rxde_dev,
|
1218 |
|
|
};
|
1219 |
|
|
|
1220 |
|
|
static int
|
1221 |
|
|
emac_probe(struct ocp_device *ocpdev)
|
1222 |
|
|
{
|
1223 |
|
|
int rc = 0, i;
|
1224 |
|
|
bd_t *bd;
|
1225 |
|
|
struct net_device *ndev;
|
1226 |
|
|
struct ocp_enet_private *ep;
|
1227 |
|
|
struct ocp_device *maldev;
|
1228 |
|
|
struct ibm_ocp_mal *mal;
|
1229 |
|
|
struct ocp_func_emac_data *emacdata;
|
1230 |
|
|
struct ocp_device *mdiodev;
|
1231 |
|
|
struct net_device *mdio_ndev = NULL;
|
1232 |
|
|
int commac_reg = 0;
|
1233 |
|
|
u32 phy_map;
|
1234 |
|
|
|
1235 |
|
|
emacdata = (struct ocp_func_emac_data *)ocpdev->def->additions;
|
1236 |
|
|
if (emacdata == NULL) {
|
1237 |
|
|
printk(KERN_ERR "emac%d: Missing additional datas !\n", ocpdev->def->index);
|
1238 |
|
|
return -ENODEV;
|
1239 |
|
|
}
|
1240 |
|
|
|
1241 |
|
|
/* Wait for MAL to show up */
|
1242 |
|
|
maldev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_MAL, emacdata->mal_idx);
|
1243 |
|
|
if (maldev == NULL)
|
1244 |
|
|
return -EAGAIN;
|
1245 |
|
|
/* Check if MAL driver attached yet */
|
1246 |
|
|
mal = (struct ibm_ocp_mal *)ocp_get_drvdata(maldev);
|
1247 |
|
|
if (mal == NULL)
|
1248 |
|
|
return -EAGAIN;
|
1249 |
|
|
|
1250 |
|
|
/* If we depend on another EMAC for MDIO, wait for it to show up */
|
1251 |
|
|
if (emacdata->mdio_idx >= 0 && emacdata->mdio_idx != ocpdev->def->index) {
|
1252 |
|
|
mdiodev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_EMAC, emacdata->mdio_idx);
|
1253 |
|
|
if (mdiodev == NULL)
|
1254 |
|
|
return -EAGAIN;
|
1255 |
|
|
mdio_ndev = (struct net_device *)ocp_get_drvdata(mdiodev);
|
1256 |
|
|
if (mdio_ndev == NULL)
|
1257 |
|
|
return -EAGAIN;
|
1258 |
|
|
}
|
1259 |
|
|
|
1260 |
|
|
/* Allocate our net_device structure */
|
1261 |
|
|
ndev = alloc_etherdev(sizeof (struct ocp_enet_private));
|
1262 |
|
|
if (ndev == NULL) {
|
1263 |
|
|
printk(KERN_ERR
|
1264 |
|
|
"emac%d: Could not allocate ethernet device.\n", ocpdev->def->index);
|
1265 |
|
|
return -ENOMEM;
|
1266 |
|
|
}
|
1267 |
|
|
ep = ndev->priv;
|
1268 |
|
|
memset(ep, 0, sizeof(*ep));
|
1269 |
|
|
ep->ndev = ndev;
|
1270 |
|
|
ep->ocpdev = ocpdev;
|
1271 |
|
|
ndev->irq = ocpdev->def->irq;
|
1272 |
|
|
ep->wol_irq = emacdata->wol_irq;
|
1273 |
|
|
ep->mdio_dev = mdio_ndev;
|
1274 |
|
|
ocp_set_drvdata(ocpdev, ndev);
|
1275 |
|
|
spin_lock_init(&ep->lock);
|
1276 |
|
|
|
1277 |
|
|
/* Fill out MAL informations and register commac */
|
1278 |
|
|
ep->mal = mal;
|
1279 |
|
|
ep->mal_tx_chan = emacdata->mal_tx1_chan;
|
1280 |
|
|
ep->mal_rx_chan = emacdata->mal_rx_chan;
|
1281 |
|
|
ep->commac.ops = &emac_commac_ops;
|
1282 |
|
|
ep->commac.dev = ndev;
|
1283 |
|
|
ep->commac.tx_chan_mask = MAL_CHAN_MASK(ep->mal_tx_chan);
|
1284 |
|
|
ep->commac.rx_chan_mask = MAL_CHAN_MASK(ep->mal_rx_chan);
|
1285 |
|
|
rc = mal_register_commac(ep->mal, &ep->commac);
|
1286 |
|
|
if (rc != 0)
|
1287 |
|
|
goto bail;
|
1288 |
|
|
commac_reg = 1;
|
1289 |
|
|
|
1290 |
|
|
/* Map our MMIOs */
|
1291 |
|
|
ep->emacp = (volatile emac_t *)ioremap(ocpdev->def->paddr, sizeof (emac_t));
|
1292 |
|
|
|
1293 |
|
|
/* Check if we need to attach to a ZMII */
|
1294 |
|
|
if (emacdata->zmii_idx >= 0) {
|
1295 |
|
|
ep->zmii_input = emacdata->zmii_mux;
|
1296 |
|
|
ep->zmii_dev = ocp_find_device(OCP_ANY_ID, OCP_FUNC_ZMII, emacdata->zmii_idx);
|
1297 |
|
|
if (ep->zmii_dev == NULL)
|
1298 |
|
|
printk(KERN_WARNING "emac%d: ZMII %d requested but not found !\n",
|
1299 |
|
|
ocpdev->def->index, emacdata->zmii_idx);
|
1300 |
|
|
else if ((rc = emac_init_zmii(ep->zmii_dev, ZMII_AUTO)) != 0)
|
1301 |
|
|
goto bail;
|
1302 |
|
|
}
|
1303 |
|
|
|
1304 |
|
|
/* Reset the EMAC */
|
1305 |
|
|
out_be32(&ep->emacp->em0mr0, EMAC_M0_SRST);
|
1306 |
|
|
udelay(20);
|
1307 |
|
|
for (i=0; i<100; i++) {
|
1308 |
|
|
if ((in_be32(&ep->emacp->em0mr0) & EMAC_M0_SRST) == 0)
|
1309 |
|
|
break;
|
1310 |
|
|
udelay(10);
|
1311 |
|
|
}
|
1312 |
|
|
|
1313 |
|
|
if (i >= 100) {
|
1314 |
|
|
printk(KERN_ERR "emac%d: Cannot reset EMAC\n", ocpdev->def->index);
|
1315 |
|
|
rc = -ENXIO;
|
1316 |
|
|
goto bail;
|
1317 |
|
|
}
|
1318 |
|
|
|
1319 |
|
|
/* Init link monitoring timer */
|
1320 |
|
|
init_timer(&ep->link_timer);
|
1321 |
|
|
ep->link_timer.function = emac_link_timer;
|
1322 |
|
|
ep->link_timer.data = (unsigned long) ep;
|
1323 |
|
|
ep->timer_ticks = 0;
|
1324 |
|
|
|
1325 |
|
|
/* Fill up the mii_phy structure */
|
1326 |
|
|
ep->phy_mii.dev = ndev;
|
1327 |
|
|
ep->phy_mii.mdio_read = emac_phy_read;
|
1328 |
|
|
ep->phy_mii.mdio_write = emac_phy_write;
|
1329 |
|
|
|
1330 |
|
|
/* Find PHY */
|
1331 |
|
|
phy_map = emac_phy_map[ocpdev->def->index] | busy_phy_map;
|
1332 |
|
|
for (i = 0; i <= 0x1f; i++, phy_map >>= 1) {
|
1333 |
|
|
if ((phy_map & 0x1) == 0) {
|
1334 |
|
|
int val = emac_phy_read(ndev, i, MII_BMCR);
|
1335 |
|
|
if (val != 0xffff && val != -1)
|
1336 |
|
|
break;
|
1337 |
|
|
}
|
1338 |
|
|
}
|
1339 |
|
|
if (i == 0x20) {
|
1340 |
|
|
printk(KERN_WARNING "emac%d: Can't find PHY.\n", ocpdev->def->index);
|
1341 |
|
|
rc = -ENODEV;
|
1342 |
|
|
goto bail;
|
1343 |
|
|
}
|
1344 |
|
|
busy_phy_map |= 1 << i;
|
1345 |
|
|
ep->mii_phy_addr = i;
|
1346 |
|
|
rc = mii_phy_probe(&ep->phy_mii, i);
|
1347 |
|
|
if (rc) {
|
1348 |
|
|
printk(KERN_WARNING "emac%d: Failed to probe PHY type.\n", ocpdev->def->index);
|
1349 |
|
|
rc = -ENODEV;
|
1350 |
|
|
goto bail;
|
1351 |
|
|
}
|
1352 |
|
|
|
1353 |
|
|
/* Setup initial PHY config & startup aneg */
|
1354 |
|
|
if (ep->phy_mii.def->ops->init)
|
1355 |
|
|
ep->phy_mii.def->ops->init(&ep->phy_mii);
|
1356 |
|
|
netif_carrier_off(ndev);
|
1357 |
|
|
if (ep->phy_mii.def->features & SUPPORTED_Autoneg)
|
1358 |
|
|
ep->want_autoneg = 1;
|
1359 |
|
|
emac_start_link(ep, NULL);
|
1360 |
|
|
|
1361 |
|
|
|
1362 |
|
|
/* read the MAC Address */
|
1363 |
|
|
bd = (bd_t *) __res;
|
1364 |
|
|
for (i = 0; i < 6; i++)
|
1365 |
|
|
ndev->dev_addr[i] = bd->BD_EMAC_ADDR(ocpdev->def->index, i); /* Marco to disques array */
|
1366 |
|
|
|
1367 |
|
|
/* Fill in the driver function table */
|
1368 |
|
|
ndev->open = &emac_open;
|
1369 |
|
|
ndev->hard_start_xmit = &emac_start_xmit;
|
1370 |
|
|
ndev->stop = &emac_close;
|
1371 |
|
|
ndev->get_stats = &emac_stats;
|
1372 |
|
|
ndev->set_multicast_list = &emac_set_multicast_list;
|
1373 |
|
|
ndev->do_ioctl = &emac_ioctl;
|
1374 |
|
|
|
1375 |
|
|
SET_MODULE_OWNER(ndev);
|
1376 |
|
|
|
1377 |
|
|
rc = register_netdev(ndev);
|
1378 |
|
|
if (rc != 0)
|
1379 |
|
|
goto bail;
|
1380 |
|
|
|
1381 |
|
|
printk("%s: IBM emac, MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
|
1382 |
|
|
ndev->name,
|
1383 |
|
|
ndev->dev_addr[0], ndev->dev_addr[1], ndev->dev_addr[2],
|
1384 |
|
|
ndev->dev_addr[3], ndev->dev_addr[4], ndev->dev_addr[5]);
|
1385 |
|
|
printk(KERN_INFO "%s: Found %s PHY (0x%02x)\n",
|
1386 |
|
|
ndev->name, ep->phy_mii.def->name, ep->mii_phy_addr);
|
1387 |
|
|
|
1388 |
|
|
|
1389 |
|
|
bail:
|
1390 |
|
|
if (rc && commac_reg)
|
1391 |
|
|
mal_unregister_commac(ep->mal, &ep->commac);
|
1392 |
|
|
if (rc && ndev)
|
1393 |
|
|
kfree(ndev);
|
1394 |
|
|
|
1395 |
|
|
return rc;
|
1396 |
|
|
|
1397 |
|
|
}
|
1398 |
|
|
|
1399 |
|
|
/* Structure for a device driver */
|
1400 |
|
|
static struct ocp_device_id emac_ids[] =
|
1401 |
|
|
{
|
1402 |
|
|
{ .vendor = OCP_ANY_ID, .function = OCP_FUNC_EMAC },
|
1403 |
|
|
{ .vendor = OCP_VENDOR_INVALID }
|
1404 |
|
|
};
|
1405 |
|
|
|
1406 |
|
|
static struct ocp_driver emac_driver =
|
1407 |
|
|
{
|
1408 |
|
|
.name = "emac",
|
1409 |
|
|
.id_table = emac_ids,
|
1410 |
|
|
|
1411 |
|
|
.probe = emac_probe,
|
1412 |
|
|
.remove = emac_remove,
|
1413 |
|
|
};
|
1414 |
|
|
|
1415 |
|
|
/*
 * emac_init - module entry point
 *
 * Clamps the skb_res module parameter and registers the OCP driver.
 * Returns 0 on success or -ENODEV when no device was bound.
 */
static int __init
emac_init(void)
{
	int rc;

	printk(KERN_INFO DRV_NAME ": " DRV_DESC ", version " DRV_VERSION "\n");
	printk(KERN_INFO "Maintained by " DRV_AUTHOR "\n");

	/* skb_res: extra skb head reservation, capped at 2 */
	if (skb_res > 2) {
		printk(KERN_WARNING "Invalid skb_res: %d, cropping to 2\n", skb_res);
		skb_res = 2;
	}
	/* NOTE(review): this treats a zero return as failure, which implies
	 * ocp_register_driver() returns the number of devices bound
	 * (old pci_register_driver-style semantics) -- confirm against the
	 * OCP core before changing. */
	rc = ocp_register_driver(&emac_driver);
	if (rc == 0) {
		ocp_unregister_driver(&emac_driver);
		return -ENODEV;
	}

	return 0;
}
|
1435 |
|
|
|
1436 |
|
|
|
1437 |
|
|
|
1438 |
|
|
/*
 * emac_exit - module exit point
 *
 * Unregisters from the OCP bus, which invokes emac_remove() for each
 * device still bound to this driver.
 */
static void __exit
emac_exit(void)
{
	ocp_unregister_driver(&emac_driver);
}
|
1443 |
|
|
|
1444 |
|
|
/* Hook the module loader to our init/exit routines */
module_init(emac_init);
module_exit(emac_exit);
|