OpenCores
URL https://opencores.org/ocsvn/or1k/or1k/trunk

Subversion Repositories or1k

[/] [or1k/] [trunk/] [linux/] [linux-2.4/] [drivers/] [net/] [e1000/] [e1000_main.c] - Blame information for rev 1765

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 1275 phoenix
/*******************************************************************************
2
 
3
 
4
  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
5
 
6
  This program is free software; you can redistribute it and/or modify it
7
  under the terms of the GNU General Public License as published by the Free
8
  Software Foundation; either version 2 of the License, or (at your option)
9
  any later version.
10
 
11
  This program is distributed in the hope that it will be useful, but WITHOUT
12
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
  more details.
15
 
16
  You should have received a copy of the GNU General Public License along with
17
  this program; if not, write to the Free Software Foundation, Inc., 59
18
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
19
 
20
  The full GNU General Public License is included in this distribution in the
21
  file called LICENSE.
22
 
23
  Contact Information:
24
  Linux NICS <linux.nics@intel.com>
25
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
 
27
*******************************************************************************/
28
 
29
#include "e1000.h"
30
 
31
/* Change Log
32
 *
33
 * 5.2.30.1     1/29/03
34
 *   o Set VLAN filtering to IEEE 802.1Q after reset so we don't break
35
 *     SoL connections that use VLANs.
36
 *   o Allow 1000/Full setting for AutoNeg param for Fiber connections
37
 *     Jon D Mason [jonmason@us.ibm.com].
38
 *   o Race between Tx queue and Tx clean fixed with a spin lock.
39
 *   o Added netpoll support.
40
 *   o Fixed endianess bug causing ethtool loopback diags to fail on ppc.
41
 *   o Use pdev->irq rather than netdev->irq in preparation for MSI support.
42
 *   o Report driver message on user override of InterruptThrottleRate
43
 *     module parameter.
44
 *   o Change I/O address storage from uint32_t to unsigned long.
45
 *
46
 * 5.2.22       10/15/03
47
 *   o Bug fix: SERDES devices might be connected to a back-plane
48
 *     switch that doesn't support auto-neg, so add the capability
49
 *     to force 1000/Full.  Also, since forcing 1000/Full, sample
50
 *     RxSynchronize bit to detect link state.
51
 *   o Bug fix: Flow control settings for hi/lo watermark didn't
52
 *     consider changes in the Rx FIFO size, which could occur with
53
 *     Jumbo Frames or with the reduced FIFO in 82547.
54
 *   o Better propagation of error codes. [Janice Girouard
55
 *     (janiceg@us.ibm.com)].
56
 *   o Bug fix: hang under heavy Tx stress when running out of Tx
57
 *     descriptors; wasn't clearing context descriptor when backing
58
 *     out of send because of no-resource condition.
59
 *   o Bug fix: check netif_running in dev->poll so we don't have to
60
 *     hang in dev->close until all polls are finished.  [Robert
61
 *     Ollson (robert.olsson@data.slu.se)].
62
 *   o Revert TxDescriptor ring size back to 256 since change to 1024
63
 *     wasn't accepted into the kernel.
64
 *
65
 * 5.2.16       8/8/03
66
 */
67
 
68
/* Driver identification strings. Deliberately non-static: companion
 * e1000 source files (parameter/ethtool support) reference them, and
 * e1000_init_module() prints them as the load banner. */
char e1000_driver_name[] = "e1000";
char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
char e1000_driver_version[] = "5.2.30.1-k1";
char e1000_copyright[] = "Copyright (c) 1999-2004 Intel Corporation.";
72
 
73
/* e1000_pci_tbl - PCI Device ID Table
74
 *
75
 * Wildcard entries (PCI_ANY_ID) should come last
76
 * Last entry must be all 0s
77
 *
78
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
79
 *   Class, Class Mask, private data (not used) }
80
 */
81
/* All entries use Intel's PCI vendor ID (0x8086); subvendor/subdevice
 * are wildcards and class/class-mask/driver_data are unused (0). */
static struct pci_device_id e1000_pci_tbl[] = {
        {0x8086, 0x1000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x100C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x100D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x100E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x100F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x101D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x101E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1026, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1027, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1075, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1076, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1077, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1078, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x1079, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x107A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        {0x8086, 0x107B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
        /* required last entry */
        {0,}
};

/* Expose the ID table so module tools (depmod/hotplug) can map
 * supported devices to this module. */
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
117
 
118
/* Shared with other e1000 source files (hence no 'static'):
 * bring-up/tear-down, reset, descriptor-ring management and stats. */
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_setup_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_rx_resources(struct e1000_adapter *adapter);
void e1000_free_tx_resources(struct e1000_adapter *adapter);
void e1000_free_rx_resources(struct e1000_adapter *adapter);
void e1000_update_stats(struct e1000_adapter *adapter);

/* Local Function Prototypes */

/* Module / PCI device lifecycle */
static int e1000_init_module(void);
static void e1000_exit_module(void);
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void __devexit e1000_remove(struct pci_dev *pdev);
static int e1000_sw_init(struct e1000_adapter *adapter);

/* net_device operations and hardware configuration */
static int e1000_open(struct net_device *netdev);
static int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
static void e1000_clean_tx_ring(struct e1000_adapter *adapter);
static void e1000_clean_rx_ring(struct e1000_adapter *adapter);
static void e1000_set_multi(struct net_device *netdev);

/* Timer callbacks ('data' is a struct e1000_adapter pointer) */
static void e1000_update_phy_info(unsigned long data);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);

static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);

/* Interrupt handling and ring cleanup (Rx cleanup signature depends
 * on whether NAPI polling is compiled in) */
static inline void e1000_irq_disable(struct e1000_adapter *adapter);
static inline void e1000_irq_enable(struct e1000_adapter *adapter);
static irqreturn_t e1000_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct net_device *netdev, int *budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
                                    int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
                           int cmd);
static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
static inline void e1000_rx_checksum(struct e1000_adapter *adapter,
                                     struct e1000_rx_desc *rx_desc,
                                     struct sk_buff *skb);
static void e1000_tx_timeout(struct net_device *dev);
static void e1000_tx_timeout_task(struct net_device *dev);
static void e1000_smartspeed(struct e1000_adapter *adapter);
static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
                                              struct sk_buff *skb);

/* 802.1Q VLAN hooks */
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);

/* Power management / reboot handling */
static int e1000_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
static int e1000_suspend(struct pci_dev *pdev, uint32_t state);
#ifdef CONFIG_PM
static int e1000_resume(struct pci_dev *pdev);
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void e1000_netpoll (struct net_device *dev);
#endif
191
 
192
/* Reboot notifier, registered in e1000_init_module(): gives the driver
 * (via e1000_notify_reboot) a chance to put the adapter into a safe
 * state before the system reboots or halts. */
struct notifier_block e1000_notifier_reboot = {
        .notifier_call  = e1000_notify_reboot,
        .next           = NULL,
        .priority       = 0
};
197
 
198
/* Exported from other modules */

/* Implemented elsewhere in the e1000 driver (not in this file) --
 * presumably the module-parameter and ethtool support code. */
extern void e1000_check_options(struct e1000_adapter *adapter);
extern int e1000_ethtool_ioctl(struct net_device *netdev, struct ifreq *ifr);
202
 
203
/* PCI driver glue: binds the devices listed in e1000_pci_tbl to the
 * probe/remove (and, with CONFIG_PM, suspend/resume) entry points. */
static struct pci_driver e1000_driver = {
        .name     = e1000_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
        /* Power Management Hooks */
#ifdef CONFIG_PM
        .suspend  = e1000_suspend,
        .resume   = e1000_resume
#endif
};

MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL");
218
 
219
/**
220
 * e1000_init_module - Driver Registration Routine
221
 *
222
 * e1000_init_module is the first routine called when the driver is
223
 * loaded. All it does is register with the PCI subsystem.
224
 **/
225
 
226
static int __init
227
e1000_init_module(void)
228
{
229
        int ret;
230
        printk(KERN_INFO "%s - version %s\n",
231
               e1000_driver_string, e1000_driver_version);
232
 
233
        printk(KERN_INFO "%s\n", e1000_copyright);
234
 
235
        ret = pci_module_init(&e1000_driver);
236
        if(ret >= 0) {
237
                register_reboot_notifier(&e1000_notifier_reboot);
238
        }
239
        return ret;
240
}
241
 
242
module_init(e1000_init_module);
243
 
244
/**
245
 * e1000_exit_module - Driver Exit Cleanup Routine
246
 *
247
 * e1000_exit_module is called just before the driver is removed
248
 * from memory.
249
 **/
250
 
251
static void __exit
e1000_exit_module(void)
{
        /* Undo e1000_init_module(): drop the reboot hook, then detach
         * every bound device by unregistering from the PCI core. */
        unregister_reboot_notifier(&e1000_notifier_reboot);
        pci_unregister_driver(&e1000_driver);
}

module_exit(e1000_exit_module);
259
 
260
 
261
int
262
e1000_up(struct e1000_adapter *adapter)
263
{
264
        struct net_device *netdev = adapter->netdev;
265
        int err;
266
 
267
        /* hardware has been reset, we need to reload some things */
268
 
269
        e1000_set_multi(netdev);
270
 
271
        e1000_restore_vlan(adapter);
272
 
273
        e1000_configure_tx(adapter);
274
        e1000_setup_rctl(adapter);
275
        e1000_configure_rx(adapter);
276
        e1000_alloc_rx_buffers(adapter);
277
 
278
        if((err = request_irq(adapter->pdev->irq, &e1000_intr,
279
                              SA_SHIRQ | SA_SAMPLE_RANDOM,
280
                              netdev->name, netdev)))
281
                return err;
282
 
283
        mod_timer(&adapter->watchdog_timer, jiffies);
284
        e1000_irq_enable(adapter);
285
 
286
        return 0;
287
}
288
 
289
void
e1000_down(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        /* Mask interrupts and detach the handler before killing the
         * timers, so none of them can rearm behind our back. */
        e1000_irq_disable(adapter);
        free_irq(adapter->pdev->irq, netdev);
        del_timer_sync(&adapter->tx_fifo_stall_timer);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
        /* Report link down and stop the Tx queue for the stack. */
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
        netif_stop_queue(netdev);

        /* Reset the MAC, then discard anything still held in the
         * software Tx/Rx rings. */
        e1000_reset(adapter);
        e1000_clean_tx_ring(adapter);
        e1000_clean_rx_ring(adapter);
}
308
 
309
void
310
e1000_reset(struct e1000_adapter *adapter)
311
{
312
        uint32_t pba;
313
        /* Repartition Pba for greater than 9k mtu
314
         * To take effect CTRL.RST is required.
315
         */
316
 
317
        if(adapter->hw.mac_type < e1000_82547) {
318
                if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
319
                        pba = E1000_PBA_40K;
320
                else
321
                        pba = E1000_PBA_48K;
322
        } else {
323
                if(adapter->rx_buffer_len > E1000_RXBUFFER_8192)
324
                        pba = E1000_PBA_22K;
325
                else
326
                        pba = E1000_PBA_30K;
327
                adapter->tx_fifo_head = 0;
328
                adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
329
                adapter->tx_fifo_size =
330
                        (E1000_PBA_40K - pba) << E1000_TX_FIFO_SIZE_SHIFT;
331
                atomic_set(&adapter->tx_fifo_stall, 0);
332
        }
333
        E1000_WRITE_REG(&adapter->hw, PBA, pba);
334
 
335
        /* flow control settings */
336
        adapter->hw.fc_high_water = pba - E1000_FC_HIGH_DIFF;
337
        adapter->hw.fc_low_water = pba - E1000_FC_LOW_DIFF;
338
        adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
339
        adapter->hw.fc_send_xon = 1;
340
        adapter->hw.fc = adapter->hw.original_fc;
341
 
342
        e1000_reset_hw(&adapter->hw);
343
        if(adapter->hw.mac_type >= e1000_82544)
344
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
345
        e1000_init_hw(&adapter->hw);
346
 
347
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
348
        E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
349
 
350
        e1000_reset_adaptive(&adapter->hw);
351
        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
352
}
353
 
354
/**
355
 * e1000_probe - Device Initialization Routine
356
 * @pdev: PCI device information struct
357
 * @ent: entry in e1000_pci_tbl
358
 *
359
 * Returns 0 on success, negative on failure
360
 *
361
 * e1000_probe initializes an adapter identified by a pci_dev structure.
362
 * The OS initialization, configuring of the adapter private structure,
363
 * and a hardware reset occur.
364
 **/
365
 
366
static int __devinit
367
e1000_probe(struct pci_dev *pdev,
368
            const struct pci_device_id *ent)
369
{
370
        struct net_device *netdev;
371
        struct e1000_adapter *adapter;
372
        static int cards_found = 0;
373
        unsigned long mmio_start;
374
        int mmio_len;
375
        int pci_using_dac;
376
        int i;
377
        int err;
378
        uint16_t eeprom_data;
379
 
380
        if((err = pci_enable_device(pdev)))
381
                return err;
382
 
383
        if(!(err = pci_set_dma_mask(pdev, PCI_DMA_64BIT))) {
384
                pci_using_dac = 1;
385
        } else {
386
                if((err = pci_set_dma_mask(pdev, PCI_DMA_32BIT))) {
387
                        E1000_ERR("No usable DMA configuration, aborting\n");
388
                        return err;
389
                }
390
                pci_using_dac = 0;
391
        }
392
 
393
        if((err = pci_request_regions(pdev, e1000_driver_name)))
394
                return err;
395
 
396
        pci_set_master(pdev);
397
 
398
        netdev = alloc_etherdev(sizeof(struct e1000_adapter));
399
        if(!netdev) {
400
                err = -ENOMEM;
401
                goto err_alloc_etherdev;
402
        }
403
 
404
        SET_MODULE_OWNER(netdev);
405
 
406
        pci_set_drvdata(pdev, netdev);
407
        adapter = netdev->priv;
408
        adapter->netdev = netdev;
409
        adapter->pdev = pdev;
410
        adapter->hw.back = adapter;
411
 
412
        mmio_start = pci_resource_start(pdev, BAR_0);
413
        mmio_len = pci_resource_len(pdev, BAR_0);
414
 
415
        adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
416
        if(!adapter->hw.hw_addr) {
417
                err = -EIO;
418
                goto err_ioremap;
419
        }
420
 
421
        for(i = BAR_1; i <= BAR_5; i++) {
422
                if(pci_resource_len(pdev, i) == 0)
423
                        continue;
424
                if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
425
                        adapter->hw.io_base = pci_resource_start(pdev, i);
426
                        break;
427
                }
428
        }
429
 
430
        netdev->open = &e1000_open;
431
        netdev->stop = &e1000_close;
432
        netdev->hard_start_xmit = &e1000_xmit_frame;
433
        netdev->get_stats = &e1000_get_stats;
434
        netdev->set_multicast_list = &e1000_set_multi;
435
        netdev->set_mac_address = &e1000_set_mac;
436
        netdev->change_mtu = &e1000_change_mtu;
437
        netdev->do_ioctl = &e1000_ioctl;
438
        netdev->tx_timeout = &e1000_tx_timeout;
439
        netdev->watchdog_timeo = 5 * HZ;
440
#ifdef CONFIG_E1000_NAPI
441
        netdev->poll = &e1000_clean;
442
        netdev->weight = 64;
443
#endif
444
        netdev->vlan_rx_register = e1000_vlan_rx_register;
445
        netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
446
        netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
447
#ifdef CONFIG_NET_POLL_CONTROLLER
448
        netdev->poll_controller = e1000_netpoll;
449
#endif
450
 
451
        netdev->mem_start = mmio_start;
452
        netdev->mem_end = mmio_start + mmio_len;
453
        netdev->base_addr = adapter->hw.io_base;
454
 
455
        adapter->bd_number = cards_found;
456
 
457
        /* setup the private structure */
458
 
459
        if((err = e1000_sw_init(adapter)))
460
                goto err_sw_init;
461
 
462
        if(adapter->hw.mac_type >= e1000_82543) {
463
                netdev->features = NETIF_F_SG |
464
                                   NETIF_F_HW_CSUM |
465
                                   NETIF_F_HW_VLAN_TX |
466
                                   NETIF_F_HW_VLAN_RX |
467
                                   NETIF_F_HW_VLAN_FILTER;
468
        } else {
469
                netdev->features = NETIF_F_SG;
470
        }
471
 
472
#ifdef NETIF_F_TSO
473
        if((adapter->hw.mac_type >= e1000_82544) &&
474
           (adapter->hw.mac_type != e1000_82547))
475
                netdev->features |= NETIF_F_TSO;
476
#endif
477
 
478
        if(pci_using_dac)
479
                netdev->features |= NETIF_F_HIGHDMA;
480
 
481
        /* before reading the EEPROM, reset the controller to
482
         * put the device in a known good starting state */
483
 
484
        e1000_reset_hw(&adapter->hw);
485
 
486
        /* make sure the EEPROM is good */
487
 
488
        if(e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
489
                printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
490
                err = -EIO;
491
                goto err_eeprom;
492
        }
493
 
494
        /* copy the MAC address out of the EEPROM */
495
 
496
        e1000_read_mac_addr(&adapter->hw);
497
        memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
498
 
499
        if(!is_valid_ether_addr(netdev->dev_addr)) {
500
                err = -EIO;
501
                goto err_eeprom;
502
        }
503
 
504
        e1000_read_part_num(&adapter->hw, &(adapter->part_num));
505
 
506
        e1000_get_bus_info(&adapter->hw);
507
 
508
        init_timer(&adapter->tx_fifo_stall_timer);
509
        adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
510
        adapter->tx_fifo_stall_timer.data = (unsigned long) adapter;
511
 
512
        init_timer(&adapter->watchdog_timer);
513
        adapter->watchdog_timer.function = &e1000_watchdog;
514
        adapter->watchdog_timer.data = (unsigned long) adapter;
515
 
516
        init_timer(&adapter->phy_info_timer);
517
        adapter->phy_info_timer.function = &e1000_update_phy_info;
518
        adapter->phy_info_timer.data = (unsigned long) adapter;
519
 
520
        INIT_TQUEUE(&adapter->tx_timeout_task,
521
                (void (*)(void *))e1000_tx_timeout_task, netdev);
522
 
523
        register_netdev(netdev);
524
 
525
        /* we're going to reset, so assume we have no link for now */
526
 
527
        netif_carrier_off(netdev);
528
        netif_stop_queue(netdev);
529
 
530
        printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Connection\n",
531
               netdev->name);
532
        e1000_check_options(adapter);
533
 
534
        /* Initial Wake on LAN setting
535
         * If APM wake is enabled in the EEPROM,
536
         * enable the ACPI Magic Packet filter
537
         */
538
 
539
        switch(adapter->hw.mac_type) {
540
        case e1000_82542_rev2_0:
541
        case e1000_82542_rev2_1:
542
        case e1000_82543:
543
                break;
544
        case e1000_82546:
545
        case e1000_82546_rev_3:
546
                if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1)
547
                   && (adapter->hw.media_type == e1000_media_type_copper)) {
548
                        e1000_read_eeprom(&adapter->hw,
549
                                EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
550
                        break;
551
                }
552
                /* Fall Through */
553
        default:
554
                e1000_read_eeprom(&adapter->hw,
555
                        EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
556
                break;
557
        }
558
        if(eeprom_data & E1000_EEPROM_APME)
559
                adapter->wol |= E1000_WUFC_MAG;
560
 
561
        /* reset the hardware with the new settings */
562
 
563
        e1000_reset(adapter);
564
 
565
        cards_found++;
566
        return 0;
567
 
568
err_sw_init:
569
err_eeprom:
570
        iounmap(adapter->hw.hw_addr);
571
err_ioremap:
572
        free_netdev(netdev);
573
err_alloc_etherdev:
574
        pci_release_regions(pdev);
575
        return err;
576
}
577
 
578
/**
579
 * e1000_remove - Device Removal Routine
580
 * @pdev: PCI device information struct
581
 *
582
 * e1000_remove is called by the PCI subsystem to alert the driver
583
 * that it should release a PCI device.  The could be caused by a
584
 * Hot-Plug event, or because the driver is going to be removed from
585
 * memory.
586
 **/
587
 
588
static void __devexit
589
e1000_remove(struct pci_dev *pdev)
590
{
591
        struct net_device *netdev = pci_get_drvdata(pdev);
592
        struct e1000_adapter *adapter = netdev->priv;
593
        uint32_t manc;
594
 
595
        if(adapter->hw.mac_type >= e1000_82540 &&
596
           adapter->hw.media_type == e1000_media_type_copper) {
597
                manc = E1000_READ_REG(&adapter->hw, MANC);
598
                if(manc & E1000_MANC_SMBUS_EN) {
599
                        manc |= E1000_MANC_ARP_EN;
600
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
601
                }
602
        }
603
 
604
        unregister_netdev(netdev);
605
 
606
        e1000_phy_hw_reset(&adapter->hw);
607
 
608
        iounmap(adapter->hw.hw_addr);
609
        pci_release_regions(pdev);
610
 
611
        free_netdev(netdev);
612
}
613
 
614
/**
615
 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
616
 * @adapter: board private structure to initialize
617
 *
618
 * e1000_sw_init initializes the Adapter private data structure.
619
 * Fields are initialized based on PCI device information and
620
 * OS network device settings (MTU size).
621
 **/
622
 
623
static int __devinit
624
e1000_sw_init(struct e1000_adapter *adapter)
625
{
626
        struct e1000_hw *hw = &adapter->hw;
627
        struct net_device *netdev = adapter->netdev;
628
        struct pci_dev *pdev = adapter->pdev;
629
 
630
        /* PCI config space info */
631
 
632
        hw->vendor_id = pdev->vendor;
633
        hw->device_id = pdev->device;
634
        hw->subsystem_vendor_id = pdev->subsystem_vendor;
635
        hw->subsystem_id = pdev->subsystem_device;
636
 
637
        pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
638
 
639
        pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
640
 
641
        adapter->rx_buffer_len = E1000_RXBUFFER_2048;
642
        hw->max_frame_size = netdev->mtu +
643
                             ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
644
        hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
645
 
646
        /* identify the MAC */
647
 
648
        if (e1000_set_mac_type(hw)) {
649
                E1000_ERR("Unknown MAC Type\n");
650
                return -EIO;
651
        }
652
 
653
        /* initialize eeprom parameters */
654
 
655
        e1000_init_eeprom_params(hw);
656
 
657
        if((hw->mac_type == e1000_82541) ||
658
           (hw->mac_type == e1000_82547) ||
659
           (hw->mac_type == e1000_82541_rev_2) ||
660
           (hw->mac_type == e1000_82547_rev_2))
661
                hw->phy_init_script = 1;
662
 
663
        e1000_set_media_type(hw);
664
 
665
        if(hw->mac_type < e1000_82543)
666
                hw->report_tx_early = 0;
667
        else
668
                hw->report_tx_early = 1;
669
 
670
        hw->wait_autoneg_complete = FALSE;
671
        hw->tbi_compatibility_en = TRUE;
672
        hw->adaptive_ifs = TRUE;
673
 
674
        /* Copper options */
675
 
676
        if(hw->media_type == e1000_media_type_copper) {
677
                hw->mdix = AUTO_ALL_MODES;
678
                hw->disable_polarity_correction = FALSE;
679
                hw->master_slave = E1000_MASTER_SLAVE;
680
        }
681
 
682
        atomic_set(&adapter->irq_sem, 1);
683
        spin_lock_init(&adapter->stats_lock);
684
        spin_lock_init(&adapter->tx_lock);
685
 
686
        return 0;
687
}
688
 
689
/**
690
 * e1000_open - Called when a network interface is made active
691
 * @netdev: network interface device structure
692
 *
693
 * Returns 0 on success, negative value on failure
694
 *
695
 * The open entry point is called when a network interface is made
696
 * active by the system (IFF_UP).  At this point all resources needed
697
 * for transmit and receive operations are allocated, the interrupt
698
 * handler is registered with the OS, the watchdog timer is started,
699
 * and the stack is notified that the interface is ready.
700
 **/
701
 
702
static int
703
e1000_open(struct net_device *netdev)
704
{
705
        struct e1000_adapter *adapter = netdev->priv;
706
        int err;
707
 
708
        /* allocate transmit descriptors */
709
 
710
        if((err = e1000_setup_tx_resources(adapter)))
711
                goto err_setup_tx;
712
 
713
        /* allocate receive descriptors */
714
 
715
        if((err = e1000_setup_rx_resources(adapter)))
716
                goto err_setup_rx;
717
 
718
        if((err = e1000_up(adapter)))
719
                goto err_up;
720
 
721
        return 0;
722
 
723
err_up:
724
        e1000_free_rx_resources(adapter);
725
err_setup_rx:
726
        e1000_free_tx_resources(adapter);
727
err_setup_tx:
728
        e1000_reset(adapter);
729
 
730
        return err;
731
}
732
 
733
/**
734
 * e1000_close - Disables a network interface
735
 * @netdev: network interface device structure
736
 *
737
 * Returns 0, this is not allowed to fail
738
 *
739
 * The close entry point is called when an interface is de-activated
740
 * by the OS.  The hardware is still under the drivers control, but
741
 * needs to be disabled.  A global MAC reset is issued to stop the
742
 * hardware, and all transmit and receive resources are freed.
743
 **/
744
 
745
static int
e1000_close(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        /* Stop the hardware and interrupt sources first, then release
         * both descriptor rings. This path must always succeed. */
        e1000_down(adapter);

        e1000_free_tx_resources(adapter);
        e1000_free_rx_resources(adapter);

        return 0;
}
757
 
758
/**
759
 * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
760
 * @adapter: board private structure
761
 *
762
 * Return 0 on success, negative on failure
763
 **/
764
 
765
int
766
e1000_setup_tx_resources(struct e1000_adapter *adapter)
767
{
768
        struct e1000_desc_ring *txdr = &adapter->tx_ring;
769
        struct pci_dev *pdev = adapter->pdev;
770
        int size;
771
 
772
        size = sizeof(struct e1000_buffer) * txdr->count;
773
        txdr->buffer_info = kmalloc(size, GFP_KERNEL);
774
        if(!txdr->buffer_info) {
775
                return -ENOMEM;
776
        }
777
        memset(txdr->buffer_info, 0, size);
778
 
779
        /* round up to nearest 4K */
780
 
781
        txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
782
        E1000_ROUNDUP(txdr->size, 4096);
783
 
784
        txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
785
        if(!txdr->desc) {
786
                kfree(txdr->buffer_info);
787
                return -ENOMEM;
788
        }
789
        memset(txdr->desc, 0, txdr->size);
790
 
791
        txdr->next_to_use = 0;
792
        txdr->next_to_clean = 0;
793
 
794
        return 0;
795
}
796
 
797
/**
798
 * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
799
 * @adapter: board private structure
800
 *
801
 * Configure the Tx unit of the MAC after a reset.
802
 **/
803
 
804
static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
        uint64_t tdba = adapter->tx_ring.dma;
        uint32_t tdlen = adapter->tx_ring.count * sizeof(struct e1000_tx_desc);
        uint32_t tctl, tipg;

        /* Program the descriptor ring base address (64-bit value split
         * across two 32-bit registers) and the ring length in bytes. */
        E1000_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));

        E1000_WRITE_REG(&adapter->hw, TDLEN, tdlen);

        /* Setup the HW Tx Head and Tail descriptor pointers */

        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);

        /* Set the default values for the Tx Inter Packet Gap timer */

        switch (adapter->hw.mac_type) {
        case e1000_82542_rev2_0:
        case e1000_82542_rev2_1:
                /* 82542 uses its own IPG defaults */
                tipg = DEFAULT_82542_TIPG_IPGT;
                tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
                break;
        default:
                /* fiber/serdes media use a different IPGT than copper */
                if(adapter->hw.media_type == e1000_media_type_fiber ||
                   adapter->hw.media_type == e1000_media_type_internal_serdes)
                        tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
                else
                        tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
                tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
                tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
        }
        E1000_WRITE_REG(&adapter->hw, TIPG, tipg);

        /* Set the Tx Interrupt Delay register */

        E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);
        /* 82540 and newer additionally support an absolute delay (TADV) */
        if(adapter->hw.mac_type >= e1000_82540)
                E1000_WRITE_REG(&adapter->hw, TADV, adapter->tx_abs_int_delay);

        /* Program the Transmit Control Register */

        tctl = E1000_READ_REG(&adapter->hw, TCTL);

        tctl &= ~E1000_TCTL_CT;
        tctl |= E1000_TCTL_EN | E1000_TCTL_PSP |
                (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

        E1000_WRITE_REG(&adapter->hw, TCTL, tctl);

        e1000_config_collision_dist(&adapter->hw);

        /* Setup Transmit Descriptor Settings for eop descriptor */
        adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
                E1000_TXD_CMD_IFCS;

        /* choose which status-reporting command bit descriptors will
         * carry, based on the report_tx_early EEPROM/config setting */
        if(adapter->hw.report_tx_early == 1)
                adapter->txd_cmd |= E1000_TXD_CMD_RS;
        else
                adapter->txd_cmd |= E1000_TXD_CMD_RPS;

        /* Cache if we're 82544 running in PCI-X because we'll
         * need this to apply a workaround later in the send path. */
        if(adapter->hw.mac_type == e1000_82544 &&
           adapter->hw.bus_type == e1000_bus_type_pcix)
                adapter->pcix_82544 = 1;
}
874
 
875
/**
876
 * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
877
 * @adapter: board private structure
878
 *
879
 * Returns 0 on success, negative on failure
880
 **/
881
 
882
int
883
e1000_setup_rx_resources(struct e1000_adapter *adapter)
884
{
885
        struct e1000_desc_ring *rxdr = &adapter->rx_ring;
886
        struct pci_dev *pdev = adapter->pdev;
887
        int size;
888
 
889
        size = sizeof(struct e1000_buffer) * rxdr->count;
890
        rxdr->buffer_info = kmalloc(size, GFP_KERNEL);
891
        if(!rxdr->buffer_info) {
892
                return -ENOMEM;
893
        }
894
        memset(rxdr->buffer_info, 0, size);
895
 
896
        /* Round up to nearest 4K */
897
 
898
        rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
899
        E1000_ROUNDUP(rxdr->size, 4096);
900
 
901
        rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
902
 
903
        if(!rxdr->desc) {
904
                kfree(rxdr->buffer_info);
905
                return -ENOMEM;
906
        }
907
        memset(rxdr->desc, 0, rxdr->size);
908
 
909
        rxdr->next_to_clean = 0;
910
        rxdr->next_to_use = 0;
911
 
912
        return 0;
913
}
914
 
915
/**
916
 * e1000_setup_rctl - configure the receive control register
917
 * @adapter: Board private structure
918
 **/
919
 
920
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
        uint32_t rctl;

        rctl = E1000_READ_REG(&adapter->hw, RCTL);

        /* clear the multicast-offset field before OR-ing in the new one */
        rctl &= ~(3 << E1000_RCTL_MO_SHIFT);

        rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
                E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
                (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);

        /* store bad packets only when TBI compatibility is active */
        if(adapter->hw.tbi_compatibility_on == 1)
                rctl |= E1000_RCTL_SBP;
        else
                rctl &= ~E1000_RCTL_SBP;

        /* Set the receive buffer size; sizes above 2048 need the buffer
         * size extension (BSEX) bit and long-packet enable (LPE). */
        rctl &= ~(E1000_RCTL_SZ_4096);
        switch (adapter->rx_buffer_len) {
        case E1000_RXBUFFER_2048:
        default:
                rctl |= E1000_RCTL_SZ_2048;
                rctl &= ~(E1000_RCTL_BSEX | E1000_RCTL_LPE);
                break;
        case E1000_RXBUFFER_4096:
                rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case E1000_RXBUFFER_8192:
                rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        case E1000_RXBUFFER_16384:
                rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE;
                break;
        }

        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
958
 
959
/**
960
 * e1000_configure_rx - Configure 8254x Receive Unit after Reset
961
 * @adapter: board private structure
962
 *
963
 * Configure the Rx unit of the MAC after a reset.
964
 **/
965
 
966
static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
        uint64_t rdba = adapter->rx_ring.dma;
        uint32_t rdlen = adapter->rx_ring.count * sizeof(struct e1000_rx_desc);
        uint32_t rctl;
        uint32_t rxcsum;

        /* make sure receives are disabled while setting up the descriptors */

        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);

        /* set the Receive Delay Timer Register */

        E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

        if(adapter->hw.mac_type >= e1000_82540) {
                E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay);
                /* itr > 1 means a fixed interrupts/sec target; convert it
                 * to ITR register units (256 ns increments — see the
                 * 8254x developer's manual) */
                if(adapter->itr > 1)
                        E1000_WRITE_REG(&adapter->hw, ITR,
                                1000000000 / (adapter->itr * 256));
        }

        /* Setup the Base and Length of the Rx Descriptor Ring */

        E1000_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
        E1000_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));

        E1000_WRITE_REG(&adapter->hw, RDLEN, rdlen);

        /* Setup the HW Rx Head and Tail Descriptor Pointers */
        E1000_WRITE_REG(&adapter->hw, RDH, 0);
        E1000_WRITE_REG(&adapter->hw, RDT, 0);

        /* Enable 82543 Receive Checksum Offload for TCP and UDP */
        if((adapter->hw.mac_type >= e1000_82543) &&
           (adapter->rx_csum == TRUE)) {
                rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM);
                rxcsum |= E1000_RXCSUM_TUOFL;
                E1000_WRITE_REG(&adapter->hw, RXCSUM, rxcsum);
        }

        /* Enable Receives (restore the RCTL value read above, which had
         * the EN bit set by e1000_setup_rctl) */

        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
}
1013
 
1014
/**
1015
 * e1000_free_tx_resources - Free Tx Resources
1016
 * @adapter: board private structure
1017
 *
1018
 * Free all transmit software resources
1019
 **/
1020
 
1021
void
1022
e1000_free_tx_resources(struct e1000_adapter *adapter)
1023
{
1024
        struct pci_dev *pdev = adapter->pdev;
1025
 
1026
        e1000_clean_tx_ring(adapter);
1027
 
1028
        kfree(adapter->tx_ring.buffer_info);
1029
        adapter->tx_ring.buffer_info = NULL;
1030
 
1031
        pci_free_consistent(pdev, adapter->tx_ring.size,
1032
                            adapter->tx_ring.desc, adapter->tx_ring.dma);
1033
 
1034
        adapter->tx_ring.desc = NULL;
1035
}
1036
 
1037
/**
1038
 * e1000_clean_tx_ring - Free Tx Buffers
1039
 * @adapter: board private structure
1040
 **/
1041
 
1042
static void
e1000_clean_tx_ring(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Tx ring sk_buffs */

        for(i = 0; i < tx_ring->count; i++) {
                buffer_info = &tx_ring->buffer_info[i];
                if(buffer_info->skb) {

                        /* NOTE(review): the send path maps the linear part
                         * with pci_map_single() but it is unmapped here with
                         * pci_unmap_page() — verify these are interchangeable
                         * on all supported platforms. */
                        pci_unmap_page(pdev,
                                       buffer_info->dma,
                                       buffer_info->length,
                                       PCI_DMA_TODEVICE);

                        dev_kfree_skb(buffer_info->skb);

                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct e1000_buffer) * tx_ring->count;
        memset(tx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(tx_ring->desc, 0, tx_ring->size);

        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;

        /* keep the hardware head/tail consistent with the emptied ring */
        E1000_WRITE_REG(&adapter->hw, TDH, 0);
        E1000_WRITE_REG(&adapter->hw, TDT, 0);
}
1081
 
1082
/**
1083
 * e1000_free_rx_resources - Free Rx Resources
1084
 * @adapter: board private structure
1085
 *
1086
 * Free all receive software resources
1087
 **/
1088
 
1089
void
1090
e1000_free_rx_resources(struct e1000_adapter *adapter)
1091
{
1092
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
1093
        struct pci_dev *pdev = adapter->pdev;
1094
 
1095
        e1000_clean_rx_ring(adapter);
1096
 
1097
        kfree(rx_ring->buffer_info);
1098
        rx_ring->buffer_info = NULL;
1099
 
1100
        pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
1101
 
1102
        rx_ring->desc = NULL;
1103
}
1104
 
1105
/**
1106
 * e1000_clean_rx_ring - Free Rx Buffers
1107
 * @adapter: board private structure
1108
 **/
1109
 
1110
static void
e1000_clean_rx_ring(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
        struct e1000_buffer *buffer_info;
        struct pci_dev *pdev = adapter->pdev;
        unsigned long size;
        unsigned int i;

        /* Free all the Rx ring sk_buffs */

        for(i = 0; i < rx_ring->count; i++) {
                buffer_info = &rx_ring->buffer_info[i];
                if(buffer_info->skb) {

                        pci_unmap_single(pdev,
                                         buffer_info->dma,
                                         buffer_info->length,
                                         PCI_DMA_FROMDEVICE);

                        dev_kfree_skb(buffer_info->skb);

                        buffer_info->skb = NULL;
                }
        }

        size = sizeof(struct e1000_buffer) * rx_ring->count;
        memset(rx_ring->buffer_info, 0, size);

        /* Zero out the descriptor ring */

        memset(rx_ring->desc, 0, rx_ring->size);

        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;

        /* keep the hardware head/tail consistent with the emptied ring */
        E1000_WRITE_REG(&adapter->hw, RDH, 0);
        E1000_WRITE_REG(&adapter->hw, RDT, 0);
}
1149
 
1150
/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
1151
 * and memory write and invalidate disabled for certain operations
1152
 */
1153
static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint32_t rctl;

        /* MWI must be off while the receive unit is held in reset */
        e1000_pci_clear_mwi(&adapter->hw);

        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        rctl |= E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
        E1000_WRITE_FLUSH(&adapter->hw);
        /* give the hardware time to complete the receive reset */
        mdelay(5);

        /* posted Rx buffers are invalid once the unit is in reset */
        if(netif_running(netdev))
                e1000_clean_rx_ring(adapter);
}
1170
 
1171
static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        uint32_t rctl;

        rctl = E1000_READ_REG(&adapter->hw, RCTL);
        rctl &= ~E1000_RCTL_RST;
        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
        E1000_WRITE_FLUSH(&adapter->hw);
        /* give the hardware time to come out of receive reset */
        mdelay(5);

        /* restore MWI only if the PCI command word originally had it */
        if(adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
                e1000_pci_set_mwi(&adapter->hw);

        /* re-program the Rx unit and re-post buffers dropped by
         * e1000_enter_82542_rst() */
        if(netif_running(netdev)) {
                e1000_configure_rx(adapter);
                e1000_alloc_rx_buffers(adapter);
        }
}
1191
 
1192
/**
1193
 * e1000_set_mac - Change the Ethernet Address of the NIC
1194
 * @netdev: network interface device structure
1195
 * @p: pointer to an address structure
1196
 *
1197
 * Returns 0 on success, negative on failure
1198
 **/
1199
 
1200
static int
1201
e1000_set_mac(struct net_device *netdev, void *p)
1202
{
1203
        struct e1000_adapter *adapter = netdev->priv;
1204
        struct sockaddr *addr = p;
1205
 
1206
        if(!is_valid_ether_addr(addr->sa_data))
1207
                return -EADDRNOTAVAIL;
1208
 
1209
        /* 82542 2.0 needs to be in reset to write receive address registers */
1210
 
1211
        if(adapter->hw.mac_type == e1000_82542_rev2_0)
1212
                e1000_enter_82542_rst(adapter);
1213
 
1214
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
1215
        memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
1216
 
1217
        e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
1218
 
1219
        if(adapter->hw.mac_type == e1000_82542_rev2_0)
1220
                e1000_leave_82542_rst(adapter);
1221
 
1222
        return 0;
1223
}
1224
 
1225
/**
1226
 * e1000_set_multi - Multicast and Promiscuous mode set
1227
 * @netdev: network interface device structure
1228
 *
1229
 * The set_multi entry point is called whenever the multicast address
1230
 * list or the network interface flags are updated.  This routine is
1231
 * responsible for configuring the hardware for proper multicast,
1232
 * promiscuous mode, and all-multi behavior.
1233
 **/
1234
 
1235
static void
e1000_set_multi(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;
        struct e1000_hw *hw = &adapter->hw;
        struct dev_mc_list *mc_ptr;
        uint32_t rctl;
        uint32_t hash_value;
        int i;

        /* Check for Promiscuous and All Multicast modes */

        rctl = E1000_READ_REG(hw, RCTL);

        if(netdev->flags & IFF_PROMISC) {
                /* promiscuous implies accepting all multicast too */
                rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
        } else if(netdev->flags & IFF_ALLMULTI) {
                rctl |= E1000_RCTL_MPE;
                rctl &= ~E1000_RCTL_UPE;
        } else {
                rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
        }

        E1000_WRITE_REG(hw, RCTL, rctl);

        /* 82542 2.0 needs to be in reset to write receive address registers */

        if(hw->mac_type == e1000_82542_rev2_0)
                e1000_enter_82542_rst(adapter);

        /* load the first 14 multicast address into the exact filters 1-14
         * RAR 0 is used for the station MAC adddress
         * if there are not 14 addresses, go ahead and clear the filters
         */
        mc_ptr = netdev->mc_list;

        for(i = 1; i < E1000_RAR_ENTRIES; i++) {
                if(mc_ptr) {
                        e1000_rar_set(hw, mc_ptr->dmi_addr, i);
                        mc_ptr = mc_ptr->next;
                } else {
                        /* each RAR entry is a low/high register pair */
                        E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
                        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
                }
        }

        /* clear the old settings from the multicast hash table */

        for(i = 0; i < E1000_NUM_MTA_REGISTERS; i++)
                E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);

        /* load any remaining addresses into the hash table */

        for(; mc_ptr; mc_ptr = mc_ptr->next) {
                hash_value = e1000_hash_mc_addr(hw, mc_ptr->dmi_addr);
                e1000_mta_set(hw, hash_value);
        }

        if(hw->mac_type == e1000_82542_rev2_0)
                e1000_leave_82542_rst(adapter);
}
1296
 
1297
/* need to wait a few seconds after link up to get diagnostic information from the phy */
1298
 
1299
static void
1300
e1000_update_phy_info(unsigned long data)
1301
{
1302
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
1303
        e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
1304
}
1305
 
1306
/**
1307
 * e1000_82547_tx_fifo_stall - Timer Call-back
1308
 * @data: pointer to adapter cast into an unsigned long
1309
 **/
1310
 
1311
static void
e1000_82547_tx_fifo_stall(unsigned long data)
{
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
        struct net_device *netdev = adapter->netdev;
        uint32_t tctl;

        if(atomic_read(&adapter->tx_fifo_stall)) {
                /* only reset the FIFO pointers once the descriptor ring
                 * and the on-chip FIFO are completely drained */
                if((E1000_READ_REG(&adapter->hw, TDT) ==
                    E1000_READ_REG(&adapter->hw, TDH)) &&
                   (E1000_READ_REG(&adapter->hw, TDFT) ==
                    E1000_READ_REG(&adapter->hw, TDFH)) &&
                   (E1000_READ_REG(&adapter->hw, TDFTS) ==
                    E1000_READ_REG(&adapter->hw, TDFHS))) {
                        /* disable transmits while rewinding the FIFO
                         * head/tail (and their shadow copies) */
                        tctl = E1000_READ_REG(&adapter->hw, TCTL);
                        E1000_WRITE_REG(&adapter->hw, TCTL,
                                        tctl & ~E1000_TCTL_EN);
                        E1000_WRITE_REG(&adapter->hw, TDFT,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFH,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFTS,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TDFHS,
                                        adapter->tx_head_addr);
                        E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
                        E1000_WRITE_FLUSH(&adapter->hw);

                        adapter->tx_fifo_head = 0;
                        atomic_set(&adapter->tx_fifo_stall, 0);
                        netif_wake_queue(netdev);
                } else {
                        /* not drained yet — re-check on the next jiffy */
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
                }
        }
}
1347
 
1348
/**
1349
 * e1000_watchdog - Timer Call-back
1350
 * @data: pointer to netdev cast into an unsigned long
1351
 **/
1352
 
1353
static void
e1000_watchdog(unsigned long data)
{
        struct e1000_adapter *adapter = (struct e1000_adapter *) data;
        struct net_device *netdev = adapter->netdev;
        struct e1000_desc_ring *txdr = &adapter->tx_ring;
        unsigned int i;
        uint32_t link;

        e1000_check_for_link(&adapter->hw);

        /* serdes with hardware autoneg disabled reports link through the
         * serdes_link_down flag rather than the STATUS register */
        if((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
           !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
                link = !adapter->hw.serdes_link_down;
        else
                link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;

        if(link) {
                /* link just came up: announce it, start the queue, and
                 * schedule the delayed PHY-info snapshot */
                if(!netif_carrier_ok(netdev)) {
                        e1000_get_speed_and_duplex(&adapter->hw,
                                                   &adapter->link_speed,
                                                   &adapter->link_duplex);

                        printk(KERN_INFO
                               "e1000: %s NIC Link is Up %d Mbps %s\n",
                               netdev->name, adapter->link_speed,
                               adapter->link_duplex == FULL_DUPLEX ?
                               "Full Duplex" : "Half Duplex");

                        netif_carrier_on(netdev);
                        netif_wake_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
                        adapter->smartspeed = 0;
                }
        } else {
                /* link just went down */
                if(netif_carrier_ok(netdev)) {
                        adapter->link_speed = 0;
                        adapter->link_duplex = 0;
                        printk(KERN_INFO
                               "e1000: %s NIC Link is Down\n",
                               netdev->name);
                        netif_carrier_off(netdev);
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
                }

                e1000_smartspeed(adapter);
        }

        e1000_update_stats(adapter);

        /* compute per-interval deltas used by the adaptive algorithms */
        adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
        adapter->tpt_old = adapter->stats.tpt;
        adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
        adapter->colc_old = adapter->stats.colc;

        adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
        adapter->gorcl_old = adapter->stats.gorcl;
        adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
        adapter->gotcl_old = adapter->stats.gotcl;

        e1000_update_adaptive(&adapter->hw);

        if(!netif_carrier_ok(netdev)) {
                if(E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
                        /* We've lost link, so the controller stops DMA,
                         * but we've got queued Tx work that's never going
                         * to get done, so reset controller to flush Tx.
                         * (Do the reset outside of interrupt context). */
                        schedule_task(&adapter->tx_timeout_task);
                }
        }

        /* Dynamic mode for Interrupt Throttle Rate (ITR) */
        if(adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
                /* Symmetric Tx/Rx gets a reduced ITR=2000; Total
                 * asymmetrical Tx or Rx gets ITR=8000; everyone
                 * else is between 2000-8000. */
                uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
                uint32_t dif = (adapter->gotcl > adapter->gorcl ?
                        adapter->gotcl - adapter->gorcl :
                        adapter->gorcl - adapter->gotcl) / 10000;
                uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
                E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
        }

        /* Cause software interrupt to ensure rx ring is cleaned */
        E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);

        /* Early detection of hung controller: the oldest pending Tx
         * buffer is over a second old and we are not paused (TXOFF) */
        i = txdr->next_to_clean;
        if(txdr->buffer_info[i].dma &&
           time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
           !(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF))
                netif_stop_queue(netdev);

        /* Reset the timer */
        mod_timer(&adapter->watchdog_timer, jiffies + 2 * HZ);
}
1452
 
1453
#define E1000_TX_FLAGS_CSUM             0x00000001
1454
#define E1000_TX_FLAGS_VLAN             0x00000002
1455
#define E1000_TX_FLAGS_TSO              0x00000004
1456
#define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
1457
#define E1000_TX_FLAGS_VLAN_SHIFT       16
1458
 
1459
/* Set up a TSO context descriptor for @skb if it requests segmentation.
 * Returns TRUE when a context descriptor was queued, FALSE otherwise.
 * Compiled out entirely when the kernel lacks NETIF_F_TSO. */
static inline boolean_t
e1000_tso(struct e1000_adapter *adapter, struct sk_buff *skb)
{
#ifdef NETIF_F_TSO
        struct e1000_context_desc *context_desc;
        unsigned int i;
        uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
        uint16_t ipcse, tucse, mss;

        if(skb_shinfo(skb)->tso_size) {
                /* total header length = network headers + TCP header
                 * (doff is in 32-bit words) */
                hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
                mss = skb_shinfo(skb)->tso_size;
                /* zero the IP total length/checksum and seed the TCP
                 * pseudo-header checksum: hardware fills in per-segment
                 * values as it splits the packet */
                skb->nh.iph->tot_len = 0;
                skb->nh.iph->check = 0;
                skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                      skb->nh.iph->daddr,
                                                      0,
                                                      IPPROTO_TCP,
                                                      0);
                /* byte offsets of the IP/TCP checksum fields and spans,
                 * relative to the start of packet data */
                ipcss = skb->nh.raw - skb->data;
                ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
                ipcse = skb->h.raw - skb->data - 1;
                tucss = skb->h.raw - skb->data;
                tucso = (void *)&(skb->h.th->check) - (void *)skb->data;
                tucse = 0;

                i = adapter->tx_ring.next_to_use;
                context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

                context_desc->lower_setup.ip_fields.ipcss  = ipcss;
                context_desc->lower_setup.ip_fields.ipcso  = ipcso;
                context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
                context_desc->upper_setup.tcp_fields.tucss = tucss;
                context_desc->upper_setup.tcp_fields.tucso = tucso;
                context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
                context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
                context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
                /* payload length (total minus headers) goes in the low
                 * bits of cmd_and_length */
                context_desc->cmd_and_length = cpu_to_le32(
                        E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
                        E1000_TXD_CMD_IP | E1000_TXD_CMD_TCP |
                        (skb->len - (hdr_len)));

                if(++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return TRUE;
        }
#endif

        return FALSE;
}
1510
 
1511
/* Set up a checksum-offload context descriptor for @skb when the stack
 * asks for hardware checksumming (CHECKSUM_HW).  Returns TRUE when a
 * context descriptor was queued, FALSE otherwise. */
static inline boolean_t
e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
{
        struct e1000_context_desc *context_desc;
        unsigned int i;
        uint8_t css, cso;

        if(skb->ip_summed == CHECKSUM_HW) {
                /* css: offset where checksumming starts;
                 * cso: offset where the result is stored (skb->csum is
                 * the offset of the checksum field within the header) */
                css = skb->h.raw - skb->data;
                cso = (skb->h.raw + skb->csum) - skb->data;

                i = adapter->tx_ring.next_to_use;
                context_desc = E1000_CONTEXT_DESC(adapter->tx_ring, i);

                context_desc->upper_setup.tcp_fields.tucss = css;
                context_desc->upper_setup.tcp_fields.tucso = cso;
                /* tucse == 0: checksum to the end of the packet */
                context_desc->upper_setup.tcp_fields.tucse = 0;
                context_desc->tcp_seg_setup.data = 0;
                context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);

                if(++i == adapter->tx_ring.count) i = 0;
                adapter->tx_ring.next_to_use = i;

                return TRUE;
        }

        return FALSE;
}
1539
 
1540
#define E1000_MAX_TXD_PWR       12
1541
#define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
1542
 
1543
static inline int
1544
e1000_tx_map(struct e1000_adapter *adapter, struct sk_buff *skb,
1545
        unsigned int first)
1546
{
1547
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
1548
        struct e1000_tx_desc *tx_desc;
1549
        struct e1000_buffer *buffer_info;
1550
        unsigned int len = skb->len, max_per_txd = E1000_MAX_DATA_PER_TXD;
1551
        unsigned int offset = 0, size, count = 0, i;
1552
#ifdef NETIF_F_TSO
1553
        unsigned int mss;
1554
#endif
1555
        unsigned int nr_frags;
1556
        unsigned int f;
1557
 
1558
#ifdef NETIF_F_TSO
1559
        mss = skb_shinfo(skb)->tso_size;
1560
        /* The controller does a simple calculation to
1561
         * make sure there is enough room in the FIFO before
1562
         * initiating the DMA for each buffer.  The calc is:
1563
         * 4 = ceil(buffer len/mss).  To make sure we don't
1564
         * overrun the FIFO, adjust the max buffer len if mss
1565
         * drops. */
1566
        if(mss)
1567
                max_per_txd = min(mss << 2, max_per_txd);
1568
#endif
1569
        nr_frags = skb_shinfo(skb)->nr_frags;
1570
        len -= skb->data_len;
1571
 
1572
        i = tx_ring->next_to_use;
1573
 
1574
        while(len) {
1575
                buffer_info = &tx_ring->buffer_info[i];
1576
                size = min(len, max_per_txd);
1577
#ifdef NETIF_F_TSO
1578
                /* Workaround for premature desc write-backs
1579
                 * in TSO mode.  Append 4-byte sentinel desc */
1580
                if(mss && !nr_frags && size == len && size > 8)
1581
                        size -= 4;
1582
#endif
1583
                /* Workaround for potential 82544 hang in PCI-X.  Avoid
1584
                 * terminating buffers within evenly-aligned dwords. */
1585
                if(adapter->pcix_82544 &&
1586
                   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
1587
                   size > 4)
1588
                        size -= 4;
1589
 
1590
                buffer_info->length = size;
1591
                buffer_info->dma =
1592
                        pci_map_single(adapter->pdev,
1593
                                skb->data + offset,
1594
                                size,
1595
                                PCI_DMA_TODEVICE);
1596
                buffer_info->time_stamp = jiffies;
1597
 
1598
                len -= size;
1599
                offset += size;
1600
                count++;
1601
                if(++i == tx_ring->count) i = 0;
1602
        }
1603
 
1604
        for(f = 0; f < nr_frags; f++) {
1605
                struct skb_frag_struct *frag;
1606
 
1607
                frag = &skb_shinfo(skb)->frags[f];
1608
                len = frag->size;
1609
                offset = frag->page_offset;
1610
 
1611
                while(len) {
1612
                        buffer_info = &tx_ring->buffer_info[i];
1613
                        size = min(len, max_per_txd);
1614
#ifdef NETIF_F_TSO
1615
                        /* Workaround for premature desc write-backs
1616
                         * in TSO mode.  Append 4-byte sentinel desc */
1617
                        if(mss && f == (nr_frags-1) && size == len && size > 8)
1618
                                size -= 4;
1619
#endif
1620
                        /* Workaround for potential 82544 hang in PCI-X.
1621
                         * Avoid terminating buffers within evenly-aligned
1622
                         * dwords. */
1623
                        if(adapter->pcix_82544 &&
1624
                           !((unsigned long)(frag->page+offset+size-1) & 4) &&
1625
                           size > 4)
1626
                                size -= 4;
1627
 
1628
                        buffer_info->length = size;
1629
                        buffer_info->dma =
1630
                                pci_map_page(adapter->pdev,
1631
                                        frag->page,
1632
                                        offset,
1633
                                        size,
1634
                                        PCI_DMA_TODEVICE);
1635
                        buffer_info->time_stamp = jiffies;
1636
 
1637
                        len -= size;
1638
                        offset += size;
1639
                        count++;
1640
                        if(++i == tx_ring->count) i = 0;
1641
                }
1642
        }
1643
 
1644
        if(E1000_DESC_UNUSED(&adapter->tx_ring) < count + 2) {
1645
 
1646
                /* There aren't enough descriptors available to queue up
1647
                 * this send (need: count + 1 context desc + 1 desc gap
1648
                 * to keep tail from touching head), so undo the mapping
1649
                 * and abort the send.  We could have done the check before
1650
                 * we mapped the skb, but because of all the workarounds
1651
                 * (above), it's too difficult to predict how many we're
1652
                 * going to need.*/
1653
                i = adapter->tx_ring.next_to_use;
1654
 
1655
                if(i == first) {
1656
                        /* Cleanup after e1000_tx_[csum|tso] scribbling
1657
                         * on descriptors. */
1658
                        tx_desc = E1000_TX_DESC(*tx_ring, first);
1659
                        tx_desc->buffer_addr = 0;
1660
                        tx_desc->lower.data = 0;
1661
                        tx_desc->upper.data = 0;
1662
                }
1663
 
1664
                while(count--) {
1665
                        buffer_info = &tx_ring->buffer_info[i];
1666
 
1667
                        if(buffer_info->dma) {
1668
                                pci_unmap_page(adapter->pdev,
1669
                                               buffer_info->dma,
1670
                                               buffer_info->length,
1671
                                               PCI_DMA_TODEVICE);
1672
                                buffer_info->dma = 0;
1673
                        }
1674
 
1675
                        if(++i == tx_ring->count) i = 0;
1676
                }
1677
 
1678
                adapter->tx_ring.next_to_use = first;
1679
 
1680
                return 0;
1681
        }
1682
 
1683
        i = (i == 0) ? tx_ring->count - 1 : i - 1;
1684
        tx_ring->buffer_info[i].skb = skb;
1685
        tx_ring->buffer_info[first].next_to_watch = i;
1686
 
1687
        return count;
1688
}
1689
 
1690
/**
 * e1000_tx_queue - write mapped buffers into the hardware Tx descriptor ring
 * @adapter: board private structure
 * @count: number of buffer_info entries (starting at next_to_use) to post;
 *         must be > 0, otherwise tx_desc stays NULL and is dereferenced
 *         after the loop (the caller, e1000_xmit_frame, only calls with
 *         a non-zero count from e1000_tx_map)
 * @tx_flags: E1000_TX_FLAGS_* bits selecting TSO / checksum / VLAN handling
 *
 * Converts the DMA mappings created by e1000_tx_map() into Tx
 * descriptors, arms the packet's last descriptor with the adapter-wide
 * command bits, and advances the TDT tail register so the hardware
 * starts fetching.
 */
static inline void
e1000_tx_queue(struct e1000_adapter *adapter, int count, int tx_flags)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct e1000_tx_desc *tx_desc = NULL;
        struct e1000_buffer *buffer_info;
        uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
        unsigned int i;

        if(tx_flags & E1000_TX_FLAGS_TSO) {
                /* TSO uses the extended data descriptor format */
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
                             E1000_TXD_CMD_TSE;
                txd_upper |= (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
        }

        if(tx_flags & E1000_TX_FLAGS_CSUM) {
                txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
                txd_upper |= E1000_TXD_POPTS_TXSM << 8;
        }

        if(tx_flags & E1000_TX_FLAGS_VLAN) {
                txd_lower |= E1000_TXD_CMD_VLE;
                txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
        }

        i = tx_ring->next_to_use;

        while(count--) {
                buffer_info = &tx_ring->buffer_info[i];
                tx_desc = E1000_TX_DESC(*tx_ring, i);
                tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
                tx_desc->lower.data =
                        cpu_to_le32(txd_lower | buffer_info->length);
                tx_desc->upper.data = cpu_to_le32(txd_upper);
                if(++i == tx_ring->count) i = 0;
        }

        /* Only the packet's final descriptor (tx_desc after the loop)
         * carries the adapter command bits (EOP/RS etc.) */
        tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64). */
        wmb();

        tx_ring->next_to_use = i;
        E1000_WRITE_REG(&adapter->hw, TDT, i);
}
1738
 
1739
/**
1740
 * 82547 workaround to avoid controller hang in half-duplex environment.
1741
 * The workaround is to avoid queuing a large packet that would span
1742
 * the internal Tx FIFO ring boundary by notifying the stack to resend
1743
 * the packet at a later time.  This gives the Tx FIFO an opportunity to
1744
 * flush all packets.  When that occurs, we reset the Tx FIFO pointers
1745
 * to the beginning of the Tx FIFO.
1746
 **/
1747
 
1748
#define E1000_FIFO_HDR                  0x10
1749
#define E1000_82547_PAD_LEN             0x3E0
1750
 
1751
static inline int
1752
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
1753
{
1754
        uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
1755
        uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
1756
 
1757
        E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
1758
 
1759
        if(adapter->link_duplex != HALF_DUPLEX)
1760
                goto no_fifo_stall_required;
1761
 
1762
        if(atomic_read(&adapter->tx_fifo_stall))
1763
                return 1;
1764
 
1765
        if(skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
1766
                atomic_set(&adapter->tx_fifo_stall, 1);
1767
                return 1;
1768
        }
1769
 
1770
no_fifo_stall_required:
1771
        adapter->tx_fifo_head += skb_fifo_len;
1772
        if(adapter->tx_fifo_head >= adapter->tx_fifo_size)
1773
                adapter->tx_fifo_head -= adapter->tx_fifo_size;
1774
        return 0;
1775
}
1776
 
1777
/**
 * e1000_xmit_frame - hard_start_xmit entry point: queue one skb for transmit
 * @skb: socket buffer to transmit
 * @netdev: network interface device structure
 *
 * Returns 0 when the frame is consumed (queued to hardware or freed),
 * 1 to make the stack requeue it (82547 FIFO stall or ring full).
 **/

static int
e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;
        unsigned int first;
        unsigned int tx_flags = 0;
        unsigned long flags;
        int count;

        /* Nothing to send: free the skb and report success */
        if(skb->len <= 0) {
                dev_kfree_skb_any(skb);
                return 0;
        }

        /* tx_lock serializes ring access against e1000_clean_tx_irq */
        spin_lock_irqsave(&adapter->tx_lock, flags);

        /* 82547 half-duplex FIFO-hang workaround: if this packet would
         * wrap the internal Tx FIFO, stop the queue and let the
         * tx_fifo_stall_timer retry once the FIFO has drained */
        if(adapter->hw.mac_type == e1000_82547) {
                if(e1000_82547_fifo_workaround(adapter, skb)) {
                        netif_stop_queue(netdev);
                        mod_timer(&adapter->tx_fifo_stall_timer, jiffies);
                        spin_unlock_irqrestore(&adapter->tx_lock, flags);
                        return 1;
                }
        }

        if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
                tx_flags |= E1000_TX_FLAGS_VLAN;
                tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
        }

        /* Remember where this packet's descriptors begin so e1000_tx_map
         * can unwind to it if the ring turns out to be too full */
        first = adapter->tx_ring.next_to_use;

        /* TSO and plain checksum offload are mutually exclusive per packet */
        if(e1000_tso(adapter, skb))
                tx_flags |= E1000_TX_FLAGS_TSO;
        else if(e1000_tx_csum(adapter, skb))
                tx_flags |= E1000_TX_FLAGS_CSUM;

        /* count == 0 means e1000_tx_map found too few free descriptors
         * and already undid its DMA mappings; stop the queue and requeue */
        if((count = e1000_tx_map(adapter, skb, first)))
                e1000_tx_queue(adapter, count, tx_flags);
        else {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return 1;
        }

        netdev->trans_start = jiffies;

        spin_unlock_irqrestore(&adapter->tx_lock, flags);

        return 0;
}
1828
 
1829
/**
 * e1000_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * Called by the network stack when a transmit watchdog fires.  Defers
 * the actual reset to process context via tx_timeout_task, since the
 * full down/up cycle cannot run in interrupt context.
 **/

static void
e1000_tx_timeout(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        /* Do the reset outside of interrupt context */
        schedule_task(&adapter->tx_timeout_task);
}
1842
 
1843
/**
 * e1000_tx_timeout_task - deferred reset after a Tx hang
 * @netdev: network interface device structure
 *
 * Runs in process context (scheduled by e1000_tx_timeout).  Detaches
 * the device, performs a full down/up cycle to reset the adapter and
 * its rings, then reattaches.
 **/
static void
e1000_tx_timeout_task(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        netif_device_detach(netdev);
        e1000_down(adapter);
        e1000_up(adapter);
        netif_device_attach(netdev);
}
1853
 
1854
/**
 * e1000_get_stats - Get System Network Statistics
 * @netdev: network interface device structure
 *
 * Refreshes the software counters from the hardware statistics
 * registers, then returns the address of the device statistics
 * structure.
 **/

static struct net_device_stats *
e1000_get_stats(struct net_device *netdev)
{
        struct e1000_adapter *adapter = netdev->priv;

        /* Pull the latest counters from hardware before handing them out */
        e1000_update_stats(adapter);
        return &adapter->net_stats;
}
1870
 
1871
/**
1872
 * e1000_change_mtu - Change the Maximum Transfer Unit
1873
 * @netdev: network interface device structure
1874
 * @new_mtu: new value for maximum frame size
1875
 *
1876
 * Returns 0 on success, negative on failure
1877
 **/
1878
 
1879
static int
1880
e1000_change_mtu(struct net_device *netdev, int new_mtu)
1881
{
1882
        struct e1000_adapter *adapter = netdev->priv;
1883
        int old_mtu = adapter->rx_buffer_len;
1884
        int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
1885
 
1886
        if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
1887
           (max_frame > MAX_JUMBO_FRAME_SIZE)) {
1888
                E1000_ERR("Invalid MTU setting\n");
1889
                return -EINVAL;
1890
        }
1891
 
1892
        if(max_frame <= MAXIMUM_ETHERNET_FRAME_SIZE) {
1893
                adapter->rx_buffer_len = E1000_RXBUFFER_2048;
1894
 
1895
        } else if(adapter->hw.mac_type < e1000_82543) {
1896
                E1000_ERR("Jumbo Frames not supported on 82542\n");
1897
                return -EINVAL;
1898
 
1899
        } else if(max_frame <= E1000_RXBUFFER_4096) {
1900
                adapter->rx_buffer_len = E1000_RXBUFFER_4096;
1901
 
1902
        } else if(max_frame <= E1000_RXBUFFER_8192) {
1903
                adapter->rx_buffer_len = E1000_RXBUFFER_8192;
1904
 
1905
        } else {
1906
                adapter->rx_buffer_len = E1000_RXBUFFER_16384;
1907
        }
1908
 
1909
        if(old_mtu != adapter->rx_buffer_len && netif_running(netdev)) {
1910
 
1911
                e1000_down(adapter);
1912
                e1000_up(adapter);
1913
        }
1914
 
1915
        netdev->mtu = new_mtu;
1916
        adapter->hw.max_frame_size = max_frame;
1917
 
1918
        return 0;
1919
}
1920
 
1921
/**
 * e1000_update_stats - Update the board statistics counters
 * @adapter: board private structure
 *
 * Accumulates the hardware statistics registers into the driver's
 * software counters, then derives the struct net_device_stats fields
 * from them.  The whole function runs under stats_lock because the
 * first group of counters is also written from interrupt context
 * (e1000_tbi_adjust_stats).  NOTE(review): the "+=" accumulation
 * pattern suggests the hardware counters are clear-on-read — confirm
 * against the 8254x software developer's manual.
 **/

void
e1000_update_stats(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;
        unsigned long flags;
        uint16_t phy_tmp;

/* low byte of PHY_1000T_STATUS holds the idle error count */
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF

        spin_lock_irqsave(&adapter->stats_lock, flags);

        /* these counters are modified from e1000_adjust_tbi_stats,
         * called from the interrupt context, so they must only
         * be written while holding adapter->stats_lock
         */

        adapter->stats.crcerrs += E1000_READ_REG(hw, CRCERRS);
        adapter->stats.gprc += E1000_READ_REG(hw, GPRC);
        adapter->stats.gorcl += E1000_READ_REG(hw, GORCL);
        adapter->stats.gorch += E1000_READ_REG(hw, GORCH);
        adapter->stats.bprc += E1000_READ_REG(hw, BPRC);
        adapter->stats.mprc += E1000_READ_REG(hw, MPRC);
        adapter->stats.roc += E1000_READ_REG(hw, ROC);
        adapter->stats.prc64 += E1000_READ_REG(hw, PRC64);
        adapter->stats.prc127 += E1000_READ_REG(hw, PRC127);
        adapter->stats.prc255 += E1000_READ_REG(hw, PRC255);
        adapter->stats.prc511 += E1000_READ_REG(hw, PRC511);
        adapter->stats.prc1023 += E1000_READ_REG(hw, PRC1023);
        adapter->stats.prc1522 += E1000_READ_REG(hw, PRC1522);

        /* the rest of the counters are only modified here */

        adapter->stats.symerrs += E1000_READ_REG(hw, SYMERRS);
        adapter->stats.mpc += E1000_READ_REG(hw, MPC);
        adapter->stats.scc += E1000_READ_REG(hw, SCC);
        adapter->stats.ecol += E1000_READ_REG(hw, ECOL);
        adapter->stats.mcc += E1000_READ_REG(hw, MCC);
        adapter->stats.latecol += E1000_READ_REG(hw, LATECOL);
        adapter->stats.dc += E1000_READ_REG(hw, DC);
        adapter->stats.sec += E1000_READ_REG(hw, SEC);
        adapter->stats.rlec += E1000_READ_REG(hw, RLEC);
        adapter->stats.xonrxc += E1000_READ_REG(hw, XONRXC);
        adapter->stats.xontxc += E1000_READ_REG(hw, XONTXC);
        adapter->stats.xoffrxc += E1000_READ_REG(hw, XOFFRXC);
        adapter->stats.xofftxc += E1000_READ_REG(hw, XOFFTXC);
        adapter->stats.fcruc += E1000_READ_REG(hw, FCRUC);
        adapter->stats.gptc += E1000_READ_REG(hw, GPTC);
        adapter->stats.gotcl += E1000_READ_REG(hw, GOTCL);
        adapter->stats.gotch += E1000_READ_REG(hw, GOTCH);
        adapter->stats.rnbc += E1000_READ_REG(hw, RNBC);
        adapter->stats.ruc += E1000_READ_REG(hw, RUC);
        adapter->stats.rfc += E1000_READ_REG(hw, RFC);
        adapter->stats.rjc += E1000_READ_REG(hw, RJC);
        adapter->stats.torl += E1000_READ_REG(hw, TORL);
        adapter->stats.torh += E1000_READ_REG(hw, TORH);
        adapter->stats.totl += E1000_READ_REG(hw, TOTL);
        adapter->stats.toth += E1000_READ_REG(hw, TOTH);
        adapter->stats.tpr += E1000_READ_REG(hw, TPR);
        adapter->stats.ptc64 += E1000_READ_REG(hw, PTC64);
        adapter->stats.ptc127 += E1000_READ_REG(hw, PTC127);
        adapter->stats.ptc255 += E1000_READ_REG(hw, PTC255);
        adapter->stats.ptc511 += E1000_READ_REG(hw, PTC511);
        adapter->stats.ptc1023 += E1000_READ_REG(hw, PTC1023);
        adapter->stats.ptc1522 += E1000_READ_REG(hw, PTC1522);
        adapter->stats.mptc += E1000_READ_REG(hw, MPTC);
        adapter->stats.bptc += E1000_READ_REG(hw, BPTC);

        /* used for adaptive IFS */

        hw->tx_packet_delta = E1000_READ_REG(hw, TPT);
        adapter->stats.tpt += hw->tx_packet_delta;
        hw->collision_delta = E1000_READ_REG(hw, COLC);
        adapter->stats.colc += hw->collision_delta;

        /* these registers only exist on 82543 and newer parts */
        if(hw->mac_type >= e1000_82543) {
                adapter->stats.algnerrc += E1000_READ_REG(hw, ALGNERRC);
                adapter->stats.rxerrc += E1000_READ_REG(hw, RXERRC);
                adapter->stats.tncrs += E1000_READ_REG(hw, TNCRS);
                adapter->stats.cexterr += E1000_READ_REG(hw, CEXTERR);
                adapter->stats.tsctc += E1000_READ_REG(hw, TSCTC);
                adapter->stats.tsctfc += E1000_READ_REG(hw, TSCTFC);
        }

        /* Fill out the OS statistics structure */

        adapter->net_stats.rx_packets = adapter->stats.gprc;
        adapter->net_stats.tx_packets = adapter->stats.gptc;
        adapter->net_stats.rx_bytes = adapter->stats.gorcl;
        adapter->net_stats.tx_bytes = adapter->stats.gotcl;
        adapter->net_stats.multicast = adapter->stats.mprc;
        adapter->net_stats.collisions = adapter->stats.colc;

        /* Rx Errors */

        adapter->net_stats.rx_errors = adapter->stats.rxerrc +
                adapter->stats.crcerrs + adapter->stats.algnerrc +
                adapter->stats.rlec + adapter->stats.rnbc +
                adapter->stats.mpc + adapter->stats.cexterr;
        adapter->net_stats.rx_dropped = adapter->stats.rnbc;
        adapter->net_stats.rx_length_errors = adapter->stats.rlec;
        adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
        adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
        adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
        adapter->net_stats.rx_missed_errors = adapter->stats.mpc;

        /* Tx Errors */

        adapter->net_stats.tx_errors = adapter->stats.ecol +
                                       adapter->stats.latecol;
        adapter->net_stats.tx_aborted_errors = adapter->stats.ecol;
        adapter->net_stats.tx_window_errors = adapter->stats.latecol;
        adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs;

        /* Tx Dropped needs to be maintained elsewhere */

        /* Phy Stats */

        if(hw->media_type == e1000_media_type_copper) {
                /* idle error counter is only meaningful at gigabit speed */
                if((adapter->link_speed == SPEED_1000) &&
                   (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
                        phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
                        adapter->phy_stats.idle_errors += phy_tmp;
                }

                /* M88 PHYs on older parts expose a receive error counter */
                if((hw->mac_type <= e1000_82546) &&
                   (hw->phy_type == e1000_phy_m88) &&
                   !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
                        adapter->phy_stats.receive_errors += phy_tmp;
        }

        spin_unlock_irqrestore(&adapter->stats_lock, flags);
}
2058
 
2059
/**
 * e1000_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 *
 * Nests: each call increments irq_sem, and e1000_irq_enable only
 * unmasks again once every disable has been balanced.
 **/

static inline void
e1000_irq_disable(struct e1000_adapter *adapter)
{
        atomic_inc(&adapter->irq_sem);
        /* Mask all causes, flush the posted write so the mask takes
         * effect, then wait for any in-flight handler to finish */
        E1000_WRITE_REG(&adapter->hw, IMC, ~0);
        E1000_WRITE_FLUSH(&adapter->hw);
        synchronize_irq();
}
2072
 
2073
/**
 * e1000_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 *
 * Counterpart of e1000_irq_disable: unmasks only when irq_sem drops
 * to zero, i.e. when every earlier disable has been balanced.
 **/

static inline void
e1000_irq_enable(struct e1000_adapter *adapter)
{
        if(atomic_dec_and_test(&adapter->irq_sem)) {
                E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
                E1000_WRITE_FLUSH(&adapter->hw);
        }
}
2086
 
2087
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @regs: CPU registers structure
 *
 * Returns IRQ_NONE when ICR reads zero (shared-line interrupt not
 * ours), IRQ_HANDLED otherwise.
 **/

static irqreturn_t
e1000_intr(int irq, void *data, struct pt_regs *regs)
{
        struct net_device *netdev = data;
        struct e1000_adapter *adapter = netdev->priv;
        struct e1000_hw *hw = &adapter->hw;
        uint32_t icr = E1000_READ_REG(&adapter->hw, ICR);
#ifndef CONFIG_E1000_NAPI
        unsigned int i;
#endif

        if(!icr)
                return IRQ_NONE;  /* Not our interrupt */

        /* Link status change or Rx sequence error: have the watchdog
         * re-check the link right away */
        if(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
                hw->get_link_status = 1;
                mod_timer(&adapter->watchdog_timer, jiffies);
        }

#ifdef CONFIG_E1000_NAPI
        if(netif_rx_schedule_prep(netdev)) {

                /* Disable interrupts and register for poll. The flush
                  of the posted write is intentionally left out.
                */

                atomic_inc(&adapter->irq_sem);
                E1000_WRITE_REG(hw, IMC, ~0);
                __netif_rx_schedule(netdev);
        }
#else
        /* Writing IMC and IMS is needed for 82547.
           Due to Hub Link bus being occupied, an interrupt
           de-assertion message is not able to be sent.
           When an interrupt assertion message is generated later,
           two messages are re-ordered and sent out.
           That causes APIC to think 82547 is in de-assertion
           state, while 82547 is in assertion state, resulting
           in dead lock. Writing IMC forces 82547 into
           de-assertion state.
        */
        if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_disable(adapter);

        /* Bitwise & (not &&) is deliberate: both the Rx and the Tx
         * cleaner must run on every pass, without short-circuiting */
        for(i = 0; i < E1000_MAX_INTR; i++)
                if(!e1000_clean_rx_irq(adapter) &
                   !e1000_clean_tx_irq(adapter))
                        break;

        if(hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
                e1000_irq_enable(adapter);
#endif

        return IRQ_HANDLED;
}
2149
 
2150
#ifdef CONFIG_E1000_NAPI
2151
/**
2152
 * e1000_clean - NAPI Rx polling callback
2153
 * @adapter: board private structure
2154
 **/
2155
 
2156
static int
2157
e1000_clean(struct net_device *netdev, int *budget)
2158
{
2159
        struct e1000_adapter *adapter = netdev->priv;
2160
        int work_to_do = min(*budget, netdev->quota);
2161
        int work_done = 0;
2162
 
2163
        e1000_clean_tx_irq(adapter);
2164
        e1000_clean_rx_irq(adapter, &work_done, work_to_do);
2165
 
2166
        *budget -= work_done;
2167
        netdev->quota -= work_done;
2168
 
2169
        if(work_done < work_to_do || !netif_running(netdev)) {
2170
                netif_rx_complete(netdev);
2171
                e1000_irq_enable(adapter);
2172
        }
2173
 
2174
        return (work_done >= work_to_do);
2175
}
2176
#endif
2177
 
2178
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @adapter: board private structure
 *
 * Walks the Tx ring from next_to_clean; for each packet whose
 * end-of-packet descriptor (next_to_watch) has the DD bit set, unmaps
 * and frees all of that packet's buffers and zeroes the descriptors.
 * Returns TRUE if at least one packet was reclaimed.
 **/

static boolean_t
e1000_clean_tx_irq(struct e1000_adapter *adapter)
{
        struct e1000_desc_ring *tx_ring = &adapter->tx_ring;
        struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        struct e1000_tx_desc *tx_desc, *eop_desc;
        struct e1000_buffer *buffer_info;
        unsigned int i, eop;
        boolean_t cleaned = FALSE;

        /* Serialize ring access against e1000_xmit_frame */
        spin_lock(&adapter->tx_lock);

        i = tx_ring->next_to_clean;
        eop = tx_ring->buffer_info[i].next_to_watch;
        eop_desc = E1000_TX_DESC(*tx_ring, eop);

        while(eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {

                /* Free every buffer of this packet, up to and including
                 * its end-of-packet descriptor */
                for(cleaned = FALSE; !cleaned; ) {
                        tx_desc = E1000_TX_DESC(*tx_ring, i);
                        buffer_info = &tx_ring->buffer_info[i];

                        if(buffer_info->dma) {

                                /* NOTE(review): pci_unmap_page is used even
                                 * for mappings e1000_tx_map created with
                                 * pci_map_single — equivalent on most
                                 * platforms, but worth confirming */
                                pci_unmap_page(pdev,
                                               buffer_info->dma,
                                               buffer_info->length,
                                               PCI_DMA_TODEVICE);

                                buffer_info->dma = 0;
                        }

                        if(buffer_info->skb) {

                                /* the skb is attached only to the packet's
                                 * last buffer (set in e1000_tx_map) */
                                dev_kfree_skb_any(buffer_info->skb);

                                buffer_info->skb = NULL;
                        }

                        tx_desc->buffer_addr = 0;
                        tx_desc->lower.data = 0;
                        tx_desc->upper.data = 0;

                        cleaned = (i == eop);
                        if(++i == tx_ring->count) i = 0;
                }

                eop = tx_ring->buffer_info[i].next_to_watch;
                eop_desc = E1000_TX_DESC(*tx_ring, eop);
        }

        tx_ring->next_to_clean = i;

        /* Restart a queue stopped for lack of descriptors, but only
         * while the link is still up */
        if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev))
                netif_wake_queue(netdev);

        spin_unlock(&adapter->tx_lock);

        return cleaned;
}
2244
 
2245
/**
2246
 * e1000_clean_rx_irq - Send received data up the network stack,
2247
 * @adapter: board private structure
2248
 **/
2249
 
2250
static boolean_t
2251
#ifdef CONFIG_E1000_NAPI
2252
e1000_clean_rx_irq(struct e1000_adapter *adapter, int *work_done,
2253
                   int work_to_do)
2254
#else
2255
e1000_clean_rx_irq(struct e1000_adapter *adapter)
2256
#endif
2257
{
2258
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2259
        struct net_device *netdev = adapter->netdev;
2260
        struct pci_dev *pdev = adapter->pdev;
2261
        struct e1000_rx_desc *rx_desc;
2262
        struct e1000_buffer *buffer_info;
2263
        struct sk_buff *skb;
2264
        unsigned long flags;
2265
        uint32_t length;
2266
        uint8_t last_byte;
2267
        unsigned int i;
2268
        boolean_t cleaned = FALSE;
2269
 
2270
        i = rx_ring->next_to_clean;
2271
        rx_desc = E1000_RX_DESC(*rx_ring, i);
2272
 
2273
        while(rx_desc->status & E1000_RXD_STAT_DD) {
2274
                buffer_info = &rx_ring->buffer_info[i];
2275
 
2276
#ifdef CONFIG_E1000_NAPI
2277
                if(*work_done >= work_to_do)
2278
                        break;
2279
 
2280
                (*work_done)++;
2281
#endif
2282
 
2283
                cleaned = TRUE;
2284
 
2285
                pci_unmap_single(pdev,
2286
                                 buffer_info->dma,
2287
                                 buffer_info->length,
2288
                                 PCI_DMA_FROMDEVICE);
2289
 
2290
                skb = buffer_info->skb;
2291
                length = le16_to_cpu(rx_desc->length);
2292
 
2293
                if(!(rx_desc->status & E1000_RXD_STAT_EOP)) {
2294
 
2295
                        /* All receives must fit into a single buffer */
2296
 
2297
                        E1000_DBG("Receive packet consumed multiple buffers\n");
2298
 
2299
                        dev_kfree_skb_irq(skb);
2300
                        rx_desc->status = 0;
2301
                        buffer_info->skb = NULL;
2302
 
2303
                        if(++i == rx_ring->count) i = 0;
2304
 
2305
                        rx_desc = E1000_RX_DESC(*rx_ring, i);
2306
                        continue;
2307
                }
2308
 
2309
                if(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) {
2310
 
2311
                        last_byte = *(skb->data + length - 1);
2312
 
2313
                        if(TBI_ACCEPT(&adapter->hw, rx_desc->status,
2314
                                      rx_desc->errors, length, last_byte)) {
2315
 
2316
                                spin_lock_irqsave(&adapter->stats_lock, flags);
2317
 
2318
                                e1000_tbi_adjust_stats(&adapter->hw,
2319
                                                       &adapter->stats,
2320
                                                       length, skb->data);
2321
 
2322
                                spin_unlock_irqrestore(&adapter->stats_lock,
2323
                                                       flags);
2324
                                length--;
2325
                        } else {
2326
 
2327
                                dev_kfree_skb_irq(skb);
2328
                                rx_desc->status = 0;
2329
                                buffer_info->skb = NULL;
2330
 
2331
                                if(++i == rx_ring->count) i = 0;
2332
 
2333
                                rx_desc = E1000_RX_DESC(*rx_ring, i);
2334
                                continue;
2335
                        }
2336
                }
2337
 
2338
                /* Good Receive */
2339
                skb_put(skb, length - ETHERNET_FCS_SIZE);
2340
 
2341
                /* Receive Checksum Offload */
2342
                e1000_rx_checksum(adapter, rx_desc, skb);
2343
 
2344
                skb->protocol = eth_type_trans(skb, netdev);
2345
#ifdef CONFIG_E1000_NAPI
2346
                if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2347
                        vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
2348
                                le16_to_cpu(rx_desc->special &
2349
                                        E1000_RXD_SPC_VLAN_MASK));
2350
                } else {
2351
                        netif_receive_skb(skb);
2352
                }
2353
#else /* CONFIG_E1000_NAPI */
2354
                if(adapter->vlgrp && (rx_desc->status & E1000_RXD_STAT_VP)) {
2355
                        vlan_hwaccel_rx(skb, adapter->vlgrp,
2356
                                le16_to_cpu(rx_desc->special &
2357
                                        E1000_RXD_SPC_VLAN_MASK));
2358
                } else {
2359
                        netif_rx(skb);
2360
                }
2361
#endif /* CONFIG_E1000_NAPI */
2362
                netdev->last_rx = jiffies;
2363
 
2364
                rx_desc->status = 0;
2365
                buffer_info->skb = NULL;
2366
 
2367
                if(++i == rx_ring->count) i = 0;
2368
 
2369
                rx_desc = E1000_RX_DESC(*rx_ring, i);
2370
        }
2371
 
2372
        rx_ring->next_to_clean = i;
2373
 
2374
        e1000_alloc_rx_buffers(adapter);
2375
 
2376
        return cleaned;
2377
}
2378
 
2379
/**
2380
 * e1000_alloc_rx_buffers - Replace used receive buffers
2381
 * @adapter: address of board private structure
2382
 **/
2383
 
2384
static void
2385
e1000_alloc_rx_buffers(struct e1000_adapter *adapter)
2386
{
2387
        struct e1000_desc_ring *rx_ring = &adapter->rx_ring;
2388
        struct net_device *netdev = adapter->netdev;
2389
        struct pci_dev *pdev = adapter->pdev;
2390
        struct e1000_rx_desc *rx_desc;
2391
        struct e1000_buffer *buffer_info;
2392
        struct sk_buff *skb;
2393
        int reserve_len = 2;
2394
        unsigned int i;
2395
 
2396
        i = rx_ring->next_to_use;
2397
        buffer_info = &rx_ring->buffer_info[i];
2398
 
2399
        while(!buffer_info->skb) {
2400
                rx_desc = E1000_RX_DESC(*rx_ring, i);
2401
 
2402
                skb = dev_alloc_skb(adapter->rx_buffer_len + reserve_len);
2403
 
2404
                if(!skb) {
2405
                        /* Better luck next round */
2406
                        break;
2407
                }
2408
 
2409
                /* Make buffer alignment 2 beyond a 16 byte boundary
2410
                 * this will result in a 16 byte aligned IP header after
2411
                 * the 14 byte MAC header is removed
2412
                 */
2413
                skb_reserve(skb, reserve_len);
2414
 
2415
                skb->dev = netdev;
2416
 
2417
                buffer_info->skb = skb;
2418
                buffer_info->length = adapter->rx_buffer_len;
2419
                buffer_info->dma =
2420
                        pci_map_single(pdev,
2421
                                       skb->data,
2422
                                       adapter->rx_buffer_len,
2423
                                       PCI_DMA_FROMDEVICE);
2424
 
2425
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
2426
 
2427
                if((i & ~(E1000_RX_BUFFER_WRITE - 1)) == i) {
2428
                        /* Force memory writes to complete before letting h/w
2429
                         * know there are new descriptors to fetch.  (Only
2430
                         * applicable for weak-ordered memory model archs,
2431
                         * such as IA-64). */
2432
                        wmb();
2433
 
2434
                        E1000_WRITE_REG(&adapter->hw, RDT, i);
2435
                }
2436
 
2437
                if(++i == rx_ring->count) i = 0;
2438
                buffer_info = &rx_ring->buffer_info[i];
2439
        }
2440
 
2441
        rx_ring->next_to_use = i;
2442
}
2443
 
2444
/**
2445
 * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
2446
 * @adapter:
2447
 **/
2448
 
2449
static void
2450
e1000_smartspeed(struct e1000_adapter *adapter)
2451
{
2452
        uint16_t phy_status;
2453
        uint16_t phy_ctrl;
2454
 
2455
        if((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
2456
           !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
2457
                return;
2458
 
2459
        if(adapter->smartspeed == 0) {
2460
                /* If Master/Slave config fault is asserted twice,
2461
                 * we assume back-to-back */
2462
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2463
                if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2464
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
2465
                if(!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
2466
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2467
                if(phy_ctrl & CR_1000T_MS_ENABLE) {
2468
                        phy_ctrl &= ~CR_1000T_MS_ENABLE;
2469
                        e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
2470
                                            phy_ctrl);
2471
                        adapter->smartspeed++;
2472
                        if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2473
                           !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
2474
                                               &phy_ctrl)) {
2475
                                phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2476
                                             MII_CR_RESTART_AUTO_NEG);
2477
                                e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
2478
                                                    phy_ctrl);
2479
                        }
2480
                }
2481
                return;
2482
        } else if(adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
2483
                /* If still no link, perhaps using 2/3 pair cable */
2484
                e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
2485
                phy_ctrl |= CR_1000T_MS_ENABLE;
2486
                e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
2487
                if(!e1000_phy_setup_autoneg(&adapter->hw) &&
2488
                   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
2489
                        phy_ctrl |= (MII_CR_AUTO_NEG_EN |
2490
                                     MII_CR_RESTART_AUTO_NEG);
2491
                        e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
2492
                }
2493
        }
2494
        /* Restart process after E1000_SMARTSPEED_MAX iterations */
2495
        if(adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
2496
                adapter->smartspeed = 0;
2497
}
2498
 
2499
/**
2500
 * e1000_ioctl -
2501
 * @netdev:
2502
 * @ifreq:
2503
 * @cmd:
2504
 **/
2505
 
2506
static int
2507
e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2508
{
2509
        switch (cmd) {
2510
        case SIOCGMIIPHY:
2511
        case SIOCGMIIREG:
2512
        case SIOCSMIIREG:
2513
                return e1000_mii_ioctl(netdev, ifr, cmd);
2514
        case SIOCETHTOOL:
2515
                return e1000_ethtool_ioctl(netdev, ifr);
2516
        default:
2517
                return -EOPNOTSUPP;
2518
        }
2519
}
2520
 
2521
/**
2522
 * e1000_mii_ioctl -
2523
 * @netdev:
2524
 * @ifreq:
2525
 * @cmd:
2526
 **/
2527
 
2528
static int
2529
e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2530
{
2531
        struct e1000_adapter *adapter = netdev->priv;
2532
        struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
2533
        int retval;
2534
        uint16_t mii_reg;
2535
        uint16_t spddplx;
2536
 
2537
        if(adapter->hw.media_type != e1000_media_type_copper)
2538
                return -EOPNOTSUPP;
2539
 
2540
        switch (cmd) {
2541
        case SIOCGMIIPHY:
2542
                data->phy_id = adapter->hw.phy_addr;
2543
                break;
2544
        case SIOCGMIIREG:
2545
                if (!capable(CAP_NET_ADMIN))
2546
                        return -EPERM;
2547
                if (e1000_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
2548
                                   &data->val_out))
2549
                        return -EIO;
2550
                break;
2551
        case SIOCSMIIREG:
2552
                if (!capable(CAP_NET_ADMIN))
2553
                        return -EPERM;
2554
                if (data->reg_num & ~(0x1F))
2555
                        return -EFAULT;
2556
                mii_reg = data->val_in;
2557
                if (e1000_write_phy_reg(&adapter->hw, data->reg_num,
2558
                                        data->val_in))
2559
                        return -EIO;
2560
                if (adapter->hw.phy_type == e1000_phy_m88) {
2561
                        switch (data->reg_num) {
2562
                        case PHY_CTRL:
2563
                                if(data->val_in & MII_CR_AUTO_NEG_EN) {
2564
                                        adapter->hw.autoneg = 1;
2565
                                        adapter->hw.autoneg_advertised = 0x2F;
2566
                                } else {
2567
                                        if (data->val_in & 0x40)
2568
                                                spddplx = SPEED_1000;
2569
                                        else if (data->val_in & 0x2000)
2570
                                                spddplx = SPEED_100;
2571
                                        else
2572
                                                spddplx = SPEED_10;
2573
                                        spddplx += (data->val_in & 0x100)
2574
                                                   ? FULL_DUPLEX :
2575
                                                   HALF_DUPLEX;
2576
                                        retval = e1000_set_spd_dplx(adapter,
2577
                                                                    spddplx);
2578
                                        if(retval)
2579
                                                return retval;
2580
                                }
2581
                                if(netif_running(adapter->netdev)) {
2582
                                        e1000_down(adapter);
2583
                                        e1000_up(adapter);
2584
                                } else
2585
                                        e1000_reset(adapter);
2586
                                break;
2587
                        case M88E1000_PHY_SPEC_CTRL:
2588
                        case M88E1000_EXT_PHY_SPEC_CTRL:
2589
                                if (e1000_phy_reset(&adapter->hw))
2590
                                        return -EIO;
2591
                                break;
2592
                        }
2593
                }
2594
                break;
2595
        default:
2596
                return -EOPNOTSUPP;
2597
        }
2598
        return E1000_SUCCESS;
2599
}
2600
 
2601
/**
2602
 * e1000_rx_checksum - Receive Checksum Offload for 82543
2603
 * @adapter: board private structure
2604
 * @rx_desc: receive descriptor
2605
 * @sk_buff: socket buffer with received data
2606
 **/
2607
 
2608
static inline void
2609
e1000_rx_checksum(struct e1000_adapter *adapter,
2610
                  struct e1000_rx_desc *rx_desc,
2611
                  struct sk_buff *skb)
2612
{
2613
        /* 82543 or newer only */
2614
        if((adapter->hw.mac_type < e1000_82543) ||
2615
        /* Ignore Checksum bit is set */
2616
        (rx_desc->status & E1000_RXD_STAT_IXSM) ||
2617
        /* TCP Checksum has not been calculated */
2618
        (!(rx_desc->status & E1000_RXD_STAT_TCPCS))) {
2619
                skb->ip_summed = CHECKSUM_NONE;
2620
                return;
2621
        }
2622
 
2623
        /* At this point we know the hardware did the TCP checksum */
2624
        /* now look at the TCP checksum error bit */
2625
        if(rx_desc->errors & E1000_RXD_ERR_TCPE) {
2626
                /* let the stack verify checksum errors */
2627
                skb->ip_summed = CHECKSUM_NONE;
2628
                adapter->hw_csum_err++;
2629
        } else {
2630
        /* TCP checksum is good */
2631
                skb->ip_summed = CHECKSUM_UNNECESSARY;
2632
                adapter->hw_csum_good++;
2633
        }
2634
}
2635
 
2636
void
2637
e1000_pci_set_mwi(struct e1000_hw *hw)
2638
{
2639
        struct e1000_adapter *adapter = hw->back;
2640
 
2641
        pci_set_mwi(adapter->pdev);
2642
}
2643
 
2644
void
2645
e1000_pci_clear_mwi(struct e1000_hw *hw)
2646
{
2647
        struct e1000_adapter *adapter = hw->back;
2648
 
2649
        pci_clear_mwi(adapter->pdev);
2650
}
2651
 
2652
void
2653
e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2654
{
2655
        struct e1000_adapter *adapter = hw->back;
2656
 
2657
        pci_read_config_word(adapter->pdev, reg, value);
2658
}
2659
 
2660
void
2661
e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
2662
{
2663
        struct e1000_adapter *adapter = hw->back;
2664
 
2665
        pci_write_config_word(adapter->pdev, reg, *value);
2666
}
2667
 
2668
uint32_t
2669
e1000_io_read(struct e1000_hw *hw, unsigned long port)
2670
{
2671
        return inl(port);
2672
}
2673
 
2674
void
2675
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
2676
{
2677
        outl(value, port);
2678
}
2679
 
2680
static void
2681
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2682
{
2683
        struct e1000_adapter *adapter = netdev->priv;
2684
        uint32_t ctrl, rctl;
2685
 
2686
        e1000_irq_disable(adapter);
2687
        adapter->vlgrp = grp;
2688
 
2689
        if(grp) {
2690
                /* enable VLAN tag insert/strip */
2691
 
2692
                ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2693
                ctrl |= E1000_CTRL_VME;
2694
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2695
 
2696
                /* enable VLAN receive filtering */
2697
 
2698
                rctl = E1000_READ_REG(&adapter->hw, RCTL);
2699
                rctl |= E1000_RCTL_VFE;
2700
                rctl &= ~E1000_RCTL_CFIEN;
2701
                E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2702
        } else {
2703
                /* disable VLAN tag insert/strip */
2704
 
2705
                ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2706
                ctrl &= ~E1000_CTRL_VME;
2707
                E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2708
 
2709
                /* disable VLAN filtering */
2710
 
2711
                rctl = E1000_READ_REG(&adapter->hw, RCTL);
2712
                rctl &= ~E1000_RCTL_VFE;
2713
                E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2714
        }
2715
 
2716
        e1000_irq_enable(adapter);
2717
}
2718
 
2719
static void
2720
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2721
{
2722
        struct e1000_adapter *adapter = netdev->priv;
2723
        uint32_t vfta, index;
2724
 
2725
        /* add VID to filter table */
2726
 
2727
        index = (vid >> 5) & 0x7F;
2728
        vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2729
        vfta |= (1 << (vid & 0x1F));
2730
        e1000_write_vfta(&adapter->hw, index, vfta);
2731
}
2732
 
2733
static void
2734
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
2735
{
2736
        struct e1000_adapter *adapter = netdev->priv;
2737
        uint32_t vfta, index;
2738
 
2739
        e1000_irq_disable(adapter);
2740
 
2741
        if(adapter->vlgrp)
2742
                adapter->vlgrp->vlan_devices[vid] = NULL;
2743
 
2744
        e1000_irq_enable(adapter);
2745
 
2746
        /* remove VID from filter table*/
2747
 
2748
        index = (vid >> 5) & 0x7F;
2749
        vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
2750
        vfta &= ~(1 << (vid & 0x1F));
2751
        e1000_write_vfta(&adapter->hw, index, vfta);
2752
}
2753
 
2754
static void
2755
e1000_restore_vlan(struct e1000_adapter *adapter)
2756
{
2757
        e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2758
 
2759
        if(adapter->vlgrp) {
2760
                uint16_t vid;
2761
                for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2762
                        if(!adapter->vlgrp->vlan_devices[vid])
2763
                                continue;
2764
                        e1000_vlan_rx_add_vid(adapter->netdev, vid);
2765
                }
2766
        }
2767
}
2768
 
2769
int
2770
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
2771
{
2772
        adapter->hw.autoneg = 0;
2773
 
2774
        switch(spddplx) {
2775
        case SPEED_10 + DUPLEX_HALF:
2776
                adapter->hw.forced_speed_duplex = e1000_10_half;
2777
                break;
2778
        case SPEED_10 + DUPLEX_FULL:
2779
                adapter->hw.forced_speed_duplex = e1000_10_full;
2780
                break;
2781
        case SPEED_100 + DUPLEX_HALF:
2782
                adapter->hw.forced_speed_duplex = e1000_100_half;
2783
                break;
2784
        case SPEED_100 + DUPLEX_FULL:
2785
                adapter->hw.forced_speed_duplex = e1000_100_full;
2786
                break;
2787
        case SPEED_1000 + DUPLEX_FULL:
2788
                adapter->hw.autoneg = 1;
2789
                adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
2790
                break;
2791
        case SPEED_1000 + DUPLEX_HALF: /* not supported */
2792
        default:
2793
                return -EINVAL;
2794
        }
2795
        return 0;
2796
}
2797
 
2798
static int
2799
e1000_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
2800
{
2801
        struct pci_dev *pdev = NULL;
2802
 
2803
        switch(event) {
2804
        case SYS_DOWN:
2805
        case SYS_HALT:
2806
        case SYS_POWER_OFF:
2807
                while((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
2808
                        if(pci_dev_driver(pdev) == &e1000_driver)
2809
                                e1000_suspend(pdev, 3);
2810
                }
2811
        }
2812
        return NOTIFY_DONE;
2813
}
2814
 
2815
static int
2816
e1000_suspend(struct pci_dev *pdev, uint32_t state)
2817
{
2818
        struct net_device *netdev = pci_get_drvdata(pdev);
2819
        struct e1000_adapter *adapter = netdev->priv;
2820
        uint32_t ctrl, ctrl_ext, rctl, manc, status;
2821
        uint32_t wufc = adapter->wol;
2822
 
2823
        netif_device_detach(netdev);
2824
 
2825
        if(netif_running(netdev))
2826
                e1000_down(adapter);
2827
 
2828
        status = E1000_READ_REG(&adapter->hw, STATUS);
2829
        if(status & E1000_STATUS_LU)
2830
                wufc &= ~E1000_WUFC_LNKC;
2831
 
2832
        if(wufc) {
2833
                e1000_setup_rctl(adapter);
2834
                e1000_set_multi(netdev);
2835
 
2836
                /* turn on all-multi mode if wake on multicast is enabled */
2837
                if(adapter->wol & E1000_WUFC_MC) {
2838
                        rctl = E1000_READ_REG(&adapter->hw, RCTL);
2839
                        rctl |= E1000_RCTL_MPE;
2840
                        E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
2841
                }
2842
 
2843
                if(adapter->hw.mac_type >= e1000_82540) {
2844
                        ctrl = E1000_READ_REG(&adapter->hw, CTRL);
2845
                        /* advertise wake from D3Cold */
2846
                        #define E1000_CTRL_ADVD3WUC 0x00100000
2847
                        /* phy power management enable */
2848
                        #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
2849
                        ctrl |= E1000_CTRL_ADVD3WUC |
2850
                                E1000_CTRL_EN_PHY_PWR_MGMT;
2851
                        E1000_WRITE_REG(&adapter->hw, CTRL, ctrl);
2852
                }
2853
 
2854
                if(adapter->hw.media_type == e1000_media_type_fiber ||
2855
                   adapter->hw.media_type == e1000_media_type_internal_serdes) {
2856
                        /* keep the laser running in D3 */
2857
                        ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
2858
                        ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
2859
                        E1000_WRITE_REG(&adapter->hw, CTRL_EXT, ctrl_ext);
2860
                }
2861
 
2862
                E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
2863
                E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
2864
                pci_enable_wake(pdev, 3, 1);
2865
                pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2866
        } else {
2867
                E1000_WRITE_REG(&adapter->hw, WUC, 0);
2868
                E1000_WRITE_REG(&adapter->hw, WUFC, 0);
2869
                pci_enable_wake(pdev, 3, 0);
2870
                pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2871
        }
2872
 
2873
        pci_save_state(pdev, adapter->pci_state);
2874
 
2875
        if(adapter->hw.mac_type >= e1000_82540 &&
2876
           adapter->hw.media_type == e1000_media_type_copper) {
2877
                manc = E1000_READ_REG(&adapter->hw, MANC);
2878
                if(manc & E1000_MANC_SMBUS_EN) {
2879
                        manc |= E1000_MANC_ARP_EN;
2880
                        E1000_WRITE_REG(&adapter->hw, MANC, manc);
2881
                        pci_enable_wake(pdev, 3, 1);
2882
                        pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */
2883
                }
2884
        }
2885
 
2886
        state = (state > 0) ? 3 : 0;
2887
        pci_set_power_state(pdev, state);
2888
 
2889
        return 0;
2890
}
2891
 
2892
#ifdef CONFIG_PM
2893
static int
2894
e1000_resume(struct pci_dev *pdev)
2895
{
2896
        struct net_device *netdev = pci_get_drvdata(pdev);
2897
        struct e1000_adapter *adapter = netdev->priv;
2898
        uint32_t manc;
2899
 
2900
        pci_set_power_state(pdev, 0);
2901
        pci_restore_state(pdev, adapter->pci_state);
2902
 
2903
        pci_enable_wake(pdev, 3, 0);
2904
        pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */
2905
 
2906
        e1000_reset(adapter);
2907
        E1000_WRITE_REG(&adapter->hw, WUS, ~0);
2908
 
2909
        if(netif_running(netdev))
2910
                e1000_up(adapter);
2911
 
2912
        netif_device_attach(netdev);
2913
 
2914
        if(adapter->hw.mac_type >= e1000_82540 &&
2915
           adapter->hw.media_type == e1000_media_type_copper) {
2916
                manc = E1000_READ_REG(&adapter->hw, MANC);
2917
                manc &= ~(E1000_MANC_ARP_EN);
2918
                E1000_WRITE_REG(&adapter->hw, MANC, manc);
2919
        }
2920
 
2921
        return 0;
2922
}
2923
#endif
2924
 
2925
#ifdef CONFIG_NET_POLL_CONTROLLER
2926
/*
2927
 * Polling 'interrupt' - used by things like netconsole to send skbs
2928
 * without having to re-enable interrupts. It's not called while
2929
 * the interrupt routine is executing.
2930
 */
2931
 
2932
static void e1000_netpoll (struct net_device *dev)
2933
{
2934
        struct e1000_adapter *adapter = dev->priv;
2935
        disable_irq(adapter->pdev->irq);
2936
        e1000_intr (adapter->pdev->irq, dev, NULL);
2937
        enable_irq(adapter->pdev->irq);
2938
}
2939
#endif
2940
 
2941
/* e1000_main.c */

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.