1 |
62 |
marcus.erl |
/*******************************************************************************
|
2 |
|
|
|
3 |
|
|
Intel PRO/1000 Linux driver
|
4 |
|
|
Copyright(c) 1999 - 2007 Intel Corporation.
|
5 |
|
|
|
6 |
|
|
This program is free software; you can redistribute it and/or modify it
|
7 |
|
|
under the terms and conditions of the GNU General Public License,
|
8 |
|
|
version 2, as published by the Free Software Foundation.
|
9 |
|
|
|
10 |
|
|
This program is distributed in the hope it will be useful, but WITHOUT
|
11 |
|
|
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
12 |
|
|
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
13 |
|
|
more details.
|
14 |
|
|
|
15 |
|
|
You should have received a copy of the GNU General Public License along with
|
16 |
|
|
this program; if not, write to the Free Software Foundation, Inc.,
|
17 |
|
|
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
|
18 |
|
|
|
19 |
|
|
The full GNU General Public License is included in this distribution in
|
20 |
|
|
the file called "COPYING".
|
21 |
|
|
|
22 |
|
|
Contact Information:
|
23 |
|
|
Linux NICS <linux.nics@intel.com>
|
24 |
|
|
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
|
25 |
|
|
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
|
26 |
|
|
|
27 |
|
|
*******************************************************************************/
|
28 |
|
|
|
29 |
|
|
#include <linux/netdevice.h>
|
30 |
|
|
#include <linux/ethtool.h>
|
31 |
|
|
#include <linux/delay.h>
|
32 |
|
|
#include <linux/pci.h>
|
33 |
|
|
|
34 |
|
|
#include "e1000.h"
|
35 |
|
|
|
36 |
|
|
enum e1000_mng_mode {
|
37 |
|
|
e1000_mng_mode_none = 0,
|
38 |
|
|
e1000_mng_mode_asf,
|
39 |
|
|
e1000_mng_mode_pt,
|
40 |
|
|
e1000_mng_mode_ipmi,
|
41 |
|
|
e1000_mng_mode_host_if_only
|
42 |
|
|
};
|
43 |
|
|
|
44 |
|
|
#define E1000_FACTPS_MNGCG 0x20000000
|
45 |
|
|
|
46 |
|
|
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management
|
47 |
|
|
* Technology signature */
|
48 |
|
|
|
49 |
|
|
/**
|
50 |
|
|
* e1000e_get_bus_info_pcie - Get PCIe bus information
|
51 |
|
|
* @hw: pointer to the HW structure
|
52 |
|
|
*
|
53 |
|
|
* Determines and stores the system bus information for a particular
|
54 |
|
|
* network interface. The following bus information is determined and stored:
|
55 |
|
|
* bus speed, bus width, type (PCIe), and PCIe function.
|
56 |
|
|
**/
|
57 |
|
|
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
|
58 |
|
|
{
|
59 |
|
|
struct e1000_bus_info *bus = &hw->bus;
|
60 |
|
|
struct e1000_adapter *adapter = hw->adapter;
|
61 |
|
|
u32 status;
|
62 |
|
|
u16 pcie_link_status, pci_header_type, cap_offset;
|
63 |
|
|
|
64 |
|
|
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
|
65 |
|
|
if (!cap_offset) {
|
66 |
|
|
bus->width = e1000_bus_width_unknown;
|
67 |
|
|
} else {
|
68 |
|
|
pci_read_config_word(adapter->pdev,
|
69 |
|
|
cap_offset + PCIE_LINK_STATUS,
|
70 |
|
|
&pcie_link_status);
|
71 |
|
|
bus->width = (enum e1000_bus_width)((pcie_link_status &
|
72 |
|
|
PCIE_LINK_WIDTH_MASK) >>
|
73 |
|
|
PCIE_LINK_WIDTH_SHIFT);
|
74 |
|
|
}
|
75 |
|
|
|
76 |
|
|
pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
|
77 |
|
|
&pci_header_type);
|
78 |
|
|
if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
|
79 |
|
|
status = er32(STATUS);
|
80 |
|
|
bus->func = (status & E1000_STATUS_FUNC_MASK)
|
81 |
|
|
>> E1000_STATUS_FUNC_SHIFT;
|
82 |
|
|
} else {
|
83 |
|
|
bus->func = 0;
|
84 |
|
|
}
|
85 |
|
|
|
86 |
|
|
return 0;
|
87 |
|
|
}
|
88 |
|
|
|
89 |
|
|
/**
|
90 |
|
|
* e1000e_write_vfta - Write value to VLAN filter table
|
91 |
|
|
* @hw: pointer to the HW structure
|
92 |
|
|
* @offset: register offset in VLAN filter table
|
93 |
|
|
* @value: register value written to VLAN filter table
|
94 |
|
|
*
|
95 |
|
|
* Writes value at the given offset in the register array which stores
|
96 |
|
|
* the VLAN filter table.
|
97 |
|
|
**/
|
98 |
|
|
void e1000e_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
|
99 |
|
|
{
|
100 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
|
101 |
|
|
e1e_flush();
|
102 |
|
|
}
|
103 |
|
|
|
104 |
|
|
/**
|
105 |
|
|
* e1000e_init_rx_addrs - Initialize receive address's
|
106 |
|
|
* @hw: pointer to the HW structure
|
107 |
|
|
* @rar_count: receive address registers
|
108 |
|
|
*
|
109 |
|
|
* Setups the receive address registers by setting the base receive address
|
110 |
|
|
* register to the devices MAC address and clearing all the other receive
|
111 |
|
|
* address registers to 0.
|
112 |
|
|
**/
|
113 |
|
|
void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
|
114 |
|
|
{
|
115 |
|
|
u32 i;
|
116 |
|
|
|
117 |
|
|
/* Setup the receive address */
|
118 |
|
|
hw_dbg(hw, "Programming MAC Address into RAR[0]\n");
|
119 |
|
|
|
120 |
|
|
e1000e_rar_set(hw, hw->mac.addr, 0);
|
121 |
|
|
|
122 |
|
|
/* Zero out the other (rar_entry_count - 1) receive addresses */
|
123 |
|
|
hw_dbg(hw, "Clearing RAR[1-%u]\n", rar_count-1);
|
124 |
|
|
for (i = 1; i < rar_count; i++) {
|
125 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
|
126 |
|
|
e1e_flush();
|
127 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
|
128 |
|
|
e1e_flush();
|
129 |
|
|
}
|
130 |
|
|
}
|
131 |
|
|
|
132 |
|
|
/**
|
133 |
|
|
* e1000e_rar_set - Set receive address register
|
134 |
|
|
* @hw: pointer to the HW structure
|
135 |
|
|
* @addr: pointer to the receive address
|
136 |
|
|
* @index: receive address array register
|
137 |
|
|
*
|
138 |
|
|
* Sets the receive address array register at index to the address passed
|
139 |
|
|
* in by addr.
|
140 |
|
|
**/
|
141 |
|
|
void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
|
142 |
|
|
{
|
143 |
|
|
u32 rar_low, rar_high;
|
144 |
|
|
|
145 |
|
|
/* HW expects these in little endian so we reverse the byte order
|
146 |
|
|
* from network order (big endian) to little endian
|
147 |
|
|
*/
|
148 |
|
|
rar_low = ((u32) addr[0] |
|
149 |
|
|
((u32) addr[1] << 8) |
|
150 |
|
|
((u32) addr[2] << 16) | ((u32) addr[3] << 24));
|
151 |
|
|
|
152 |
|
|
rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
|
153 |
|
|
|
154 |
|
|
rar_high |= E1000_RAH_AV;
|
155 |
|
|
|
156 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
|
157 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
|
158 |
|
|
}
|
159 |
|
|
|
160 |
|
|
/**
|
161 |
|
|
* e1000_mta_set - Set multicast filter table address
|
162 |
|
|
* @hw: pointer to the HW structure
|
163 |
|
|
* @hash_value: determines the MTA register and bit to set
|
164 |
|
|
*
|
165 |
|
|
* The multicast table address is a register array of 32-bit registers.
|
166 |
|
|
* The hash_value is used to determine what register the bit is in, the
|
167 |
|
|
* current value is read, the new bit is OR'd in and the new value is
|
168 |
|
|
* written back into the register.
|
169 |
|
|
**/
|
170 |
|
|
static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
|
171 |
|
|
{
|
172 |
|
|
u32 hash_bit, hash_reg, mta;
|
173 |
|
|
|
174 |
|
|
/* The MTA is a register array of 32-bit registers. It is
|
175 |
|
|
* treated like an array of (32*mta_reg_count) bits. We want to
|
176 |
|
|
* set bit BitArray[hash_value]. So we figure out what register
|
177 |
|
|
* the bit is in, read it, OR in the new bit, then write
|
178 |
|
|
* back the new value. The (hw->mac.mta_reg_count - 1) serves as a
|
179 |
|
|
* mask to bits 31:5 of the hash value which gives us the
|
180 |
|
|
* register we're modifying. The hash bit within that register
|
181 |
|
|
* is determined by the lower 5 bits of the hash value.
|
182 |
|
|
*/
|
183 |
|
|
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
|
184 |
|
|
hash_bit = hash_value & 0x1F;
|
185 |
|
|
|
186 |
|
|
mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
|
187 |
|
|
|
188 |
|
|
mta |= (1 << hash_bit);
|
189 |
|
|
|
190 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
|
191 |
|
|
e1e_flush();
|
192 |
|
|
}
|
193 |
|
|
|
194 |
|
|
/**
|
195 |
|
|
* e1000_hash_mc_addr - Generate a multicast hash value
|
196 |
|
|
* @hw: pointer to the HW structure
|
197 |
|
|
* @mc_addr: pointer to a multicast address
|
198 |
|
|
*
|
199 |
|
|
* Generates a multicast address hash value which is used to determine
|
200 |
|
|
* the multicast filter table array address and new table value. See
|
201 |
|
|
* e1000_mta_set_generic()
|
202 |
|
|
**/
|
203 |
|
|
static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
|
204 |
|
|
{
|
205 |
|
|
u32 hash_value, hash_mask;
|
206 |
|
|
u8 bit_shift = 0;
|
207 |
|
|
|
208 |
|
|
/* Register count multiplied by bits per register */
|
209 |
|
|
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
|
210 |
|
|
|
211 |
|
|
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
|
212 |
|
|
* where 0xFF would still fall within the hash mask. */
|
213 |
|
|
while (hash_mask >> bit_shift != 0xFF)
|
214 |
|
|
bit_shift++;
|
215 |
|
|
|
216 |
|
|
/* The portion of the address that is used for the hash table
|
217 |
|
|
* is determined by the mc_filter_type setting.
|
218 |
|
|
* The algorithm is such that there is a total of 8 bits of shifting.
|
219 |
|
|
* The bit_shift for a mc_filter_type of 0 represents the number of
|
220 |
|
|
* left-shifts where the MSB of mc_addr[5] would still fall within
|
221 |
|
|
* the hash_mask. Case 0 does this exactly. Since there are a total
|
222 |
|
|
* of 8 bits of shifting, then mc_addr[4] will shift right the
|
223 |
|
|
* remaining number of bits. Thus 8 - bit_shift. The rest of the
|
224 |
|
|
* cases are a variation of this algorithm...essentially raising the
|
225 |
|
|
* number of bits to shift mc_addr[5] left, while still keeping the
|
226 |
|
|
* 8-bit shifting total.
|
227 |
|
|
*/
|
228 |
|
|
/* For example, given the following Destination MAC Address and an
|
229 |
|
|
* mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
|
230 |
|
|
* we can see that the bit_shift for case 0 is 4. These are the hash
|
231 |
|
|
* values resulting from each mc_filter_type...
|
232 |
|
|
* [0] [1] [2] [3] [4] [5]
|
233 |
|
|
* 01 AA 00 12 34 56
|
234 |
|
|
* LSB MSB
|
235 |
|
|
*
|
236 |
|
|
* case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
|
237 |
|
|
* case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
|
238 |
|
|
* case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
|
239 |
|
|
* case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
|
240 |
|
|
*/
|
241 |
|
|
switch (hw->mac.mc_filter_type) {
|
242 |
|
|
default:
|
243 |
|
|
case 0:
|
244 |
|
|
break;
|
245 |
|
|
case 1:
|
246 |
|
|
bit_shift += 1;
|
247 |
|
|
break;
|
248 |
|
|
case 2:
|
249 |
|
|
bit_shift += 2;
|
250 |
|
|
break;
|
251 |
|
|
case 3:
|
252 |
|
|
bit_shift += 4;
|
253 |
|
|
break;
|
254 |
|
|
}
|
255 |
|
|
|
256 |
|
|
hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
|
257 |
|
|
(((u16) mc_addr[5]) << bit_shift)));
|
258 |
|
|
|
259 |
|
|
return hash_value;
|
260 |
|
|
}
|
261 |
|
|
|
262 |
|
|
/**
|
263 |
|
|
* e1000e_mc_addr_list_update_generic - Update Multicast addresses
|
264 |
|
|
* @hw: pointer to the HW structure
|
265 |
|
|
* @mc_addr_list: array of multicast addresses to program
|
266 |
|
|
* @mc_addr_count: number of multicast addresses to program
|
267 |
|
|
* @rar_used_count: the first RAR register free to program
|
268 |
|
|
* @rar_count: total number of supported Receive Address Registers
|
269 |
|
|
*
|
270 |
|
|
* Updates the Receive Address Registers and Multicast Table Array.
|
271 |
|
|
* The caller must have a packed mc_addr_list of multicast addresses.
|
272 |
|
|
* The parameter rar_count will usually be hw->mac.rar_entry_count
|
273 |
|
|
* unless there are workarounds that change this.
|
274 |
|
|
**/
|
275 |
|
|
void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
|
276 |
|
|
u8 *mc_addr_list, u32 mc_addr_count,
|
277 |
|
|
u32 rar_used_count, u32 rar_count)
|
278 |
|
|
{
|
279 |
|
|
u32 hash_value;
|
280 |
|
|
u32 i;
|
281 |
|
|
|
282 |
|
|
/* Load the first set of multicast addresses into the exact
|
283 |
|
|
* filters (RAR). If there are not enough to fill the RAR
|
284 |
|
|
* array, clear the filters.
|
285 |
|
|
*/
|
286 |
|
|
for (i = rar_used_count; i < rar_count; i++) {
|
287 |
|
|
if (mc_addr_count) {
|
288 |
|
|
e1000e_rar_set(hw, mc_addr_list, i);
|
289 |
|
|
mc_addr_count--;
|
290 |
|
|
mc_addr_list += ETH_ALEN;
|
291 |
|
|
} else {
|
292 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
|
293 |
|
|
e1e_flush();
|
294 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
|
295 |
|
|
e1e_flush();
|
296 |
|
|
}
|
297 |
|
|
}
|
298 |
|
|
|
299 |
|
|
/* Clear the old settings from the MTA */
|
300 |
|
|
hw_dbg(hw, "Clearing MTA\n");
|
301 |
|
|
for (i = 0; i < hw->mac.mta_reg_count; i++) {
|
302 |
|
|
E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
|
303 |
|
|
e1e_flush();
|
304 |
|
|
}
|
305 |
|
|
|
306 |
|
|
/* Load any remaining multicast addresses into the hash table. */
|
307 |
|
|
for (; mc_addr_count > 0; mc_addr_count--) {
|
308 |
|
|
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
|
309 |
|
|
hw_dbg(hw, "Hash value = 0x%03X\n", hash_value);
|
310 |
|
|
e1000_mta_set(hw, hash_value);
|
311 |
|
|
mc_addr_list += ETH_ALEN;
|
312 |
|
|
}
|
313 |
|
|
}
|
314 |
|
|
|
315 |
|
|
/**
|
316 |
|
|
* e1000e_clear_hw_cntrs_base - Clear base hardware counters
|
317 |
|
|
* @hw: pointer to the HW structure
|
318 |
|
|
*
|
319 |
|
|
* Clears the base hardware counters by reading the counter registers.
|
320 |
|
|
**/
|
321 |
|
|
void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
|
322 |
|
|
{
|
323 |
|
|
u32 temp;
|
324 |
|
|
|
325 |
|
|
temp = er32(CRCERRS);
|
326 |
|
|
temp = er32(SYMERRS);
|
327 |
|
|
temp = er32(MPC);
|
328 |
|
|
temp = er32(SCC);
|
329 |
|
|
temp = er32(ECOL);
|
330 |
|
|
temp = er32(MCC);
|
331 |
|
|
temp = er32(LATECOL);
|
332 |
|
|
temp = er32(COLC);
|
333 |
|
|
temp = er32(DC);
|
334 |
|
|
temp = er32(SEC);
|
335 |
|
|
temp = er32(RLEC);
|
336 |
|
|
temp = er32(XONRXC);
|
337 |
|
|
temp = er32(XONTXC);
|
338 |
|
|
temp = er32(XOFFRXC);
|
339 |
|
|
temp = er32(XOFFTXC);
|
340 |
|
|
temp = er32(FCRUC);
|
341 |
|
|
temp = er32(GPRC);
|
342 |
|
|
temp = er32(BPRC);
|
343 |
|
|
temp = er32(MPRC);
|
344 |
|
|
temp = er32(GPTC);
|
345 |
|
|
temp = er32(GORCL);
|
346 |
|
|
temp = er32(GORCH);
|
347 |
|
|
temp = er32(GOTCL);
|
348 |
|
|
temp = er32(GOTCH);
|
349 |
|
|
temp = er32(RNBC);
|
350 |
|
|
temp = er32(RUC);
|
351 |
|
|
temp = er32(RFC);
|
352 |
|
|
temp = er32(ROC);
|
353 |
|
|
temp = er32(RJC);
|
354 |
|
|
temp = er32(TORL);
|
355 |
|
|
temp = er32(TORH);
|
356 |
|
|
temp = er32(TOTL);
|
357 |
|
|
temp = er32(TOTH);
|
358 |
|
|
temp = er32(TPR);
|
359 |
|
|
temp = er32(TPT);
|
360 |
|
|
temp = er32(MPTC);
|
361 |
|
|
temp = er32(BPTC);
|
362 |
|
|
}
|
363 |
|
|
|
364 |
|
|
/**
|
365 |
|
|
* e1000e_check_for_copper_link - Check for link (Copper)
|
366 |
|
|
* @hw: pointer to the HW structure
|
367 |
|
|
*
|
368 |
|
|
* Checks to see of the link status of the hardware has changed. If a
|
369 |
|
|
* change in link status has been detected, then we read the PHY registers
|
370 |
|
|
* to get the current speed/duplex if link exists.
|
371 |
|
|
**/
|
372 |
|
|
s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
|
373 |
|
|
{
|
374 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
375 |
|
|
s32 ret_val;
|
376 |
|
|
bool link;
|
377 |
|
|
|
378 |
|
|
/* We only want to go out to the PHY registers to see if Auto-Neg
|
379 |
|
|
* has completed and/or if our link status has changed. The
|
380 |
|
|
* get_link_status flag is set upon receiving a Link Status
|
381 |
|
|
* Change or Rx Sequence Error interrupt.
|
382 |
|
|
*/
|
383 |
|
|
if (!mac->get_link_status)
|
384 |
|
|
return 0;
|
385 |
|
|
|
386 |
|
|
/* First we want to see if the MII Status Register reports
|
387 |
|
|
* link. If so, then we want to get the current speed/duplex
|
388 |
|
|
* of the PHY.
|
389 |
|
|
*/
|
390 |
|
|
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
|
391 |
|
|
if (ret_val)
|
392 |
|
|
return ret_val;
|
393 |
|
|
|
394 |
|
|
if (!link)
|
395 |
|
|
return ret_val; /* No link detected */
|
396 |
|
|
|
397 |
|
|
mac->get_link_status = 0;
|
398 |
|
|
|
399 |
|
|
/* Check if there was DownShift, must be checked
|
400 |
|
|
* immediately after link-up */
|
401 |
|
|
e1000e_check_downshift(hw);
|
402 |
|
|
|
403 |
|
|
/* If we are forcing speed/duplex, then we simply return since
|
404 |
|
|
* we have already determined whether we have link or not.
|
405 |
|
|
*/
|
406 |
|
|
if (!mac->autoneg) {
|
407 |
|
|
ret_val = -E1000_ERR_CONFIG;
|
408 |
|
|
return ret_val;
|
409 |
|
|
}
|
410 |
|
|
|
411 |
|
|
/* Auto-Neg is enabled. Auto Speed Detection takes care
|
412 |
|
|
* of MAC speed/duplex configuration. So we only need to
|
413 |
|
|
* configure Collision Distance in the MAC.
|
414 |
|
|
*/
|
415 |
|
|
e1000e_config_collision_dist(hw);
|
416 |
|
|
|
417 |
|
|
/* Configure Flow Control now that Auto-Neg has completed.
|
418 |
|
|
* First, we need to restore the desired flow control
|
419 |
|
|
* settings because we may have had to re-autoneg with a
|
420 |
|
|
* different link partner.
|
421 |
|
|
*/
|
422 |
|
|
ret_val = e1000e_config_fc_after_link_up(hw);
|
423 |
|
|
if (ret_val) {
|
424 |
|
|
hw_dbg(hw, "Error configuring flow control\n");
|
425 |
|
|
}
|
426 |
|
|
|
427 |
|
|
return ret_val;
|
428 |
|
|
}
|
429 |
|
|
|
430 |
|
|
/**
|
431 |
|
|
* e1000e_check_for_fiber_link - Check for link (Fiber)
|
432 |
|
|
* @hw: pointer to the HW structure
|
433 |
|
|
*
|
434 |
|
|
* Checks for link up on the hardware. If link is not up and we have
|
435 |
|
|
* a signal, then we need to force link up.
|
436 |
|
|
**/
|
437 |
|
|
s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
|
438 |
|
|
{
|
439 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
440 |
|
|
u32 rxcw;
|
441 |
|
|
u32 ctrl;
|
442 |
|
|
u32 status;
|
443 |
|
|
s32 ret_val;
|
444 |
|
|
|
445 |
|
|
ctrl = er32(CTRL);
|
446 |
|
|
status = er32(STATUS);
|
447 |
|
|
rxcw = er32(RXCW);
|
448 |
|
|
|
449 |
|
|
/* If we don't have link (auto-negotiation failed or link partner
|
450 |
|
|
* cannot auto-negotiate), the cable is plugged in (we have signal),
|
451 |
|
|
* and our link partner is not trying to auto-negotiate with us (we
|
452 |
|
|
* are receiving idles or data), we need to force link up. We also
|
453 |
|
|
* need to give auto-negotiation time to complete, in case the cable
|
454 |
|
|
* was just plugged in. The autoneg_failed flag does this.
|
455 |
|
|
*/
|
456 |
|
|
/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
|
457 |
|
|
if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
|
458 |
|
|
(!(rxcw & E1000_RXCW_C))) {
|
459 |
|
|
if (mac->autoneg_failed == 0) {
|
460 |
|
|
mac->autoneg_failed = 1;
|
461 |
|
|
return 0;
|
462 |
|
|
}
|
463 |
|
|
hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
|
464 |
|
|
|
465 |
|
|
/* Disable auto-negotiation in the TXCW register */
|
466 |
|
|
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
|
467 |
|
|
|
468 |
|
|
/* Force link-up and also force full-duplex. */
|
469 |
|
|
ctrl = er32(CTRL);
|
470 |
|
|
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
|
471 |
|
|
ew32(CTRL, ctrl);
|
472 |
|
|
|
473 |
|
|
/* Configure Flow Control after forcing link up. */
|
474 |
|
|
ret_val = e1000e_config_fc_after_link_up(hw);
|
475 |
|
|
if (ret_val) {
|
476 |
|
|
hw_dbg(hw, "Error configuring flow control\n");
|
477 |
|
|
return ret_val;
|
478 |
|
|
}
|
479 |
|
|
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
|
480 |
|
|
/* If we are forcing link and we are receiving /C/ ordered
|
481 |
|
|
* sets, re-enable auto-negotiation in the TXCW register
|
482 |
|
|
* and disable forced link in the Device Control register
|
483 |
|
|
* in an attempt to auto-negotiate with our link partner.
|
484 |
|
|
*/
|
485 |
|
|
hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
|
486 |
|
|
ew32(TXCW, mac->txcw);
|
487 |
|
|
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
|
488 |
|
|
|
489 |
|
|
mac->serdes_has_link = 1;
|
490 |
|
|
}
|
491 |
|
|
|
492 |
|
|
return 0;
|
493 |
|
|
}
|
494 |
|
|
|
495 |
|
|
/**
|
496 |
|
|
* e1000e_check_for_serdes_link - Check for link (Serdes)
|
497 |
|
|
* @hw: pointer to the HW structure
|
498 |
|
|
*
|
499 |
|
|
* Checks for link up on the hardware. If link is not up and we have
|
500 |
|
|
* a signal, then we need to force link up.
|
501 |
|
|
**/
|
502 |
|
|
s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
|
503 |
|
|
{
|
504 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
505 |
|
|
u32 rxcw;
|
506 |
|
|
u32 ctrl;
|
507 |
|
|
u32 status;
|
508 |
|
|
s32 ret_val;
|
509 |
|
|
|
510 |
|
|
ctrl = er32(CTRL);
|
511 |
|
|
status = er32(STATUS);
|
512 |
|
|
rxcw = er32(RXCW);
|
513 |
|
|
|
514 |
|
|
/* If we don't have link (auto-negotiation failed or link partner
|
515 |
|
|
* cannot auto-negotiate), and our link partner is not trying to
|
516 |
|
|
* auto-negotiate with us (we are receiving idles or data),
|
517 |
|
|
* we need to force link up. We also need to give auto-negotiation
|
518 |
|
|
* time to complete.
|
519 |
|
|
*/
|
520 |
|
|
/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
|
521 |
|
|
if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
|
522 |
|
|
if (mac->autoneg_failed == 0) {
|
523 |
|
|
mac->autoneg_failed = 1;
|
524 |
|
|
return 0;
|
525 |
|
|
}
|
526 |
|
|
hw_dbg(hw, "NOT RXing /C/, disable AutoNeg and force link.\n");
|
527 |
|
|
|
528 |
|
|
/* Disable auto-negotiation in the TXCW register */
|
529 |
|
|
ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
|
530 |
|
|
|
531 |
|
|
/* Force link-up and also force full-duplex. */
|
532 |
|
|
ctrl = er32(CTRL);
|
533 |
|
|
ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
|
534 |
|
|
ew32(CTRL, ctrl);
|
535 |
|
|
|
536 |
|
|
/* Configure Flow Control after forcing link up. */
|
537 |
|
|
ret_val = e1000e_config_fc_after_link_up(hw);
|
538 |
|
|
if (ret_val) {
|
539 |
|
|
hw_dbg(hw, "Error configuring flow control\n");
|
540 |
|
|
return ret_val;
|
541 |
|
|
}
|
542 |
|
|
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
|
543 |
|
|
/* If we are forcing link and we are receiving /C/ ordered
|
544 |
|
|
* sets, re-enable auto-negotiation in the TXCW register
|
545 |
|
|
* and disable forced link in the Device Control register
|
546 |
|
|
* in an attempt to auto-negotiate with our link partner.
|
547 |
|
|
*/
|
548 |
|
|
hw_dbg(hw, "RXing /C/, enable AutoNeg and stop forcing link.\n");
|
549 |
|
|
ew32(TXCW, mac->txcw);
|
550 |
|
|
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
|
551 |
|
|
|
552 |
|
|
mac->serdes_has_link = 1;
|
553 |
|
|
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
|
554 |
|
|
/* If we force link for non-auto-negotiation switch, check
|
555 |
|
|
* link status based on MAC synchronization for internal
|
556 |
|
|
* serdes media type.
|
557 |
|
|
*/
|
558 |
|
|
/* SYNCH bit and IV bit are sticky. */
|
559 |
|
|
udelay(10);
|
560 |
|
|
if (E1000_RXCW_SYNCH & er32(RXCW)) {
|
561 |
|
|
if (!(rxcw & E1000_RXCW_IV)) {
|
562 |
|
|
mac->serdes_has_link = 1;
|
563 |
|
|
hw_dbg(hw, "SERDES: Link is up.\n");
|
564 |
|
|
}
|
565 |
|
|
} else {
|
566 |
|
|
mac->serdes_has_link = 0;
|
567 |
|
|
hw_dbg(hw, "SERDES: Link is down.\n");
|
568 |
|
|
}
|
569 |
|
|
}
|
570 |
|
|
|
571 |
|
|
if (E1000_TXCW_ANE & er32(TXCW)) {
|
572 |
|
|
status = er32(STATUS);
|
573 |
|
|
mac->serdes_has_link = (status & E1000_STATUS_LU);
|
574 |
|
|
}
|
575 |
|
|
|
576 |
|
|
return 0;
|
577 |
|
|
}
|
578 |
|
|
|
579 |
|
|
/**
|
580 |
|
|
* e1000_set_default_fc_generic - Set flow control default values
|
581 |
|
|
* @hw: pointer to the HW structure
|
582 |
|
|
*
|
583 |
|
|
* Read the EEPROM for the default values for flow control and store the
|
584 |
|
|
* values.
|
585 |
|
|
**/
|
586 |
|
|
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
|
587 |
|
|
{
|
588 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
589 |
|
|
s32 ret_val;
|
590 |
|
|
u16 nvm_data;
|
591 |
|
|
|
592 |
|
|
if (mac->fc != e1000_fc_default)
|
593 |
|
|
return 0;
|
594 |
|
|
|
595 |
|
|
/* Read and store word 0x0F of the EEPROM. This word contains bits
|
596 |
|
|
* that determine the hardware's default PAUSE (flow control) mode,
|
597 |
|
|
* a bit that determines whether the HW defaults to enabling or
|
598 |
|
|
* disabling auto-negotiation, and the direction of the
|
599 |
|
|
* SW defined pins. If there is no SW over-ride of the flow
|
600 |
|
|
* control setting, then the variable hw->fc will
|
601 |
|
|
* be initialized based on a value in the EEPROM.
|
602 |
|
|
*/
|
603 |
|
|
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
|
604 |
|
|
|
605 |
|
|
if (ret_val) {
|
606 |
|
|
hw_dbg(hw, "NVM Read Error\n");
|
607 |
|
|
return ret_val;
|
608 |
|
|
}
|
609 |
|
|
|
610 |
|
|
if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
|
611 |
|
|
mac->fc = e1000_fc_none;
|
612 |
|
|
else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
|
613 |
|
|
NVM_WORD0F_ASM_DIR)
|
614 |
|
|
mac->fc = e1000_fc_tx_pause;
|
615 |
|
|
else
|
616 |
|
|
mac->fc = e1000_fc_full;
|
617 |
|
|
|
618 |
|
|
return 0;
|
619 |
|
|
}
|
620 |
|
|
|
621 |
|
|
/**
|
622 |
|
|
* e1000e_setup_link - Setup flow control and link settings
|
623 |
|
|
* @hw: pointer to the HW structure
|
624 |
|
|
*
|
625 |
|
|
* Determines which flow control settings to use, then configures flow
|
626 |
|
|
* control. Calls the appropriate media-specific link configuration
|
627 |
|
|
* function. Assuming the adapter has a valid link partner, a valid link
|
628 |
|
|
* should be established. Assumes the hardware has previously been reset
|
629 |
|
|
* and the transmitter and receiver are not enabled.
|
630 |
|
|
**/
|
631 |
|
|
s32 e1000e_setup_link(struct e1000_hw *hw)
|
632 |
|
|
{
|
633 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
634 |
|
|
s32 ret_val;
|
635 |
|
|
|
636 |
|
|
/* In the case of the phy reset being blocked, we already have a link.
|
637 |
|
|
* We do not need to set it up again.
|
638 |
|
|
*/
|
639 |
|
|
if (e1000_check_reset_block(hw))
|
640 |
|
|
return 0;
|
641 |
|
|
|
642 |
|
|
/*
|
643 |
|
|
* If flow control is set to default, set flow control based on
|
644 |
|
|
* the EEPROM flow control settings.
|
645 |
|
|
*/
|
646 |
|
|
if (mac->fc == e1000_fc_default) {
|
647 |
|
|
ret_val = e1000_set_default_fc_generic(hw);
|
648 |
|
|
if (ret_val)
|
649 |
|
|
return ret_val;
|
650 |
|
|
}
|
651 |
|
|
|
652 |
|
|
/* We want to save off the original Flow Control configuration just
|
653 |
|
|
* in case we get disconnected and then reconnected into a different
|
654 |
|
|
* hub or switch with different Flow Control capabilities.
|
655 |
|
|
*/
|
656 |
|
|
mac->original_fc = mac->fc;
|
657 |
|
|
|
658 |
|
|
hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
|
659 |
|
|
|
660 |
|
|
/* Call the necessary media_type subroutine to configure the link. */
|
661 |
|
|
ret_val = mac->ops.setup_physical_interface(hw);
|
662 |
|
|
if (ret_val)
|
663 |
|
|
return ret_val;
|
664 |
|
|
|
665 |
|
|
/* Initialize the flow control address, type, and PAUSE timer
|
666 |
|
|
* registers to their default values. This is done even if flow
|
667 |
|
|
* control is disabled, because it does not hurt anything to
|
668 |
|
|
* initialize these registers.
|
669 |
|
|
*/
|
670 |
|
|
hw_dbg(hw, "Initializing the Flow Control address, type and timer regs\n");
|
671 |
|
|
ew32(FCT, FLOW_CONTROL_TYPE);
|
672 |
|
|
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
|
673 |
|
|
ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
|
674 |
|
|
|
675 |
|
|
ew32(FCTTV, mac->fc_pause_time);
|
676 |
|
|
|
677 |
|
|
return e1000e_set_fc_watermarks(hw);
|
678 |
|
|
}
|
679 |
|
|
|
680 |
|
|
/**
|
681 |
|
|
* e1000_commit_fc_settings_generic - Configure flow control
|
682 |
|
|
* @hw: pointer to the HW structure
|
683 |
|
|
*
|
684 |
|
|
* Write the flow control settings to the Transmit Config Word Register (TXCW)
|
685 |
|
|
* base on the flow control settings in e1000_mac_info.
|
686 |
|
|
**/
|
687 |
|
|
static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
|
688 |
|
|
{
|
689 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
690 |
|
|
u32 txcw;
|
691 |
|
|
|
692 |
|
|
/* Check for a software override of the flow control settings, and
|
693 |
|
|
* setup the device accordingly. If auto-negotiation is enabled, then
|
694 |
|
|
* software will have to set the "PAUSE" bits to the correct value in
|
695 |
|
|
* the Transmit Config Word Register (TXCW) and re-start auto-
|
696 |
|
|
* negotiation. However, if auto-negotiation is disabled, then
|
697 |
|
|
* software will have to manually configure the two flow control enable
|
698 |
|
|
* bits in the CTRL register.
|
699 |
|
|
*
|
700 |
|
|
* The possible values of the "fc" parameter are:
|
701 |
|
|
* 0: Flow control is completely disabled
|
702 |
|
|
* 1: Rx flow control is enabled (we can receive pause frames,
|
703 |
|
|
* but not send pause frames).
|
704 |
|
|
* 2: Tx flow control is enabled (we can send pause frames but we
|
705 |
|
|
* do not support receiving pause frames).
|
706 |
|
|
* 3: Both Rx and TX flow control (symmetric) are enabled.
|
707 |
|
|
*/
|
708 |
|
|
switch (mac->fc) {
|
709 |
|
|
case e1000_fc_none:
|
710 |
|
|
/* Flow control completely disabled by a software over-ride. */
|
711 |
|
|
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
|
712 |
|
|
break;
|
713 |
|
|
case e1000_fc_rx_pause:
|
714 |
|
|
/* RX Flow control is enabled and TX Flow control is disabled
|
715 |
|
|
* by a software over-ride. Since there really isn't a way to
|
716 |
|
|
* advertise that we are capable of RX Pause ONLY, we will
|
717 |
|
|
* advertise that we support both symmetric and asymmetric RX
|
718 |
|
|
* PAUSE. Later, we will disable the adapter's ability to send
|
719 |
|
|
* PAUSE frames.
|
720 |
|
|
*/
|
721 |
|
|
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
|
722 |
|
|
break;
|
723 |
|
|
case e1000_fc_tx_pause:
|
724 |
|
|
/* TX Flow control is enabled, and RX Flow control is disabled,
|
725 |
|
|
* by a software over-ride.
|
726 |
|
|
*/
|
727 |
|
|
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
|
728 |
|
|
break;
|
729 |
|
|
case e1000_fc_full:
|
730 |
|
|
/* Flow control (both RX and TX) is enabled by a software
|
731 |
|
|
* over-ride.
|
732 |
|
|
*/
|
733 |
|
|
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
|
734 |
|
|
break;
|
735 |
|
|
default:
|
736 |
|
|
hw_dbg(hw, "Flow control param set incorrectly\n");
|
737 |
|
|
return -E1000_ERR_CONFIG;
|
738 |
|
|
break;
|
739 |
|
|
}
|
740 |
|
|
|
741 |
|
|
ew32(TXCW, txcw);
|
742 |
|
|
mac->txcw = txcw;
|
743 |
|
|
|
744 |
|
|
return 0;
|
745 |
|
|
}
|
746 |
|
|
|
747 |
|
|
/**
|
748 |
|
|
* e1000_poll_fiber_serdes_link_generic - Poll for link up
|
749 |
|
|
* @hw: pointer to the HW structure
|
750 |
|
|
*
|
751 |
|
|
* Polls for link up by reading the status register, if link fails to come
|
752 |
|
|
* up with auto-negotiation, then the link is forced if a signal is detected.
|
753 |
|
|
**/
|
754 |
|
|
static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
|
755 |
|
|
{
|
756 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
757 |
|
|
u32 i, status;
|
758 |
|
|
s32 ret_val;
|
759 |
|
|
|
760 |
|
|
/* If we have a signal (the cable is plugged in, or assumed true for
|
761 |
|
|
* serdes media) then poll for a "Link-Up" indication in the Device
|
762 |
|
|
* Status Register. Time-out if a link isn't seen in 500 milliseconds
|
763 |
|
|
* seconds (Auto-negotiation should complete in less than 500
|
764 |
|
|
* milliseconds even if the other end is doing it in SW).
|
765 |
|
|
*/
|
766 |
|
|
for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
|
767 |
|
|
msleep(10);
|
768 |
|
|
status = er32(STATUS);
|
769 |
|
|
if (status & E1000_STATUS_LU)
|
770 |
|
|
break;
|
771 |
|
|
}
|
772 |
|
|
if (i == FIBER_LINK_UP_LIMIT) {
|
773 |
|
|
hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
|
774 |
|
|
mac->autoneg_failed = 1;
|
775 |
|
|
/* AutoNeg failed to achieve a link, so we'll call
|
776 |
|
|
* mac->check_for_link. This routine will force the
|
777 |
|
|
* link up if we detect a signal. This will allow us to
|
778 |
|
|
* communicate with non-autonegotiating link partners.
|
779 |
|
|
*/
|
780 |
|
|
ret_val = mac->ops.check_for_link(hw);
|
781 |
|
|
if (ret_val) {
|
782 |
|
|
hw_dbg(hw, "Error while checking for link\n");
|
783 |
|
|
return ret_val;
|
784 |
|
|
}
|
785 |
|
|
mac->autoneg_failed = 0;
|
786 |
|
|
} else {
|
787 |
|
|
mac->autoneg_failed = 0;
|
788 |
|
|
hw_dbg(hw, "Valid Link Found\n");
|
789 |
|
|
}
|
790 |
|
|
|
791 |
|
|
return 0;
|
792 |
|
|
}
|
793 |
|
|
|
794 |
|
|
/**
|
795 |
|
|
* e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
|
796 |
|
|
* @hw: pointer to the HW structure
|
797 |
|
|
*
|
798 |
|
|
* Configures collision distance and flow control for fiber and serdes
|
799 |
|
|
* links. Upon successful setup, poll for link.
|
800 |
|
|
**/
|
801 |
|
|
s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
|
802 |
|
|
{
|
803 |
|
|
u32 ctrl;
|
804 |
|
|
s32 ret_val;
|
805 |
|
|
|
806 |
|
|
ctrl = er32(CTRL);
|
807 |
|
|
|
808 |
|
|
/* Take the link out of reset */
|
809 |
|
|
ctrl &= ~E1000_CTRL_LRST;
|
810 |
|
|
|
811 |
|
|
e1000e_config_collision_dist(hw);
|
812 |
|
|
|
813 |
|
|
ret_val = e1000_commit_fc_settings_generic(hw);
|
814 |
|
|
if (ret_val)
|
815 |
|
|
return ret_val;
|
816 |
|
|
|
817 |
|
|
/* Since auto-negotiation is enabled, take the link out of reset (the
|
818 |
|
|
* link will be in reset, because we previously reset the chip). This
|
819 |
|
|
* will restart auto-negotiation. If auto-negotiation is successful
|
820 |
|
|
* then the link-up status bit will be set and the flow control enable
|
821 |
|
|
* bits (RFCE and TFCE) will be set according to their negotiated value.
|
822 |
|
|
*/
|
823 |
|
|
hw_dbg(hw, "Auto-negotiation enabled\n");
|
824 |
|
|
|
825 |
|
|
ew32(CTRL, ctrl);
|
826 |
|
|
e1e_flush();
|
827 |
|
|
msleep(1);
|
828 |
|
|
|
829 |
|
|
/* For these adapters, the SW defineable pin 1 is set when the optics
|
830 |
|
|
* detect a signal. If we have a signal, then poll for a "Link-Up"
|
831 |
|
|
* indication.
|
832 |
|
|
*/
|
833 |
|
|
if (hw->media_type == e1000_media_type_internal_serdes ||
|
834 |
|
|
(er32(CTRL) & E1000_CTRL_SWDPIN1)) {
|
835 |
|
|
ret_val = e1000_poll_fiber_serdes_link_generic(hw);
|
836 |
|
|
} else {
|
837 |
|
|
hw_dbg(hw, "No signal detected\n");
|
838 |
|
|
}
|
839 |
|
|
|
840 |
|
|
return 0;
|
841 |
|
|
}
|
842 |
|
|
|
843 |
|
|
/**
|
844 |
|
|
* e1000e_config_collision_dist - Configure collision distance
|
845 |
|
|
* @hw: pointer to the HW structure
|
846 |
|
|
*
|
847 |
|
|
* Configures the collision distance to the default value and is used
|
848 |
|
|
* during link setup. Currently no func pointer exists and all
|
849 |
|
|
* implementations are handled in the generic version of this function.
|
850 |
|
|
**/
|
851 |
|
|
void e1000e_config_collision_dist(struct e1000_hw *hw)
|
852 |
|
|
{
|
853 |
|
|
u32 tctl;
|
854 |
|
|
|
855 |
|
|
tctl = er32(TCTL);
|
856 |
|
|
|
857 |
|
|
tctl &= ~E1000_TCTL_COLD;
|
858 |
|
|
tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
|
859 |
|
|
|
860 |
|
|
ew32(TCTL, tctl);
|
861 |
|
|
e1e_flush();
|
862 |
|
|
}
|
863 |
|
|
|
864 |
|
|
/**
|
865 |
|
|
* e1000e_set_fc_watermarks - Set flow control high/low watermarks
|
866 |
|
|
* @hw: pointer to the HW structure
|
867 |
|
|
*
|
868 |
|
|
* Sets the flow control high/low threshold (watermark) registers. If
|
869 |
|
|
* flow control XON frame transmission is enabled, then set XON frame
|
870 |
|
|
* tansmission as well.
|
871 |
|
|
**/
|
872 |
|
|
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
|
873 |
|
|
{
|
874 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
875 |
|
|
u32 fcrtl = 0, fcrth = 0;
|
876 |
|
|
|
877 |
|
|
/* Set the flow control receive threshold registers. Normally,
|
878 |
|
|
* these registers will be set to a default threshold that may be
|
879 |
|
|
* adjusted later by the driver's runtime code. However, if the
|
880 |
|
|
* ability to transmit pause frames is not enabled, then these
|
881 |
|
|
* registers will be set to 0.
|
882 |
|
|
*/
|
883 |
|
|
if (mac->fc & e1000_fc_tx_pause) {
|
884 |
|
|
/* We need to set up the Receive Threshold high and low water
|
885 |
|
|
* marks as well as (optionally) enabling the transmission of
|
886 |
|
|
* XON frames.
|
887 |
|
|
*/
|
888 |
|
|
fcrtl = mac->fc_low_water;
|
889 |
|
|
fcrtl |= E1000_FCRTL_XONE;
|
890 |
|
|
fcrth = mac->fc_high_water;
|
891 |
|
|
}
|
892 |
|
|
ew32(FCRTL, fcrtl);
|
893 |
|
|
ew32(FCRTH, fcrth);
|
894 |
|
|
|
895 |
|
|
return 0;
|
896 |
|
|
}
|
897 |
|
|
|
898 |
|
|
/**
|
899 |
|
|
* e1000e_force_mac_fc - Force the MAC's flow control settings
|
900 |
|
|
* @hw: pointer to the HW structure
|
901 |
|
|
*
|
902 |
|
|
* Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the
|
903 |
|
|
* device control register to reflect the adapter settings. TFCE and RFCE
|
904 |
|
|
* need to be explicitly set by software when a copper PHY is used because
|
905 |
|
|
* autonegotiation is managed by the PHY rather than the MAC. Software must
|
906 |
|
|
* also configure these bits when link is forced on a fiber connection.
|
907 |
|
|
**/
|
908 |
|
|
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
|
909 |
|
|
{
|
910 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
911 |
|
|
u32 ctrl;
|
912 |
|
|
|
913 |
|
|
ctrl = er32(CTRL);
|
914 |
|
|
|
915 |
|
|
/* Because we didn't get link via the internal auto-negotiation
|
916 |
|
|
* mechanism (we either forced link or we got link via PHY
|
917 |
|
|
* auto-neg), we have to manually enable/disable transmit an
|
918 |
|
|
* receive flow control.
|
919 |
|
|
*
|
920 |
|
|
* The "Case" statement below enables/disable flow control
|
921 |
|
|
* according to the "mac->fc" parameter.
|
922 |
|
|
*
|
923 |
|
|
* The possible values of the "fc" parameter are:
|
924 |
|
|
* 0: Flow control is completely disabled
|
925 |
|
|
* 1: Rx flow control is enabled (we can receive pause
|
926 |
|
|
* frames but not send pause frames).
|
927 |
|
|
* 2: Tx flow control is enabled (we can send pause frames
|
928 |
|
|
* frames but we do not receive pause frames).
|
929 |
|
|
* 3: Both Rx and TX flow control (symmetric) is enabled.
|
930 |
|
|
* other: No other values should be possible at this point.
|
931 |
|
|
*/
|
932 |
|
|
hw_dbg(hw, "mac->fc = %u\n", mac->fc);
|
933 |
|
|
|
934 |
|
|
switch (mac->fc) {
|
935 |
|
|
case e1000_fc_none:
|
936 |
|
|
ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
|
937 |
|
|
break;
|
938 |
|
|
case e1000_fc_rx_pause:
|
939 |
|
|
ctrl &= (~E1000_CTRL_TFCE);
|
940 |
|
|
ctrl |= E1000_CTRL_RFCE;
|
941 |
|
|
break;
|
942 |
|
|
case e1000_fc_tx_pause:
|
943 |
|
|
ctrl &= (~E1000_CTRL_RFCE);
|
944 |
|
|
ctrl |= E1000_CTRL_TFCE;
|
945 |
|
|
break;
|
946 |
|
|
case e1000_fc_full:
|
947 |
|
|
ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
|
948 |
|
|
break;
|
949 |
|
|
default:
|
950 |
|
|
hw_dbg(hw, "Flow control param set incorrectly\n");
|
951 |
|
|
return -E1000_ERR_CONFIG;
|
952 |
|
|
}
|
953 |
|
|
|
954 |
|
|
ew32(CTRL, ctrl);
|
955 |
|
|
|
956 |
|
|
return 0;
|
957 |
|
|
}
|
958 |
|
|
|
959 |
|
|
/**
|
960 |
|
|
* e1000e_config_fc_after_link_up - Configures flow control after link
|
961 |
|
|
* @hw: pointer to the HW structure
|
962 |
|
|
*
|
963 |
|
|
* Checks the status of auto-negotiation after link up to ensure that the
|
964 |
|
|
* speed and duplex were not forced. If the link needed to be forced, then
|
965 |
|
|
* flow control needs to be forced also. If auto-negotiation is enabled
|
966 |
|
|
* and did not fail, then we configure flow control based on our link
|
967 |
|
|
* partner.
|
968 |
|
|
**/
|
969 |
|
|
s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
|
970 |
|
|
{
|
971 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
972 |
|
|
s32 ret_val = 0;
|
973 |
|
|
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
|
974 |
|
|
u16 speed, duplex;
|
975 |
|
|
|
976 |
|
|
/* Check for the case where we have fiber media and auto-neg failed
|
977 |
|
|
* so we had to force link. In this case, we need to force the
|
978 |
|
|
* configuration of the MAC to match the "fc" parameter.
|
979 |
|
|
*/
|
980 |
|
|
if (mac->autoneg_failed) {
|
981 |
|
|
if (hw->media_type == e1000_media_type_fiber ||
|
982 |
|
|
hw->media_type == e1000_media_type_internal_serdes)
|
983 |
|
|
ret_val = e1000e_force_mac_fc(hw);
|
984 |
|
|
} else {
|
985 |
|
|
if (hw->media_type == e1000_media_type_copper)
|
986 |
|
|
ret_val = e1000e_force_mac_fc(hw);
|
987 |
|
|
}
|
988 |
|
|
|
989 |
|
|
if (ret_val) {
|
990 |
|
|
hw_dbg(hw, "Error forcing flow control settings\n");
|
991 |
|
|
return ret_val;
|
992 |
|
|
}
|
993 |
|
|
|
994 |
|
|
/* Check for the case where we have copper media and auto-neg is
|
995 |
|
|
* enabled. In this case, we need to check and see if Auto-Neg
|
996 |
|
|
* has completed, and if so, how the PHY and link partner has
|
997 |
|
|
* flow control configured.
|
998 |
|
|
*/
|
999 |
|
|
if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
|
1000 |
|
|
/* Read the MII Status Register and check to see if AutoNeg
|
1001 |
|
|
* has completed. We read this twice because this reg has
|
1002 |
|
|
* some "sticky" (latched) bits.
|
1003 |
|
|
*/
|
1004 |
|
|
ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
|
1005 |
|
|
if (ret_val)
|
1006 |
|
|
return ret_val;
|
1007 |
|
|
ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
|
1008 |
|
|
if (ret_val)
|
1009 |
|
|
return ret_val;
|
1010 |
|
|
|
1011 |
|
|
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
|
1012 |
|
|
hw_dbg(hw, "Copper PHY and Auto Neg "
|
1013 |
|
|
"has not completed.\n");
|
1014 |
|
|
return ret_val;
|
1015 |
|
|
}
|
1016 |
|
|
|
1017 |
|
|
/* The AutoNeg process has completed, so we now need to
|
1018 |
|
|
* read both the Auto Negotiation Advertisement
|
1019 |
|
|
* Register (Address 4) and the Auto_Negotiation Base
|
1020 |
|
|
* Page Ability Register (Address 5) to determine how
|
1021 |
|
|
* flow control was negotiated.
|
1022 |
|
|
*/
|
1023 |
|
|
ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
|
1024 |
|
|
if (ret_val)
|
1025 |
|
|
return ret_val;
|
1026 |
|
|
ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
|
1027 |
|
|
if (ret_val)
|
1028 |
|
|
return ret_val;
|
1029 |
|
|
|
1030 |
|
|
/* Two bits in the Auto Negotiation Advertisement Register
|
1031 |
|
|
* (Address 4) and two bits in the Auto Negotiation Base
|
1032 |
|
|
* Page Ability Register (Address 5) determine flow control
|
1033 |
|
|
* for both the PHY and the link partner. The following
|
1034 |
|
|
* table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
|
1035 |
|
|
* 1999, describes these PAUSE resolution bits and how flow
|
1036 |
|
|
* control is determined based upon these settings.
|
1037 |
|
|
* NOTE: DC = Don't Care
|
1038 |
|
|
*
|
1039 |
|
|
* LOCAL DEVICE | LINK PARTNER
|
1040 |
|
|
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
|
1041 |
|
|
*-------|---------|-------|---------|--------------------
|
1042 |
|
|
* 0 | 0 | DC | DC | e1000_fc_none
|
1043 |
|
|
* 0 | 1 | 0 | DC | e1000_fc_none
|
1044 |
|
|
* 0 | 1 | 1 | 0 | e1000_fc_none
|
1045 |
|
|
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
|
1046 |
|
|
* 1 | 0 | 0 | DC | e1000_fc_none
|
1047 |
|
|
* 1 | DC | 1 | DC | e1000_fc_full
|
1048 |
|
|
* 1 | 1 | 0 | 0 | e1000_fc_none
|
1049 |
|
|
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
|
1050 |
|
|
*
|
1051 |
|
|
*/
|
1052 |
|
|
/* Are both PAUSE bits set to 1? If so, this implies
|
1053 |
|
|
* Symmetric Flow Control is enabled at both ends. The
|
1054 |
|
|
* ASM_DIR bits are irrelevant per the spec.
|
1055 |
|
|
*
|
1056 |
|
|
* For Symmetric Flow Control:
|
1057 |
|
|
*
|
1058 |
|
|
* LOCAL DEVICE | LINK PARTNER
|
1059 |
|
|
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
|
1060 |
|
|
*-------|---------|-------|---------|--------------------
|
1061 |
|
|
* 1 | DC | 1 | DC | E1000_fc_full
|
1062 |
|
|
*
|
1063 |
|
|
*/
|
1064 |
|
|
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
|
1065 |
|
|
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
|
1066 |
|
|
/* Now we need to check if the user selected RX ONLY
|
1067 |
|
|
* of pause frames. In this case, we had to advertise
|
1068 |
|
|
* FULL flow control because we could not advertise RX
|
1069 |
|
|
* ONLY. Hence, we must now check to see if we need to
|
1070 |
|
|
* turn OFF the TRANSMISSION of PAUSE frames.
|
1071 |
|
|
*/
|
1072 |
|
|
if (mac->original_fc == e1000_fc_full) {
|
1073 |
|
|
mac->fc = e1000_fc_full;
|
1074 |
|
|
hw_dbg(hw, "Flow Control = FULL.\r\n");
|
1075 |
|
|
} else {
|
1076 |
|
|
mac->fc = e1000_fc_rx_pause;
|
1077 |
|
|
hw_dbg(hw, "Flow Control = "
|
1078 |
|
|
"RX PAUSE frames only.\r\n");
|
1079 |
|
|
}
|
1080 |
|
|
}
|
1081 |
|
|
/* For receiving PAUSE frames ONLY.
|
1082 |
|
|
*
|
1083 |
|
|
* LOCAL DEVICE | LINK PARTNER
|
1084 |
|
|
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
|
1085 |
|
|
*-------|---------|-------|---------|--------------------
|
1086 |
|
|
* 0 | 1 | 1 | 1 | e1000_fc_tx_pause
|
1087 |
|
|
*
|
1088 |
|
|
*/
|
1089 |
|
|
else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
|
1090 |
|
|
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
|
1091 |
|
|
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
|
1092 |
|
|
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
|
1093 |
|
|
mac->fc = e1000_fc_tx_pause;
|
1094 |
|
|
hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
|
1095 |
|
|
}
|
1096 |
|
|
/* For transmitting PAUSE frames ONLY.
|
1097 |
|
|
*
|
1098 |
|
|
* LOCAL DEVICE | LINK PARTNER
|
1099 |
|
|
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
|
1100 |
|
|
*-------|---------|-------|---------|--------------------
|
1101 |
|
|
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
|
1102 |
|
|
*
|
1103 |
|
|
*/
|
1104 |
|
|
else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
|
1105 |
|
|
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
|
1106 |
|
|
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
|
1107 |
|
|
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
|
1108 |
|
|
mac->fc = e1000_fc_rx_pause;
|
1109 |
|
|
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
|
1110 |
|
|
}
|
1111 |
|
|
/* Per the IEEE spec, at this point flow control should be
|
1112 |
|
|
* disabled. However, we want to consider that we could
|
1113 |
|
|
* be connected to a legacy switch that doesn't advertise
|
1114 |
|
|
* desired flow control, but can be forced on the link
|
1115 |
|
|
* partner. So if we advertised no flow control, that is
|
1116 |
|
|
* what we will resolve to. If we advertised some kind of
|
1117 |
|
|
* receive capability (Rx Pause Only or Full Flow Control)
|
1118 |
|
|
* and the link partner advertised none, we will configure
|
1119 |
|
|
* ourselves to enable Rx Flow Control only. We can do
|
1120 |
|
|
* this safely for two reasons: If the link partner really
|
1121 |
|
|
* didn't want flow control enabled, and we enable Rx, no
|
1122 |
|
|
* harm done since we won't be receiving any PAUSE frames
|
1123 |
|
|
* anyway. If the intent on the link partner was to have
|
1124 |
|
|
* flow control enabled, then by us enabling RX only, we
|
1125 |
|
|
* can at least receive pause frames and process them.
|
1126 |
|
|
* This is a good idea because in most cases, since we are
|
1127 |
|
|
* predominantly a server NIC, more times than not we will
|
1128 |
|
|
* be asked to delay transmission of packets than asking
|
1129 |
|
|
* our link partner to pause transmission of frames.
|
1130 |
|
|
*/
|
1131 |
|
|
else if ((mac->original_fc == e1000_fc_none) ||
|
1132 |
|
|
(mac->original_fc == e1000_fc_tx_pause)) {
|
1133 |
|
|
mac->fc = e1000_fc_none;
|
1134 |
|
|
hw_dbg(hw, "Flow Control = NONE.\r\n");
|
1135 |
|
|
} else {
|
1136 |
|
|
mac->fc = e1000_fc_rx_pause;
|
1137 |
|
|
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
|
1138 |
|
|
}
|
1139 |
|
|
|
1140 |
|
|
/* Now we need to do one last check... If we auto-
|
1141 |
|
|
* negotiated to HALF DUPLEX, flow control should not be
|
1142 |
|
|
* enabled per IEEE 802.3 spec.
|
1143 |
|
|
*/
|
1144 |
|
|
ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
|
1145 |
|
|
if (ret_val) {
|
1146 |
|
|
hw_dbg(hw, "Error getting link speed and duplex\n");
|
1147 |
|
|
return ret_val;
|
1148 |
|
|
}
|
1149 |
|
|
|
1150 |
|
|
if (duplex == HALF_DUPLEX)
|
1151 |
|
|
mac->fc = e1000_fc_none;
|
1152 |
|
|
|
1153 |
|
|
/* Now we call a subroutine to actually force the MAC
|
1154 |
|
|
* controller to use the correct flow control settings.
|
1155 |
|
|
*/
|
1156 |
|
|
ret_val = e1000e_force_mac_fc(hw);
|
1157 |
|
|
if (ret_val) {
|
1158 |
|
|
hw_dbg(hw, "Error forcing flow control settings\n");
|
1159 |
|
|
return ret_val;
|
1160 |
|
|
}
|
1161 |
|
|
}
|
1162 |
|
|
|
1163 |
|
|
return 0;
|
1164 |
|
|
}
|
1165 |
|
|
|
1166 |
|
|
/**
|
1167 |
|
|
* e1000e_get_speed_and_duplex_copper - Retreive current speed/duplex
|
1168 |
|
|
* @hw: pointer to the HW structure
|
1169 |
|
|
* @speed: stores the current speed
|
1170 |
|
|
* @duplex: stores the current duplex
|
1171 |
|
|
*
|
1172 |
|
|
* Read the status register for the current speed/duplex and store the current
|
1173 |
|
|
* speed and duplex for copper connections.
|
1174 |
|
|
**/
|
1175 |
|
|
s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
|
1176 |
|
|
{
|
1177 |
|
|
u32 status;
|
1178 |
|
|
|
1179 |
|
|
status = er32(STATUS);
|
1180 |
|
|
if (status & E1000_STATUS_SPEED_1000) {
|
1181 |
|
|
*speed = SPEED_1000;
|
1182 |
|
|
hw_dbg(hw, "1000 Mbs, ");
|
1183 |
|
|
} else if (status & E1000_STATUS_SPEED_100) {
|
1184 |
|
|
*speed = SPEED_100;
|
1185 |
|
|
hw_dbg(hw, "100 Mbs, ");
|
1186 |
|
|
} else {
|
1187 |
|
|
*speed = SPEED_10;
|
1188 |
|
|
hw_dbg(hw, "10 Mbs, ");
|
1189 |
|
|
}
|
1190 |
|
|
|
1191 |
|
|
if (status & E1000_STATUS_FD) {
|
1192 |
|
|
*duplex = FULL_DUPLEX;
|
1193 |
|
|
hw_dbg(hw, "Full Duplex\n");
|
1194 |
|
|
} else {
|
1195 |
|
|
*duplex = HALF_DUPLEX;
|
1196 |
|
|
hw_dbg(hw, "Half Duplex\n");
|
1197 |
|
|
}
|
1198 |
|
|
|
1199 |
|
|
return 0;
|
1200 |
|
|
}
|
1201 |
|
|
|
1202 |
|
|
/**
|
1203 |
|
|
* e1000e_get_speed_and_duplex_fiber_serdes - Retreive current speed/duplex
|
1204 |
|
|
* @hw: pointer to the HW structure
|
1205 |
|
|
* @speed: stores the current speed
|
1206 |
|
|
* @duplex: stores the current duplex
|
1207 |
|
|
*
|
1208 |
|
|
* Sets the speed and duplex to gigabit full duplex (the only possible option)
|
1209 |
|
|
* for fiber/serdes links.
|
1210 |
|
|
**/
|
1211 |
|
|
s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
|
1212 |
|
|
{
|
1213 |
|
|
*speed = SPEED_1000;
|
1214 |
|
|
*duplex = FULL_DUPLEX;
|
1215 |
|
|
|
1216 |
|
|
return 0;
|
1217 |
|
|
}
|
1218 |
|
|
|
1219 |
|
|
/**
|
1220 |
|
|
* e1000e_get_hw_semaphore - Acquire hardware semaphore
|
1221 |
|
|
* @hw: pointer to the HW structure
|
1222 |
|
|
*
|
1223 |
|
|
* Acquire the HW semaphore to access the PHY or NVM
|
1224 |
|
|
**/
|
1225 |
|
|
s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
|
1226 |
|
|
{
|
1227 |
|
|
u32 swsm;
|
1228 |
|
|
s32 timeout = hw->nvm.word_size + 1;
|
1229 |
|
|
s32 i = 0;
|
1230 |
|
|
|
1231 |
|
|
/* Get the SW semaphore */
|
1232 |
|
|
while (i < timeout) {
|
1233 |
|
|
swsm = er32(SWSM);
|
1234 |
|
|
if (!(swsm & E1000_SWSM_SMBI))
|
1235 |
|
|
break;
|
1236 |
|
|
|
1237 |
|
|
udelay(50);
|
1238 |
|
|
i++;
|
1239 |
|
|
}
|
1240 |
|
|
|
1241 |
|
|
if (i == timeout) {
|
1242 |
|
|
hw_dbg(hw, "Driver can't access device - SMBI bit is set.\n");
|
1243 |
|
|
return -E1000_ERR_NVM;
|
1244 |
|
|
}
|
1245 |
|
|
|
1246 |
|
|
/* Get the FW semaphore. */
|
1247 |
|
|
for (i = 0; i < timeout; i++) {
|
1248 |
|
|
swsm = er32(SWSM);
|
1249 |
|
|
ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
|
1250 |
|
|
|
1251 |
|
|
/* Semaphore acquired if bit latched */
|
1252 |
|
|
if (er32(SWSM) & E1000_SWSM_SWESMBI)
|
1253 |
|
|
break;
|
1254 |
|
|
|
1255 |
|
|
udelay(50);
|
1256 |
|
|
}
|
1257 |
|
|
|
1258 |
|
|
if (i == timeout) {
|
1259 |
|
|
/* Release semaphores */
|
1260 |
|
|
e1000e_put_hw_semaphore(hw);
|
1261 |
|
|
hw_dbg(hw, "Driver can't access the NVM\n");
|
1262 |
|
|
return -E1000_ERR_NVM;
|
1263 |
|
|
}
|
1264 |
|
|
|
1265 |
|
|
return 0;
|
1266 |
|
|
}
|
1267 |
|
|
|
1268 |
|
|
/**
|
1269 |
|
|
* e1000e_put_hw_semaphore - Release hardware semaphore
|
1270 |
|
|
* @hw: pointer to the HW structure
|
1271 |
|
|
*
|
1272 |
|
|
* Release hardware semaphore used to access the PHY or NVM
|
1273 |
|
|
**/
|
1274 |
|
|
void e1000e_put_hw_semaphore(struct e1000_hw *hw)
|
1275 |
|
|
{
|
1276 |
|
|
u32 swsm;
|
1277 |
|
|
|
1278 |
|
|
swsm = er32(SWSM);
|
1279 |
|
|
swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
|
1280 |
|
|
ew32(SWSM, swsm);
|
1281 |
|
|
}
|
1282 |
|
|
|
1283 |
|
|
/**
|
1284 |
|
|
* e1000e_get_auto_rd_done - Check for auto read completion
|
1285 |
|
|
* @hw: pointer to the HW structure
|
1286 |
|
|
*
|
1287 |
|
|
* Check EEPROM for Auto Read done bit.
|
1288 |
|
|
**/
|
1289 |
|
|
s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
|
1290 |
|
|
{
|
1291 |
|
|
s32 i = 0;
|
1292 |
|
|
|
1293 |
|
|
while (i < AUTO_READ_DONE_TIMEOUT) {
|
1294 |
|
|
if (er32(EECD) & E1000_EECD_AUTO_RD)
|
1295 |
|
|
break;
|
1296 |
|
|
msleep(1);
|
1297 |
|
|
i++;
|
1298 |
|
|
}
|
1299 |
|
|
|
1300 |
|
|
if (i == AUTO_READ_DONE_TIMEOUT) {
|
1301 |
|
|
hw_dbg(hw, "Auto read by HW from NVM has not completed.\n");
|
1302 |
|
|
return -E1000_ERR_RESET;
|
1303 |
|
|
}
|
1304 |
|
|
|
1305 |
|
|
return 0;
|
1306 |
|
|
}
|
1307 |
|
|
|
1308 |
|
|
/**
|
1309 |
|
|
* e1000e_valid_led_default - Verify a valid default LED config
|
1310 |
|
|
* @hw: pointer to the HW structure
|
1311 |
|
|
* @data: pointer to the NVM (EEPROM)
|
1312 |
|
|
*
|
1313 |
|
|
* Read the EEPROM for the current default LED configuration. If the
|
1314 |
|
|
* LED configuration is not valid, set to a valid LED configuration.
|
1315 |
|
|
**/
|
1316 |
|
|
s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
|
1317 |
|
|
{
|
1318 |
|
|
s32 ret_val;
|
1319 |
|
|
|
1320 |
|
|
ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
|
1321 |
|
|
if (ret_val) {
|
1322 |
|
|
hw_dbg(hw, "NVM Read Error\n");
|
1323 |
|
|
return ret_val;
|
1324 |
|
|
}
|
1325 |
|
|
|
1326 |
|
|
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
|
1327 |
|
|
*data = ID_LED_DEFAULT;
|
1328 |
|
|
|
1329 |
|
|
return 0;
|
1330 |
|
|
}
|
1331 |
|
|
|
1332 |
|
|
/**
|
1333 |
|
|
* e1000e_id_led_init -
|
1334 |
|
|
* @hw: pointer to the HW structure
|
1335 |
|
|
*
|
1336 |
|
|
**/
|
1337 |
|
|
s32 e1000e_id_led_init(struct e1000_hw *hw)
|
1338 |
|
|
{
|
1339 |
|
|
struct e1000_mac_info *mac = &hw->mac;
|
1340 |
|
|
s32 ret_val;
|
1341 |
|
|
const u32 ledctl_mask = 0x000000FF;
|
1342 |
|
|
const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
|
1343 |
|
|
const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
|
1344 |
|
|
u16 data, i, temp;
|
1345 |
|
|
const u16 led_mask = 0x0F;
|
1346 |
|
|
|
1347 |
|
|
ret_val = hw->nvm.ops.valid_led_default(hw, &data);
|
1348 |
|
|
if (ret_val)
|
1349 |
|
|
return ret_val;
|
1350 |
|
|
|
1351 |
|
|
mac->ledctl_default = er32(LEDCTL);
|
1352 |
|
|
mac->ledctl_mode1 = mac->ledctl_default;
|
1353 |
|
|
mac->ledctl_mode2 = mac->ledctl_default;
|
1354 |
|
|
|
1355 |
|
|
for (i = 0; i < 4; i++) {
|
1356 |
|
|
temp = (data >> (i << 2)) & led_mask;
|
1357 |
|
|
switch (temp) {
|
1358 |
|
|
case ID_LED_ON1_DEF2:
|
1359 |
|
|
case ID_LED_ON1_ON2:
|
1360 |
|
|
case ID_LED_ON1_OFF2:
|
1361 |
|
|
mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
|
1362 |
|
|
mac->ledctl_mode1 |= ledctl_on << (i << 3);
|
1363 |
|
|
break;
|
1364 |
|
|
case ID_LED_OFF1_DEF2:
|
1365 |
|
|
case ID_LED_OFF1_ON2:
|
1366 |
|
|
case ID_LED_OFF1_OFF2:
|
1367 |
|
|
mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
|
1368 |
|
|
mac->ledctl_mode1 |= ledctl_off << (i << 3);
|
1369 |
|
|
break;
|
1370 |
|
|
default:
|
1371 |
|
|
/* Do nothing */
|
1372 |
|
|
break;
|
1373 |
|
|
}
|
1374 |
|
|
switch (temp) {
|
1375 |
|
|
case ID_LED_DEF1_ON2:
|
1376 |
|
|
case ID_LED_ON1_ON2:
|
1377 |
|
|
case ID_LED_OFF1_ON2:
|
1378 |
|
|
mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
|
1379 |
|
|
mac->ledctl_mode2 |= ledctl_on << (i << 3);
|
1380 |
|
|
break;
|
1381 |
|
|
case ID_LED_DEF1_OFF2:
|
1382 |
|
|
case ID_LED_ON1_OFF2:
|
1383 |
|
|
case ID_LED_OFF1_OFF2:
|
1384 |
|
|
mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
|
1385 |
|
|
mac->ledctl_mode2 |= ledctl_off << (i << 3);
|
1386 |
|
|
break;
|
1387 |
|
|
default:
|
1388 |
|
|
/* Do nothing */
|
1389 |
|
|
break;
|
1390 |
|
|
}
|
1391 |
|
|
}
|
1392 |
|
|
|
1393 |
|
|
return 0;
|
1394 |
|
|
}

/**
 *  e1000e_cleanup_led_generic - Set LED config to default operation
 *  @hw: pointer to the HW structure
 *
 *  Remove the current LED configuration and set the LED configuration
 *  to the default value, saved from the EEPROM.
 **/
s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
{
	ew32(LEDCTL, hw->mac.ledctl_default);
	return 0;
}

/**
 *  e1000e_blink_led - Blink LED
 *  @hw: pointer to the HW structure
 *
 *  Blink the LEDs which are set to be on.
 **/
s32 e1000e_blink_led(struct e1000_hw *hw)
{
	u32 ledctl_blink = 0;
	u32 i;

	if (hw->media_type == e1000_media_type_fiber) {
		/* always blink LED0 for PCI-E fiber */
		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
	} else {
		/* set the blink bit for each LED that's "on" (0x0E)
		 * in ledctl_mode2 */
		ledctl_blink = hw->mac.ledctl_mode2;
		for (i = 0; i < 4; i++)
			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
			    E1000_LEDCTL_MODE_LED_ON)
				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
						 (i * 8));
	}

	ew32(LEDCTL, ledctl_blink);

	return 0;
}
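
/*
 * Sketch of the LEDCTL layout assumed by e1000e_blink_led() above (derived
 * from the shifts in the code, not from a datasheet): LED i is controlled
 * by byte i of LEDCTL, so its mode and blink bits are reached by shifting
 * the LED0 definitions left by (i * 8).  For example, to make LED2 blink
 * one would set:
 *
 *	ledctl |= E1000_LEDCTL_LED0_BLINK << (2 * 8);
 */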

/**
 *  e1000e_led_on_generic - Turn LED on
 *  @hw: pointer to the HW structure
 *
 *  Turn LED on.
 **/
s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl &= ~E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode2);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_led_off_generic - Turn LED off
 *  @hw: pointer to the HW structure
 *
 *  Turn LED off.
 **/
s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
	u32 ctrl;

	switch (hw->media_type) {
	case e1000_media_type_fiber:
		ctrl = er32(CTRL);
		ctrl |= E1000_CTRL_SWDPIN0;
		ctrl |= E1000_CTRL_SWDPIO0;
		ew32(CTRL, ctrl);
		break;
	case e1000_media_type_copper:
		ew32(LEDCTL, hw->mac.ledctl_mode1);
		break;
	default:
		break;
	}

	return 0;
}

/**
 *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
 *  @hw: pointer to the HW structure
 *  @no_snoop: bitmap of snoop events
 *
 *  Set the PCI-express register to snoop for events enabled in 'no_snoop'.
 **/
void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
{
	u32 gcr;

	if (no_snoop) {
		gcr = er32(GCR);
		gcr &= ~(PCIE_NO_SNOOP_ALL);
		gcr |= no_snoop;
		ew32(GCR, gcr);
	}
}

/**
 *  e1000e_disable_pcie_master - Disables PCI-express master access
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 if successful, else returns -10
 *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
 *  caused the master requests to be disabled.
 *
 *  Disables PCI-Express master access and verifies there are no pending
 *  requests.
 **/
s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
{
	u32 ctrl;
	s32 timeout = MASTER_DISABLE_TIMEOUT;

	ctrl = er32(CTRL);
	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
	ew32(CTRL, ctrl);

	while (timeout) {
		if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
			break;
		udelay(100);
		timeout--;
	}

	if (!timeout) {
		hw_dbg(hw, "Master requests are pending.\n");
		return -E1000_ERR_MASTER_REQUESTS_PENDING;
	}

	return 0;
}

/**
 *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Reset the Adaptive Interframe Spacing throttle to default values.
 **/
void e1000e_reset_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	mac->current_ifs_val = 0;
	mac->ifs_min_val = IFS_MIN;
	mac->ifs_max_val = IFS_MAX;
	mac->ifs_step_size = IFS_STEP;
	mac->ifs_ratio = IFS_RATIO;

	mac->in_ifs_mode = 0;
	ew32(AIT, 0);
}

/**
 *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
 *  @hw: pointer to the HW structure
 *
 *  Update the Adaptive Interframe Spacing Throttle value based on the
 *  time between transmitted packets and time between collisions.
 **/
void e1000e_update_adaptive(struct e1000_hw *hw)
{
	struct e1000_mac_info *mac = &hw->mac;

	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
			mac->in_ifs_mode = 1;
			if (mac->current_ifs_val < mac->ifs_max_val) {
				if (!mac->current_ifs_val)
					mac->current_ifs_val = mac->ifs_min_val;
				else
					mac->current_ifs_val +=
						mac->ifs_step_size;
				ew32(AIT, mac->current_ifs_val);
			}
		}
	} else {
		if (mac->in_ifs_mode &&
		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
			mac->current_ifs_val = 0;
			mac->in_ifs_mode = 0;
			ew32(AIT, 0);
		}
	}
}
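
/*
 * Rough summary of the adaptive IFS throttle implemented above (a reading
 * of the code, not an official algorithm description): when collisions are
 * high relative to transmitted packets (collision_delta * ifs_ratio >
 * tx_packet_delta) and the interface is actually transmitting, the AIT
 * register is stepped up from ifs_min_val towards ifs_max_val in increments
 * of ifs_step_size, inserting extra interframe spacing.  Once the collision
 * rate drops off, the throttle is cleared back to zero.  For example, the
 * first two congested updates would program AIT to ifs_min_val and then to
 * ifs_min_val + ifs_step_size.
 */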

/**
 *  e1000_raise_eec_clk - Raise EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Enable/Raise the EEPROM clock bit.
 **/
static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd | E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_lower_eec_clk - Lower EEPROM clock
 *  @hw: pointer to the HW structure
 *  @eecd: pointer to the EEPROM
 *
 *  Clear/Lower the EEPROM clock bit.
 **/
static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
{
	*eecd = *eecd & ~E1000_EECD_SK;
	ew32(EECD, *eecd);
	e1e_flush();
	udelay(hw->nvm.delay_usec);
}

/**
 *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
 *  @hw: pointer to the HW structure
 *  @data: data to send to the EEPROM
 *  @count: number of bits to shift out
 *
 *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
 *  "data" parameter will be shifted out to the EEPROM one bit at a time.
 *  In order to do this, "data" must be broken down into bits.
 **/
static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u32 mask;

	mask = 0x01 << (count - 1);
	if (nvm->type == e1000_nvm_eeprom_spi)
		eecd |= E1000_EECD_DO;

	do {
		eecd &= ~E1000_EECD_DI;

		if (data & mask)
			eecd |= E1000_EECD_DI;

		ew32(EECD, eecd);
		e1e_flush();

		udelay(nvm->delay_usec);

		e1000_raise_eec_clk(hw, &eecd);
		e1000_lower_eec_clk(hw, &eecd);

		mask >>= 1;
	} while (mask);

	eecd &= ~E1000_EECD_DI;
	ew32(EECD, eecd);
}

/**
 *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
 *  @hw: pointer to the HW structure
 *  @count: number of bits to shift in
 *
 *  In order to read a register from the EEPROM, we need to shift 'count'
 *  bits in from the EEPROM.  Bits are "shifted in" by raising the clock
 *  input to the EEPROM (setting the SK bit), and then reading the value
 *  of the data out "DO" bit.  During this "shifting in" process the data
 *  in "DI" bit should always be clear.
 **/
static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
{
	u32 eecd;
	u32 i;
	u16 data;

	eecd = er32(EECD);

	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
	data = 0;

	for (i = 0; i < count; i++) {
		data <<= 1;
		e1000_raise_eec_clk(hw, &eecd);

		eecd = er32(EECD);

		eecd &= ~E1000_EECD_DI;
		if (eecd & E1000_EECD_DO)
			data |= 1;

		e1000_lower_eec_clk(hw, &eecd);
	}

	return data;
}
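
/*
 * The two shift helpers above bit-bang the EEPROM's serial interface
 * through the EECD register: DI carries data to the part, DO carries data
 * from it, and SK is toggled once per bit.  A minimal sketch of how a
 * caller combines them (illustrative only, modelled on the read path
 * later in this file):
 *
 *	e1000_shift_out_eec_bits(hw, NVM_READ_OPCODE_SPI, nvm->opcode_bits);
 *	e1000_shift_out_eec_bits(hw, byte_addr, nvm->address_bits);
 *	word = e1000_shift_in_eec_bits(hw, 16);
 *
 * Data is shifted most-significant bit first in both directions.
 */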

/**
 *  e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
 *  @hw: pointer to the HW structure
 *  @ee_reg: EEPROM flag for polling
 *
 *  Polls the EEPROM status bit for either read or write completion based
 *  upon the value of 'ee_reg'.
 **/
s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
{
	u32 attempts = 100000;
	u32 i, reg = 0;

	for (i = 0; i < attempts; i++) {
		if (ee_reg == E1000_NVM_POLL_READ)
			reg = er32(EERD);
		else
			reg = er32(EEWR);

		if (reg & E1000_NVM_RW_REG_DONE)
			return 0;

		udelay(5);
	}

	return -E1000_ERR_NVM;
}

/**
 *  e1000e_acquire_nvm - Generic request for access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
 *  Return successful if access grant bit set, else clear the request for
 *  EEPROM access and return -E1000_ERR_NVM (-1).
 **/
s32 e1000e_acquire_nvm(struct e1000_hw *hw)
{
	u32 eecd = er32(EECD);
	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;

	ew32(EECD, eecd | E1000_EECD_REQ);
	eecd = er32(EECD);

	while (timeout) {
		if (eecd & E1000_EECD_GNT)
			break;
		udelay(5);
		eecd = er32(EECD);
		timeout--;
	}

	if (!timeout) {
		eecd &= ~E1000_EECD_REQ;
		ew32(EECD, eecd);
		hw_dbg(hw, "Could not acquire NVM grant\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}
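
/*
 * Typical calling sequence for direct (bit-banged) NVM access, as used by
 * the SPI read/write routines later in this file; shown here only as an
 * illustration of how the acquire/release pair brackets an access:
 *
 *	ret_val = nvm->ops.acquire_nvm(hw);
 *	if (!ret_val) {
 *		ret_val = e1000_ready_nvm_eeprom(hw);
 *		/.. issue opcode/address and shift data ../
 *		nvm->ops.release_nvm(hw);
 *	}
 */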

/**
 *  e1000_standby_nvm - Return EEPROM to standby state
 *  @hw: pointer to the HW structure
 *
 *  Return the EEPROM to a standby state.
 **/
static void e1000_standby_nvm(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Toggle CS to flush commands */
		eecd |= E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
		eecd &= ~E1000_EECD_CS;
		ew32(EECD, eecd);
		e1e_flush();
		udelay(nvm->delay_usec);
	}
}

/**
 *  e1000_stop_nvm - Terminate EEPROM command
 *  @hw: pointer to the HW structure
 *
 *  Terminates the current command by inverting the EEPROM's chip select pin.
 **/
static void e1000_stop_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	eecd = er32(EECD);
	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
		/* Pull CS high */
		eecd |= E1000_EECD_CS;
		e1000_lower_eec_clk(hw, &eecd);
	}
}

/**
 *  e1000e_release_nvm - Release exclusive access to EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
 **/
void e1000e_release_nvm(struct e1000_hw *hw)
{
	u32 eecd;

	e1000_stop_nvm(hw);

	eecd = er32(EECD);
	eecd &= ~E1000_EECD_REQ;
	ew32(EECD, eecd);
}

/**
 *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
 *  @hw: pointer to the HW structure
 *
 *  Sets up the EEPROM for reading and writing.
 **/
static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 eecd = er32(EECD);
	u16 timeout = 0;
	u8 spi_stat_reg;

	if (nvm->type == e1000_nvm_eeprom_spi) {
		/* Clear SK and CS */
		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
		ew32(EECD, eecd);
		udelay(1);
		timeout = NVM_MAX_RETRY_SPI;

		/* Read "Status Register" repeatedly until the LSB is cleared.
		 * The EEPROM will signal that the command has been completed
		 * by clearing bit 0 of the internal status register.  If it's
		 * not cleared within 'timeout', then error out. */
		while (timeout) {
			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
						 hw->nvm.opcode_bits);
			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
				break;

			udelay(5);
			e1000_standby_nvm(hw);
			timeout--;
		}

		if (!timeout) {
			hw_dbg(hw, "SPI NVM Status error\n");
			return -E1000_ERR_NVM;
		}
	}

	return 0;
}

/**
 *  e1000e_read_nvm_spi - Read EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM.
 **/
s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i = 0;
	s32 ret_val;
	u16 word_in;
	u8 read_opcode = NVM_READ_OPCODE_SPI;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words. */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire_nvm(hw);
	if (ret_val)
		return ret_val;

	ret_val = e1000_ready_nvm_eeprom(hw);
	if (ret_val) {
		nvm->ops.release_nvm(hw);
		return ret_val;
	}

	e1000_standby_nvm(hw);

	if ((nvm->address_bits == 8) && (offset >= 128))
		read_opcode |= NVM_A8_OPCODE_SPI;

	/* Send the READ command (opcode + addr) */
	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);

	/* Read the data.  SPI NVMs increment the address with each byte
	 * read and will roll over if reading beyond the end.  This allows
	 * us to read the whole NVM from any offset */
	for (i = 0; i < words; i++) {
		word_in = e1000_shift_in_eec_bits(hw, 16);
		data[i] = (word_in >> 8) | (word_in << 8);
	}

	nvm->ops.release_nvm(hw);
	return 0;
}
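
/*
 * Byte-order note on the read loop above (an observation about the code,
 * not a datasheet quote): the SPI EEPROM delivers the high byte of each
 * 16-bit word first, so the two bytes clocked in are swapped with
 * (word_in >> 8) | (word_in << 8) to produce the word order the rest of
 * the driver expects.  The address sent to the part is a byte address,
 * which is why the word offset is multiplied by two.
 */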

/**
 *  e1000e_read_nvm_eerd - Reads EEPROM using EERD register
 *  @hw: pointer to the HW structure
 *  @offset: offset of word in the EEPROM to read
 *  @words: number of words to read
 *  @data: word read from the EEPROM
 *
 *  Reads a 16 bit word from the EEPROM using the EERD register.
 **/
s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	u32 i, eerd = 0;
	s32 ret_val = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words. */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	for (i = 0; i < words; i++) {
		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
		       E1000_NVM_RW_REG_START;

		ew32(EERD, eerd);
		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
		if (ret_val)
			break;

		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
	}

	return ret_val;
}
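
/*
 * The EERD path above lets the MAC perform the EEPROM access itself: for
 * each word the driver writes the word address plus the START bit into
 * EERD, polls until the DONE bit is set, then pulls the result out of the
 * data field of the same register.  A single-word read therefore reduces
 * to (sketch only):
 *
 *	ew32(EERD, (offset << E1000_NVM_RW_ADDR_SHIFT) + E1000_NVM_RW_REG_START);
 *	if (!e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ))
 *		word = er32(EERD) >> E1000_NVM_RW_REG_DATA;
 */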

/**
 *  e1000e_write_nvm_spi - Write to EEPROM using SPI
 *  @hw: pointer to the HW structure
 *  @offset: offset within the EEPROM to be written to
 *  @words: number of words to write
 *  @data: 16 bit word(s) to be written to the EEPROM
 *
 *  Writes data to EEPROM at offset using SPI interface.
 *
 *  If e1000e_update_nvm_checksum is not called after this function, the
 *  EEPROM will most likely contain an invalid checksum.
 **/
s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
	struct e1000_nvm_info *nvm = &hw->nvm;
	s32 ret_val;
	u16 widx = 0;

	/* A check for invalid values:  offset too large, too many words,
	 * and not enough words. */
	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
	    (words == 0)) {
		hw_dbg(hw, "nvm parameter(s) out of bounds\n");
		return -E1000_ERR_NVM;
	}

	ret_val = nvm->ops.acquire_nvm(hw);
	if (ret_val)
		return ret_val;

	msleep(10);

	while (widx < words) {
		u8 write_opcode = NVM_WRITE_OPCODE_SPI;

		ret_val = e1000_ready_nvm_eeprom(hw);
		if (ret_val) {
			nvm->ops.release_nvm(hw);
			return ret_val;
		}

		e1000_standby_nvm(hw);

		/* Send the WRITE ENABLE command (8 bit opcode) */
		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
					 nvm->opcode_bits);

		e1000_standby_nvm(hw);

		/* Some SPI eeproms use the 8th address bit embedded in the
		 * opcode */
		if ((nvm->address_bits == 8) && (offset >= 128))
			write_opcode |= NVM_A8_OPCODE_SPI;

		/* Send the Write command (8-bit opcode + addr) */
		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
					 nvm->address_bits);

		/* Loop to allow for up to whole page write of eeprom */
		while (widx < words) {
			u16 word_out = data[widx];
			word_out = (word_out >> 8) | (word_out << 8);
			e1000_shift_out_eec_bits(hw, word_out, 16);
			widx++;

			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
				e1000_standby_nvm(hw);
				break;
			}
		}
	}

	msleep(10);
	return 0;
}
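
/*
 * SPI EEPROMs only accept writes within a single page per WRITE command,
 * which is why the inner loop above stops and issues a standby whenever the
 * byte address (offset + widx) * 2 crosses an nvm->page_size boundary; the
 * outer loop then starts a fresh WRITE ENABLE/WRITE sequence for the next
 * page.  As the kernel-doc notes, callers are expected to refresh the NVM
 * checksum afterwards, e.g. (illustrative only):
 *
 *	ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
 *	if (!ret_val)
 *		ret_val = e1000e_update_nvm_checksum_generic(hw);
 */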

/**
 *  e1000e_read_mac_addr - Read device MAC address
 *  @hw: pointer to the HW structure
 *
 *  Reads the device MAC address from the EEPROM and stores the value.
 *  Since devices with two ports use the same EEPROM, we increment the
 *  last bit in the MAC address for the second port.
 **/
s32 e1000e_read_mac_addr(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 offset, nvm_data, i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		offset = i >> 1;
		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
	}

	/* Flip last bit of mac address if we're on second port */
	if (hw->bus.func == E1000_FUNC_1)
		hw->mac.perm_addr[5] ^= 1;

	for (i = 0; i < ETH_ALEN; i++)
		hw->mac.addr[i] = hw->mac.perm_addr[i];

	return 0;
}
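
/*
 * Layout assumed by the loop above (as implied by the code itself): the MAC
 * address occupies the first three 16-bit NVM words, stored low byte first,
 * so word 0 holds octets 0 and 1, word 1 holds octets 2 and 3, and word 2
 * holds octets 4 and 5.  On a dual-port device both ports share the image,
 * and the second function derives its address by toggling the least
 * significant bit of the final octet.
 */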

/**
 *  e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
 *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
 **/
s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error\n");
			return ret_val;
		}
		checksum += nvm_data;
	}

	if (checksum != (u16) NVM_SUM) {
		hw_dbg(hw, "NVM Checksum Invalid\n");
		return -E1000_ERR_NVM;
	}

	return 0;
}

/**
 *  e1000e_update_nvm_checksum_generic - Update EEPROM checksum
 *  @hw: pointer to the HW structure
 *
 *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
 *  up to the checksum.  Then calculates the EEPROM checksum and writes the
 *  value to the EEPROM.
 **/
s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
{
	s32 ret_val;
	u16 checksum = 0;
	u16 i, nvm_data;

	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
		if (ret_val) {
			hw_dbg(hw, "NVM Read Error while updating checksum.\n");
			return ret_val;
		}
		checksum += nvm_data;
	}
	checksum = (u16) NVM_SUM - checksum;
	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
	if (ret_val)
		hw_dbg(hw, "NVM Write Error while updating checksum.\n");

	return ret_val;
}
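
/*
 * Checksum invariant maintained by the two routines above: the 16-bit sum
 * of NVM words 0 through NVM_CHECKSUM_REG (inclusive) must equal NVM_SUM
 * (0xBABA).  The update routine therefore stores
 *
 *	checksum_word = NVM_SUM - sum(words 0 .. NVM_CHECKSUM_REG - 1)
 *
 * so that, for instance, if the data words sum to 0x1234 the checksum word
 * written is 0xBABA - 0x1234 = 0xA886, and a later validate pass sums back
 * to 0xBABA.  All arithmetic is modulo 2^16.
 */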

/**
 *  e1000e_reload_nvm - Reloads EEPROM
 *  @hw: pointer to the HW structure
 *
 *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
 *  extended control register.
 **/
void e1000e_reload_nvm(struct e1000_hw *hw)
{
	u32 ctrl_ext;

	udelay(10);
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}

/**
 *  e1000_calculate_checksum - Calculate checksum for buffer
 *  @buffer: pointer to EEPROM
 *  @length: size of EEPROM to calculate a checksum for
 *
 *  Calculates the checksum for some buffer on a specified length.  The
 *  checksum calculated is returned.
 **/
static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
{
	u32 i;
	u8 sum = 0;

	if (!buffer)
		return 0;

	for (i = 0; i < length; i++)
		sum += buffer[i];

	return (u8) (0 - sum);
}
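
/*
 * e1000_calculate_checksum() returns the two's complement of the byte sum,
 * so summing the buffer together with its checksum always yields 0 modulo
 * 256.  Small worked example: for bytes {0x10, 0x20, 0x30} the sum is 0x60
 * and the returned checksum is 0xA0, since 0x60 + 0xA0 == 0x100 == 0 (mod
 * 256).  The manageability code below uses this helper when it validates
 * the DHCP cookie and when it writes command headers.
 */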

/**
 *  e1000_mng_enable_host_if - Checks host interface is enabled
 *  @hw: pointer to the HW structure
 *
 *  Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND
 *
 *  This function checks whether the HOST IF is enabled for command
 *  operation and also checks whether the previous command is completed.
 *  It busy-waits while the previous command is not completed.
 **/
static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
{
	u32 hicr;
	u8 i;

	/* Check that the host interface is enabled. */
	hicr = er32(HICR);
	if ((hicr & E1000_HICR_EN) == 0) {
		hw_dbg(hw, "E1000_HOST_EN bit disabled.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}
	/* check the previous command is completed */
	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
		hicr = er32(HICR);
		if (!(hicr & E1000_HICR_C))
			break;
		mdelay(1);
	}

	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
		hw_dbg(hw, "Previous command timeout failed.\n");
		return -E1000_ERR_HOST_INTERFACE_COMMAND;
	}

	return 0;
}

/**
 *  e1000e_check_mng_mode - check management mode
 *  @hw: pointer to the HW structure
 *
 *  Reads the firmware semaphore register and returns true (>0) if
 *  manageability is enabled, else false (0).
 **/
bool e1000e_check_mng_mode(struct e1000_hw *hw)
{
	u32 fwsm = er32(FWSM);

	return (fwsm & E1000_FWSM_MODE_MASK) == hw->mac.ops.mng_mode_enab;
}

/**
 *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
 *  @hw: pointer to the HW structure
 *
 *  Enables packet filtering on transmit packets if manageability is enabled
 *  and host interface is enabled.
 **/
bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
{
	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
	u32 *buffer = (u32 *)&hw->mng_cookie;
	u32 offset;
	s32 ret_val, hdr_csum, csum;
	u8 i, len;

	/* No manageability, no filtering */
	if (!e1000e_check_mng_mode(hw)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	/* If we can't read from the host interface for whatever
	 * reason, disable filtering.
	 */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val != 0) {
		hw->mac.tx_pkt_filtering = 0;
		return ret_val;
	}

	/* Read in the header.  Length and offset are in dwords. */
	len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
	for (i = 0; i < len; i++)
		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
	hdr_csum = hdr->checksum;
	hdr->checksum = 0;
	csum = e1000_calculate_checksum((u8 *)hdr,
					E1000_MNG_DHCP_COOKIE_LENGTH);
	/* If either the checksums or signature don't match, then
	 * the cookie area isn't considered valid, in which case we
	 * take the safe route of assuming Tx filtering is enabled.
	 */
	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
		hw->mac.tx_pkt_filtering = 1;
		return 1;
	}

	/* Cookie area is valid, make the final check for filtering. */
	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
		hw->mac.tx_pkt_filtering = 0;
		return 0;
	}

	hw->mac.tx_pkt_filtering = 1;
	return 1;
}

/**
 *  e1000_mng_write_cmd_header - Writes manageability command header
 *  @hw: pointer to the HW structure
 *  @hdr: pointer to the host interface command header
 *
 *  Writes the command header after calculating its checksum.
 **/
static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
				      struct e1000_host_mng_command_header *hdr)
{
	u16 i, length = sizeof(struct e1000_host_mng_command_header);

	/* Write the whole command header structure with new checksum. */

	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);

	length >>= 2;
	/* Write the relevant command block into the ram area. */
	for (i = 0; i < length; i++) {
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
				      *((u32 *) hdr + i));
		e1e_flush();
	}

	return 0;
}

/**
 *  e1000_mng_host_if_write - Writes to the manageability host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface buffer
 *  @length: size of the buffer
 *  @offset: location in the buffer to write to
 *  @sum: sum of the data (not checksum)
 *
 *  Writes the buffer content at the given offset in the host interface.
 *  It handles dword alignment so the writes are done in the most efficient
 *  way, and accumulates the byte sum of the data in *sum.
 **/
static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
				   u16 length, u16 offset, u8 *sum)
{
	u8 *tmp;
	u8 *bufptr = buffer;
	u32 data = 0;
	u16 remaining, i, j, prev_bytes;

	/* sum = only sum of the data and it is not checksum */

	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
		return -E1000_ERR_PARAM;

	tmp = (u8 *)&data;
	prev_bytes = offset & 0x3;
	offset >>= 2;

	if (prev_bytes) {
		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
		for (j = prev_bytes; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
		length -= j - prev_bytes;
		offset++;
	}

	remaining = length & 0x3;
	length -= remaining;

	/* Calculate length in DWORDs */
	length >>= 2;

	/* The device driver writes the relevant command block into the
	 * ram area. */
	for (i = 0; i < length; i++) {
		for (j = 0; j < sizeof(u32); j++) {
			*(tmp + j) = *bufptr++;
			*sum += *(tmp + j);
		}

		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}
	if (remaining) {
		for (j = 0; j < sizeof(u32); j++) {
			if (j < remaining)
				*(tmp + j) = *bufptr++;
			else
				*(tmp + j) = 0;

			*sum += *(tmp + j);
		}
		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
	}

	return 0;
}
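
/*
 * Alignment handling in e1000_mng_host_if_write(), summarised from the code
 * above: the host interface RAM is accessed as 32-bit dwords, so the write
 * is split into three phases - a partial leading dword when 'offset' is not
 * dword aligned (the existing dword is read back and only the tail bytes are
 * replaced), a run of full dwords, and a partial trailing dword padded with
 * zeroes.  Every data byte written is also added to *sum so the caller can
 * checksum the complete message afterwards.
 */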

/**
 *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
 *  @hw: pointer to the HW structure
 *  @buffer: pointer to the host interface
 *  @length: size of the buffer
 *
 *  Writes the DHCP information to the host interface.
 **/
s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
{
	struct e1000_host_mng_command_header hdr;
	s32 ret_val;
	u32 hicr;

	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
	hdr.command_length = length;
	hdr.reserved1 = 0;
	hdr.reserved2 = 0;
	hdr.checksum = 0;

	/* Enable the host interface */
	ret_val = e1000_mng_enable_host_if(hw);
	if (ret_val)
		return ret_val;

	/* Populate the host interface with the contents of "buffer". */
	ret_val = e1000_mng_host_if_write(hw, buffer, length,
					  sizeof(hdr), &(hdr.checksum));
	if (ret_val)
		return ret_val;

	/* Write the manageability command header */
	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
	if (ret_val)
		return ret_val;

	/* Tell the ARC a new command is pending. */
	hicr = er32(HICR);
	ew32(HICR, hicr | E1000_HICR_C);

	return 0;
}

/**
 *  e1000e_enable_mng_pass_thru - Enable processing of ARPs
 *  @hw: pointer to the HW structure
 *
 *  Verifies the hardware needs to allow ARPs to be processed by the host.
 **/
bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
{
	u32 manc;
	u32 fwsm, factps;
	bool ret_val = 0;

	manc = er32(MANC);

	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
		return ret_val;

	if (hw->mac.arc_subsystem_valid) {
		fwsm = er32(FWSM);
		factps = er32(FACTPS);

		if (!(factps & E1000_FACTPS_MNGCG) &&
		    ((fwsm & E1000_FWSM_MODE_MASK) ==
		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
			ret_val = 1;
			return ret_val;
		}
	} else {
		if ((manc & E1000_MANC_SMBUS_EN) &&
		    !(manc & E1000_MANC_ASF_EN)) {
			ret_val = 1;
			return ret_val;
		}
	}

	return ret_val;
}

/**
 *  e1000e_read_part_num - Read device part number
 *  @hw: pointer to the HW structure
 *  @part_num: pointer to device part number
 *
 *  Reads the PBA (printed board assembly) number from the EEPROM and
 *  stores the value in part_num.
 **/
s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
{
	s32 ret_val;
	u16 nvm_data;

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num = (u32)(nvm_data << 16);

	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
	if (ret_val) {
		hw_dbg(hw, "NVM Read Error\n");
		return ret_val;
	}
	*part_num |= nvm_data;

	return 0;
}