/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/* Originally written by Dave Larson (larson1@us.ibm.com)                 */
/* Maintained by Santiago Leon (santil@us.ibm.com)                        */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program; if not, write to the Free Software            */
/* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307    */
/* USA                                                                    */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/*
  TODO:
  - add support for sysfs
  - possibly remove procfs support
*/

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/net_namespace.h>
#include <asm/semaphore.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "ibmveth.h"

#undef DEBUG

#define ibmveth_printk(fmt, args...) \
        printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
        printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
        printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
        printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
        if (!(expr)) { \
                printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
                BUG(); \
        }
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif

static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static struct kobj_type ktype_veth_pool;

#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
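
/* Each entry pairs an ethtool string with the byte offset of a u64
 * counter inside struct ibmveth_adapter; IBMVETH_GET_STAT then reads
 * that counter straight out of the adapter with pointer arithmetic, so
 * adding a statistic only takes a new line in the table below. */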
struct ibmveth_stat ibmveth_stats[] = {
        { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
        { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
        { "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
        { "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
        { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
        { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
        { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
        { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
};

/* simple methods of getting data from the current rxq entry */
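/* The hypervisor owns the receive queue and flips the IBMVETH_RXQ_TOGGLE
 * bit in the entries it writes each time it wraps the ring; an entry is
 * pending for us exactly when its toggle bit matches
 * adapter->rx_queue.toggle, which we invert whenever our own index wraps
 * (see the recycle and harvest helpers below). */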
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
        return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
        return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
        pool->size = pool_size;
        pool->index = pool_index;
        pool->buff_size = buff_size;
        pool->threshold = pool_size / 2;
        pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
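/* free_map is a ring of free buffer indices: the replenish path consumes
 * entries at consumer_index and the remove/harvest path returns them at
 * producer_index */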
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
        int i;

        pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

        if (!pool->free_map) {
                return -1;
        }

        pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
        if (!pool->dma_addr) {
                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        pool->skbuff = kmalloc(sizeof(void *) * pool->size, GFP_KERNEL);

        if (!pool->skbuff) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;

                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        memset(pool->skbuff, 0, sizeof(void *) * pool->size);
        memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

        for (i = 0; i < pool->size; ++i) {
                pool->free_map[i] = i;
        }

        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;

        return 0;
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
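/* Each posted buffer carries a 64-bit correlator in its first 8 bytes:
 * the pool index in the upper 32 bits and the buffer index in the lower
 * 32.  When the hypervisor completes a receive, the correlator in the
 * rx queue entry tells us exactly which skb to hand up. */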
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
        u32 i;
        u32 count = pool->size - atomic_read(&pool->available);
        u32 buffers_added = 0;

        mb();

        for (i = 0; i < count; ++i) {
                struct sk_buff *skb;
                unsigned int free_index, index;
                u64 correlator;
                union ibmveth_buf_desc desc;
                unsigned long lpar_rc;
                dma_addr_t dma_addr;

                skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

                if (!skb) {
                        ibmveth_debug_printk("replenish: unable to allocate skb\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                free_index = pool->consumer_index;
                pool->consumer_index = (pool->consumer_index + 1) % pool->size;
                index = pool->free_map[free_index];

                ibmveth_assert(index != IBM_VETH_INVALID_MAP);
                ibmveth_assert(pool->skbuff[index] == NULL);

                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                          pool->buff_size, DMA_FROM_DEVICE);

                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
                pool->skbuff[index] = skb;

                correlator = ((u64)pool->index << 32) | index;
                *(u64 *)skb->data = correlator;

                desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
                desc.fields.address = dma_addr;

                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

                if (lpar_rc != H_SUCCESS) {
                        pool->free_map[free_index] = index;
                        pool->skbuff[index] = NULL;
                        if (pool->consumer_index == 0)
                                pool->consumer_index = pool->size - 1;
                        else
                                pool->consumer_index--;
                        dma_unmap_single(&adapter->vdev->dev,
                                         pool->dma_addr[index], pool->buff_size,
                                         DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        adapter->replenish_add_buff_failure++;
                        break;
                } else {
                        buffers_added++;
                        adapter->replenish_add_buff_success++;
                }
        }

        mb();
        atomic_add(buffers_added, &(pool->available));
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;

        for (i = 0; i < IbmVethNumBufferPools; i++)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_replenish_buffer_pool(adapter,
                                                      &adapter->rx_buff_pool[i]);
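
        /* the final 8 bytes of the buffer list page hold a
         * firmware-maintained count of frames that could not be received
         * because no suitable buffer was available */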
        adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
        int i;

        kfree(pool->free_map);
        pool->free_map = NULL;

        if (pool->skbuff && pool->dma_addr) {
                for (i = 0; i < pool->size; ++i) {
                        struct sk_buff *skb = pool->skbuff[i];
                        if (skb) {
                                dma_unmap_single(&adapter->vdev->dev,
                                                 pool->dma_addr[i],
                                                 pool->buff_size,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                pool->skbuff[i] = NULL;
                        }
                }
        }

        if (pool->dma_addr) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;
        }

        if (pool->skbuff) {
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        unsigned int free_index;
        struct sk_buff *skb;

        ibmveth_assert(pool < IbmVethNumBufferPools);
        ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

        skb = adapter->rx_buff_pool[pool].skbuff[index];

        ibmveth_assert(skb != NULL);

        adapter->rx_buff_pool[pool].skbuff[index] = NULL;

        dma_unmap_single(&adapter->vdev->dev,
                         adapter->rx_buff_pool[pool].dma_addr[index],
                         adapter->rx_buff_pool[pool].buff_size,
                         DMA_FROM_DEVICE);

        free_index = adapter->rx_buff_pool[pool].producer_index;
        adapter->rx_buff_pool[pool].producer_index
                = (adapter->rx_buff_pool[pool].producer_index + 1)
                % adapter->rx_buff_pool[pool].size;
        adapter->rx_buff_pool[pool].free_map[free_index] = index;

        mb();

        atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
        u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;

        ibmveth_assert(pool < IbmVethNumBufferPools);
        ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

        return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
        u32 q_index = adapter->rx_queue.index;
        u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        union ibmveth_buf_desc desc;
        unsigned long lpar_rc;

        ibmveth_assert(pool < IbmVethNumBufferPools);
        ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

        if (!adapter->rx_buff_pool[pool].active) {
                ibmveth_rxq_harvest_buffer(adapter);
                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
                return;
        }

        desc.fields.flags_len = IBMVETH_BUF_VALID |
                adapter->rx_buff_pool[pool].buff_size;
        desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

        lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

        if (lpar_rc != H_SUCCESS) {
                ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
                ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
        }

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }
}
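
/* harvest differs from recycle: the buffer is pulled out of its pool and
 * its slot returned to the free map, but it is not re-posted to the
 * hypervisor - used once the skb has been handed up the stack */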
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
        ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }
}
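
/* release everything ibmveth_open set up: unmap and free the buffer
 * list, filter list and rx queue (each one is checked first, so this is
 * safe to call from partially-initialized error paths), then drain every
 * active buffer pool */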
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
        int i;

        if (adapter->buffer_list_addr != NULL) {
                if (!dma_mapping_error(adapter->buffer_list_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                         adapter->buffer_list_dma, 4096,
                                         DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->buffer_list_addr);
                adapter->buffer_list_addr = NULL;
        }

        if (adapter->filter_list_addr != NULL) {
                if (!dma_mapping_error(adapter->filter_list_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                         adapter->filter_list_dma, 4096,
                                         DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->filter_list_addr);
                adapter->filter_list_addr = NULL;
        }

        if (adapter->rx_queue.queue_addr != NULL) {
                if (!dma_mapping_error(adapter->rx_queue.queue_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                         adapter->rx_queue.queue_dma,
                                         adapter->rx_queue.queue_len,
                                         DMA_BIDIRECTIONAL);
                        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
                }
                kfree(adapter->rx_queue.queue_addr);
                adapter->rx_queue.queue_addr = NULL;
        }

        for (i = 0; i < IbmVethNumBufferPools; i++)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
        int rc, try_again = 1;

        /* After a kexec the adapter will still be open, so our attempt to
         * open it will fail. So if we get a failure we free the adapter and
         * try again, but only once. */
retry:
        rc = h_register_logical_lan(adapter->vdev->unit_address,
                                    adapter->buffer_list_dma, rxq_desc.desc,
                                    adapter->filter_list_dma, mac_address);

        if (rc != H_SUCCESS && try_again) {
                do {
                        rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

                try_again = 0;
                goto retry;
        }

        return rc;
}

static int ibmveth_open(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev->priv;
        u64 mac_address = 0;
        int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;

        ibmveth_debug_printk("open starting\n");

        napi_enable(&adapter->napi);
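
        /* size the receive queue with one entry for every buffer any
         * pool could post, plus one spare slot */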
        for (i = 0; i < IbmVethNumBufferPools; i++)
                rxq_entries += adapter->rx_buff_pool[i].size;

        adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
        adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

        if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
                ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                return -ENOMEM;
        }

        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
        adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

        if (!adapter->rx_queue.queue_addr) {
                ibmveth_error_printk("unable to allocate rx queue pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                return -ENOMEM;
        }

        adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
                        adapter->rx_queue.queue_addr,
                        adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

        if ((dma_mapping_error(adapter->buffer_list_dma)) ||
            (dma_mapping_error(adapter->filter_list_dma)) ||
            (dma_mapping_error(adapter->rx_queue.queue_dma))) {
                ibmveth_error_printk("unable to map filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                return -ENOMEM;
        }

        adapter->rx_queue.index = 0;
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;
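
        /* pack the 6-byte MAC address into the low 48 bits of a u64,
         * which is how h_register_logical_lan takes it */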
        memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
        mac_address = mac_address >> 16;

        rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
        rxq_desc.fields.address = adapter->rx_queue.queue_dma;

        ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
        ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
        ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

        if (lpar_rc != H_SUCCESS) {
                ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
                ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
                                     mac_address);
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                return -ENONET;
        }

        for (i = 0; i < IbmVethNumBufferPools; i++) {
                if (!adapter->rx_buff_pool[i].active)
                        continue;
                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
                        ibmveth_error_printk("unable to alloc pool\n");
                        adapter->rx_buff_pool[i].active = 0;
                        ibmveth_cleanup(adapter);
                        napi_disable(&adapter->napi);
                        return -ENOMEM;
                }
        }

        ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
        if ((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
                ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
                do {
                        rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
                return rc;
        }

        ibmveth_debug_printk("initial replenish cycle\n");
        ibmveth_interrupt(netdev->irq, netdev);

        netif_start_queue(netdev);

        ibmveth_debug_printk("open complete\n");

        return 0;
}

static int ibmveth_close(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev->priv;
        long lpar_rc;

        ibmveth_debug_printk("close starting\n");

        napi_disable(&adapter->napi);

        if (!adapter->pool_config)
                netif_stop_queue(netdev);

        free_irq(netdev->irq, netdev);

        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

        if (lpar_rc != H_SUCCESS) {
                ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
                                     lpar_rc);
        }

        adapter->rx_no_buffer = *(u64 *)(((char *)adapter->buffer_list_addr) + 4096 - 8);

        ibmveth_cleanup(adapter);

        ibmveth_debug_printk("close complete\n");

        return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
        cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
        cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
        cmd->speed = SPEED_1000;
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_FIBRE;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_ENABLE;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 1;
        return 0;
}

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) {
        strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
        strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
        return 1;
}

static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = dev->priv;

        if (data)
                adapter->rx_csum = 1;
        else {
                /*
                 * Since the ibmveth firmware interface does not have the concept of
                 * separate tx/rx checksum offload enable, if rx checksum is disabled
                 * we also have to disable tx checksum offload. Once we disable rx
                 * checksum offload, we are no longer allowed to send tx buffers that
                 * are not properly checksummed.
                 */
                adapter->rx_csum = 0;
                dev->features &= ~NETIF_F_IP_CSUM;
        }
}

static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = dev->priv;

        if (data) {
                dev->features |= NETIF_F_IP_CSUM;
                adapter->rx_csum = 1;
        } else
                dev->features &= ~NETIF_F_IP_CSUM;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
                                    void (*done) (struct net_device *, u32))
{
        struct ibmveth_adapter *adapter = dev->priv;
        u64 set_attr, clr_attr, ret_attr;
        long ret;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;

        if (data)
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
        else
                clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
                ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);

                if (ret != H_SUCCESS) {
                        rc1 = -EIO;
                        ibmveth_error_printk("unable to change checksum offload settings."
                                             " %d rc=%ld\n", data, ret);

                        ret = h_illan_attributes(adapter->vdev->unit_address,
                                                 set_attr, clr_attr, &ret_attr);
                } else
                        done(dev, data);
        } else {
                rc1 = -EIO;
                ibmveth_error_printk("unable to change checksum offload settings."
                                     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}

static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = dev->priv;

        if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
                return 0;

        return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}

static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = dev->priv;
        int rc = 0;

        if (data && (dev->features & NETIF_F_IP_CSUM))
                return 0;
        if (!data && !(dev->features & NETIF_F_IP_CSUM))
                return 0;

        if (data && !adapter->rx_csum)
                rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
        else
                ibmveth_set_tx_csum_flags(dev, data);

        return rc;
}

static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
        struct ibmveth_adapter *adapter = dev->priv;
        return adapter->rx_csum;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
                memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmveth_stats);
        default:
                return -EOPNOTSUPP;
        }
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        int i;
        struct ibmveth_adapter *adapter = dev->priv;

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
                data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo = netdev_get_drvinfo,
        .get_settings = netdev_get_settings,
        .get_link = netdev_get_link,
        .set_tx_csum = ibmveth_set_tx_csum,
        .get_rx_csum = ibmveth_get_rx_csum,
        .set_rx_csum = ibmveth_set_rx_csum,
        .get_strings = ibmveth_get_strings,
        .get_sset_count = ibmveth_get_sset_count,
        .get_ethtool_stats = ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev->priv;
        union ibmveth_buf_desc desc;
        unsigned long lpar_rc;
        unsigned long correlator;
        unsigned long flags;
        unsigned int retry_count;
        unsigned int tx_dropped = 0;
        unsigned int tx_bytes = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_send_failed = 0;
        unsigned int tx_map_failed = 0;

        desc.fields.flags_len = IBMVETH_BUF_VALID | skb->len;
        desc.fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
                                             skb->len, DMA_TO_DEVICE);

        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
                ibmveth_error_printk("tx: failed to checksum packet\n");
                tx_dropped++;
                goto out;
        }
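
        /* only TCP checksums can be offloaded to the hypervisor; anything
         * else was already checksummed in software above by
         * skb_checksum_help() */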
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) + skb->csum_offset;

                desc.fields.flags_len |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

                /* Need to zero out the checksum */
                buf[0] = 0;
                buf[1] = 0;
        }

        if (dma_mapping_error(desc.fields.address)) {
                ibmveth_error_printk("tx: unable to map xmit buffer\n");
                tx_map_failed++;
                tx_dropped++;
                goto out;
        }

        /* send the frame. Arbitrarily set retry_count to 1024 */
        correlator = 0;
        retry_count = 1024;
        do {
                lpar_rc = h_send_logical_lan(adapter->vdev->unit_address,
                                             desc.desc, 0, 0, 0, 0, 0,
                                             correlator, &correlator);
        } while ((lpar_rc == H_BUSY) && (retry_count--));

        if (lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
                ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
                ibmveth_error_printk("tx: valid=%d, len=%d, address=0x%08x\n",
                                     (desc.fields.flags_len & IBMVETH_BUF_VALID) ? 1 : 0,
                                     skb->len, desc.fields.address);
                tx_send_failed++;
                tx_dropped++;
        } else {
                tx_packets++;
                tx_bytes += skb->len;
                netdev->trans_start = jiffies;
        }

        dma_unmap_single(&adapter->vdev->dev, desc.fields.address,
                         skb->len, DMA_TO_DEVICE);

out:    spin_lock_irqsave(&adapter->stats_lock, flags);
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
        netdev->stats.tx_packets += tx_packets;
        adapter->tx_send_failed += tx_send_failed;
        adapter->tx_map_failed += tx_map_failed;
        spin_unlock_irqrestore(&adapter->stats_lock, flags);

        dev_kfree_skb(skb);
        return 0;
}

static int ibmveth_poll(struct napi_struct *napi, int budget)
{
        struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
        struct net_device *netdev = adapter->netdev;
        int frames_processed = 0;
        unsigned long lpar_rc;

restart_poll:
        do {
                struct sk_buff *skb;

                if (!ibmveth_rxq_pending_buffer(adapter))
                        break;

                rmb();
                if (!ibmveth_rxq_buffer_valid(adapter)) {
                        wmb(); /* suggested by larson1 */
                        adapter->rx_invalid_buffer++;
                        ibmveth_debug_printk("recycling invalid buffer\n");
                        ibmveth_rxq_recycle_buffer(adapter);
                } else {
                        int length = ibmveth_rxq_frame_length(adapter);
                        int offset = ibmveth_rxq_frame_offset(adapter);
                        int csum_good = ibmveth_rxq_csum_good(adapter);

                        skb = ibmveth_rxq_get_buffer(adapter);
                        if (csum_good)
                                skb->ip_summed = CHECKSUM_UNNECESSARY;

                        ibmveth_rxq_harvest_buffer(adapter);

                        skb_reserve(skb, offset);
                        skb_put(skb, length);
                        skb->protocol = eth_type_trans(skb, netdev);

                        netif_receive_skb(skb); /* send it up */

                        netdev->stats.rx_packets++;
                        netdev->stats.rx_bytes += length;
                        frames_processed++;
                        netdev->last_rx = jiffies;
                }
        } while (frames_processed < budget);

        ibmveth_replenish_task(adapter);

        if (frames_processed < budget) {
                /* We think we are done - reenable interrupts,
                 * then check once more to make sure we are done.
                 */
                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                       VIO_IRQ_ENABLE);

                ibmveth_assert(lpar_rc == H_SUCCESS);

                netif_rx_complete(netdev, napi);

                if (ibmveth_rxq_pending_buffer(adapter) &&
                    netif_rx_reschedule(netdev, napi)) {
                        lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                               VIO_IRQ_DISABLE);
                        goto restart_poll;
                }
        }

        return frames_processed;
}

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
        struct net_device *netdev = dev_instance;
        struct ibmveth_adapter *adapter = netdev->priv;
        unsigned long lpar_rc;

        if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
                lpar_rc = h_vio_signal(adapter->vdev->unit_address,
                                       VIO_IRQ_DISABLE);
                ibmveth_assert(lpar_rc == H_SUCCESS);
                __netif_rx_schedule(netdev, &adapter->napi);
        }
        return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev->priv;
        unsigned long lpar_rc;
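
        /* in promiscuous mode, or with more multicast addresses than the
         * firmware filter table can hold, give up on filtering and ask to
         * receive everything */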
        if ((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableRecv |
                                           IbmVethMcastDisableFiltering,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
                }
        } else {
                struct dev_mc_list *mclist = netdev->mc_list;
                int i;
                /* clear the filter table & disable filtering */
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableRecv |
                                           IbmVethMcastDisableFiltering |
                                           IbmVethMcastClearFilterTable,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
                }
                /* add the addresses to the filter table */
                for (i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
                        /* add the multicast address to the filter table */
                        unsigned long mcast_addr = 0;
                        memcpy(((char *)&mcast_addr) + 2, mclist->dmi_addr, 6);
                        lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                                   IbmVethMcastAddFilter,
                                                   mcast_addr);
                        if (lpar_rc != H_SUCCESS) {
                                ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
                        }
                }

                /* re-enable filtering */
                lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
                                           IbmVethMcastEnableFiltering,
                                           0);
                if (lpar_rc != H_SUCCESS) {
                        ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
                }
        }
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
        struct ibmveth_adapter *adapter = dev->priv;
        int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
        int reinit = 0;
        int i, rc;
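
        /* despite its name, IBMVETH_MAX_MTU acts as the lower bound
         * here: anything smaller is rejected */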
        if (new_mtu < IBMVETH_MAX_MTU)
                return -EINVAL;

        for (i = 0; i < IbmVethNumBufferPools; i++)
                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
                        break;

        if (i == IbmVethNumBufferPools)
                return -EINVAL;

        /* Look for an active buffer pool that can hold the new MTU */
        for (i = 0; i < IbmVethNumBufferPools; i++) {
                if (!adapter->rx_buff_pool[i].active) {
                        adapter->rx_buff_pool[i].active = 1;
                        reinit = 1;
                }

                if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
                        if (reinit && netif_running(adapter->netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(adapter->netdev);
                                adapter->pool_config = 0;
                                dev->mtu = new_mtu;
                                if ((rc = ibmveth_open(adapter->netdev)))
                                        return rc;
                        } else
                                dev->mtu = new_mtu;
                        return 0;
                }
        }
        return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
        ibmveth_replenish_task(dev->priv);
        ibmveth_interrupt(dev->irq, dev);
}
#endif

static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
        int rc, i;
        long ret;
        struct net_device *netdev;
        struct ibmveth_adapter *adapter;
        u64 set_attr, ret_attr;

        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;

        ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
                                        dev->unit_address);

        mac_addr_p = (unsigned char *) vio_get_attribute(dev,
                        VETH_MAC_ADDR, NULL);
        if (!mac_addr_p) {
                printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
                       "attribute\n", __FILE__, __LINE__);
                return 0;
        }

        mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
                        VETH_MCAST_FILTER_SIZE, NULL);
        if (!mcastFilterSize_p) {
                printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
                       "VETH_MCAST_FILTER_SIZE attribute\n",
                       __FILE__, __LINE__);
                return 0;
        }

        netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

        if (!netdev)
                return -ENOMEM;

        adapter = netdev->priv;
        dev->dev.driver_data = netdev;

        adapter->vdev = dev;
        adapter->netdev = netdev;
        adapter->mcastFilterSize = *mcastFilterSize_p;
        adapter->pool_config = 0;

        netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

        /* Some older boxes running PHYP non-natively have an OF that
           returns a 8-byte local-mac-address field (and the first
           2 bytes have to be ignored) while newer boxes' OF return
           a 6-byte field. Note that IEEE 1275 specifies that
           local-mac-address must be a 6-byte field.
           The RPA doc specifies that the first byte must be 10b, so
           we'll just look for it to solve this 8 vs. 6 byte field issue */

        if ((*mac_addr_p & 0x3) != 0x02)
                mac_addr_p += 2;

        adapter->mac_addr = 0;
        memcpy(&adapter->mac_addr, mac_addr_p, 6);

        netdev->irq = dev->irq;
        netdev->open = ibmveth_open;
        netdev->stop = ibmveth_close;
        netdev->hard_start_xmit = ibmveth_start_xmit;
        netdev->set_multicast_list = ibmveth_set_multicast_list;
        netdev->do_ioctl = ibmveth_ioctl;
        netdev->ethtool_ops = &netdev_ethtool_ops;
        netdev->change_mtu = ibmveth_change_mtu;
        SET_NETDEV_DEV(netdev, &dev->dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
        netdev->poll_controller = ibmveth_poll_controller;
#endif
        netdev->features |= NETIF_F_LLTX;
        spin_lock_init(&adapter->stats_lock);

        memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

        for (i = 0; i < IbmVethNumBufferPools; i++) {
                struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
                ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
                                         pool_count[i], pool_size[i],
                                         pool_active[i]);
                kobj->parent = &dev->dev.kobj;
                kobject_set_name(kobj, "pool%d", i);
                kobj->ktype = &ktype_veth_pool;
                kobject_register(kobj);
        }

        ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

        adapter->buffer_list_dma = DMA_ERROR_CODE;
        adapter->filter_list_dma = DMA_ERROR_CODE;
        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

        ibmveth_debug_printk("registering netdev...\n");

        ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);

        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

                ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);

                if (ret == H_SUCCESS) {
                        adapter->rx_csum = 1;
                        netdev->features |= NETIF_F_IP_CSUM;
                } else
                        ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
        }

        rc = register_netdev(netdev);

        if (rc) {
                ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
                free_netdev(netdev);
                return rc;
        }

        ibmveth_debug_printk("registered\n");

        ibmveth_proc_register_adapter(adapter);

        return 0;
}

static int __devexit ibmveth_remove(struct vio_dev *dev)
{
        struct net_device *netdev = dev->dev.driver_data;
        struct ibmveth_adapter *adapter = netdev->priv;
        int i;

        for (i = 0; i < IbmVethNumBufferPools; i++)
                kobject_unregister(&adapter->rx_buff_pool[i].kobj);

        unregister_netdev(netdev);

        ibmveth_proc_unregister_adapter(adapter);

        free_netdev(netdev);
        return 0;
}

#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
        ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
        if (ibmveth_proc_dir) {
        }
}

static void ibmveth_proc_unregister_driver(void)
{
        remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
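
/* the per-adapter /proc file is a single-record seq_file: start() hands
 * back a dummy token for position 0 and next() always terminates, so
 * show() runs exactly once per read */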
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
        if (*pos == 0) {
                return (void *)1;
        } else {
                return NULL;
        }
}

static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}

static int ibmveth_seq_show(struct seq_file *seq, void *v)
{
        struct ibmveth_adapter *adapter = seq->private;
        char *current_mac = ((char *) &adapter->netdev->dev_addr);
        char *firmware_mac = ((char *) &adapter->mac_addr);
        DECLARE_MAC_BUF(mac);

        seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

        seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
        seq_printf(seq, "Current MAC:     %s\n", print_mac(mac, current_mac));
        seq_printf(seq, "Firmware MAC:    %s\n", print_mac(mac, firmware_mac));

        seq_printf(seq, "\nAdapter Statistics:\n");
        seq_printf(seq, "  TX:  vio_map_single failures:      %ld\n", adapter->tx_map_failed);
        seq_printf(seq, "       send failures:                %ld\n", adapter->tx_send_failed);
        seq_printf(seq, "  RX:  replenish task cycles:        %ld\n", adapter->replenish_task_cycles);
        seq_printf(seq, "       alloc_skb_failures:           %ld\n", adapter->replenish_no_mem);
        seq_printf(seq, "       add buffer failures:          %ld\n", adapter->replenish_add_buff_failure);
        seq_printf(seq, "       invalid buffers:              %ld\n", adapter->rx_invalid_buffer);
        seq_printf(seq, "       no buffers:                   %ld\n", adapter->rx_no_buffer);

        return 0;
}
static struct seq_operations ibmveth_seq_ops = {
        .start = ibmveth_seq_start,
        .next  = ibmveth_seq_next,
        .stop  = ibmveth_seq_stop,
        .show  = ibmveth_seq_show,
};

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
        struct seq_file *seq;
        struct proc_dir_entry *proc;
        int rc;

        rc = seq_open(file, &ibmveth_seq_ops);
        if (!rc) {
                /* recover the pointer buried in proc_dir_entry data */
                seq = file->private_data;
                proc = PDE(inode);
                seq->private = proc->data;
        }
        return rc;
}

static const struct file_operations ibmveth_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = ibmveth_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
        struct proc_dir_entry *entry;
        if (ibmveth_proc_dir) {
                char u_addr[10];
                sprintf(u_addr, "%x", adapter->vdev->unit_address);
                entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
                if (!entry) {
                        ibmveth_error_printk("Cannot create adapter proc entry");
                } else {
                        entry->data = (void *) adapter;
                        entry->proc_fops = &ibmveth_proc_fops;
                }
        }
        return;
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
        if (ibmveth_proc_dir) {
                char u_addr[10];
                sprintf(u_addr, "%x", adapter->vdev->unit_address);
                remove_proc_entry(u_addr, ibmveth_proc_dir);
        }
}

#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}
static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;
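
/* each rx buffer pool is exposed as a sysfs kobject under the vio device,
 * with three writable attributes: active, num (buffer count) and size
 * (buffer size in bytes); writes may close and reopen the device */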
static ssize_t veth_pool_show(struct kobject *kobj,
                              struct attribute *attr, char *buf)
{
        struct ibmveth_buff_pool *pool = container_of(kobj,
                                                      struct ibmveth_buff_pool,
                                                      kobj);

        if (attr == &veth_active_attr)
                return sprintf(buf, "%d\n", pool->active);
        else if (attr == &veth_num_attr)
                return sprintf(buf, "%d\n", pool->size);
        else if (attr == &veth_size_attr)
                return sprintf(buf, "%d\n", pool->buff_size);
        return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t count)
{
        struct ibmveth_buff_pool *pool = container_of(kobj,
                                                      struct ibmveth_buff_pool,
                                                      kobj);
        struct net_device *netdev =
            container_of(kobj->parent, struct device, kobj)->driver_data;
        struct ibmveth_adapter *adapter = netdev->priv;
        long value = simple_strtol(buf, NULL, 10);
        long rc;

        if (attr == &veth_active_attr) {
                if (value && !pool->active) {
                        if (netif_running(netdev)) {
                                if (ibmveth_alloc_buffer_pool(pool)) {
                                        ibmveth_error_printk("unable to alloc pool\n");
                                        return -ENOMEM;
                                }
                                pool->active = 1;
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else
                                pool->active = 1;
                } else if (!value && pool->active) {
                        int mtu = netdev->mtu + IBMVETH_BUFF_OH;
                        int i;
                        /* Make sure there is a buffer pool with buffers that
                           can hold a packet of the size of the MTU */
                        for (i = 0; i < IbmVethNumBufferPools; i++) {
                                if (pool == &adapter->rx_buff_pool[i])
                                        continue;
                                if (!adapter->rx_buff_pool[i].active)
                                        continue;
                                if (mtu <= adapter->rx_buff_pool[i].buff_size)
                                        break;
                        }

                        if (i == IbmVethNumBufferPools) {
                                ibmveth_error_printk("no active pool >= MTU\n");
                                return -EPERM;
                        }

                        pool->active = 0;
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        }
                }
        } else if (attr == &veth_num_attr) {
                if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
                        return -EINVAL;
                else {
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                pool->size = value;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else
                                pool->size = value;
                }
        } else if (attr == &veth_size_attr) {
                if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
                        return -EINVAL;
                else {
                        if (netif_running(netdev)) {
                                adapter->pool_config = 1;
                                ibmveth_close(netdev);
                                adapter->pool_config = 0;
                                pool->buff_size = value;
                                if ((rc = ibmveth_open(netdev)))
                                        return rc;
                        } else
                                pool->buff_size = value;
                }
        }

        /* kick the interrupt handler to allocate/deallocate pools */
        ibmveth_interrupt(netdev->irq, netdev);
        return count;
}

#define ATTR(_name, _mode)                              \
        struct attribute veth_##_name##_attr = {        \
                .name = __stringify(_name), .mode = _mode, \
        };

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
        &veth_active_attr,
        &veth_num_attr,
        &veth_size_attr,
        NULL,
};

static struct sysfs_ops veth_pool_ops = {
        .show  = veth_pool_show,
        .store = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
        .release       = NULL,
        .sysfs_ops     = &veth_pool_ops,
        .default_attrs = veth_pool_attrs,
};

static struct vio_device_id ibmveth_device_table[] __devinitdata = {
        { "network", "IBM,l-lan"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct vio_driver ibmveth_driver = {
        .id_table = ibmveth_device_table,
        .probe    = ibmveth_probe,
        .remove   = ibmveth_remove,
        .driver   = {
                .name  = ibmveth_driver_name,
                .owner = THIS_MODULE,
        }
};

static int __init ibmveth_module_init(void)
{
        ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

        ibmveth_proc_register_driver();

        return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
        vio_unregister_driver(&ibmveth_driver);
        ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);