/[linux-patches]/genpatches-2.6/trunk/2.6.14/2100_skge-driver-v1.3.patch
Gentoo

Contents of /genpatches-2.6/trunk/2.6.14/2100_skge-driver-v1.3.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 328 - (show annotations) (download) (as text)
Tue Mar 14 13:34:17 2006 UTC (14 years, 7 months ago) by johnm
File MIME type: text/x-diff
File size: 25922 byte(s)
2.6.14-11, rebase against local tree
1 This patch is for 2.6.14.7 and upgrades the SKGE driver from v1.1 -> v1.3
2 (the same version as found in 2.6.15.6). While attempting to backport stable
3 tree fixes for 2.6.14 I came across the following patches:
4
5 * [PATCH] skge: handle out of memory on ring changes (2.6.15.1)
6 * [PATCH] skge: speed setting (2.6.15.5)
7 * [PATCH] skge: fix NAPI/IRQ race (2.6.15.5)
8 * [PATCH] skge: genesis phy initialization fix (2.6.15.5)
9 * [PATCH] skge: fix SMP race (2.6.15.5)
10
11 In this case, it was easier to simply roll an incremental patch for the whole
12 driver rather than fiddle around trying to backport each of the above patches
13 individually.
14
15 --Kerin Millar <kerframil@gmail.com>
16
17 --- linux-2.6.14.7/drivers/net/skge.h 2005-10-27 19:02:08.000000000 -0500
18 +++ linux-2.6.15.6/drivers/net/skge.h 2006-03-08 00:52:20.000000000 -0500
19 @@ -6,6 +6,8 @@
20
21 /* PCI config registers */
22 #define PCI_DEV_REG1 0x40
23 +#define PCI_PHY_COMA 0x8000000
24 +#define PCI_VIO 0x2000000
25 #define PCI_DEV_REG2 0x44
26 #define PCI_REV_DESC 0x4
27
28 @@ -2471,6 +2473,7 @@ struct skge_hw {
29
30 struct tasklet_struct ext_tasklet;
31 spinlock_t phy_lock;
32 + spinlock_t hw_lock;
33 };
34
35 enum {
36 --- linux-2.6.14.7/drivers/net/skge.c 2005-10-27 19:02:08.000000000 -0500
37 +++ linux-2.6.15.6/drivers/net/skge.c 2006-03-08 00:52:20.000000000 -0500
38 @@ -37,12 +37,13 @@
39 #include <linux/delay.h>
40 #include <linux/crc32.h>
41 #include <linux/dma-mapping.h>
42 +#include <linux/mii.h>
43 #include <asm/irq.h>
44
45 #include "skge.h"
46
47 #define DRV_NAME "skge"
48 -#define DRV_VERSION "1.1"
49 +#define DRV_VERSION "1.3"
50 #define PFX DRV_NAME " "
51
52 #define DEFAULT_TX_RING_SIZE 128
53 @@ -87,15 +88,14 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
54
55 static int skge_up(struct net_device *dev);
56 static int skge_down(struct net_device *dev);
57 +static void skge_phy_reset(struct skge_port *skge);
58 static void skge_tx_clean(struct skge_port *skge);
59 -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
60 -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
61 +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
62 +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
63 static void genesis_get_stats(struct skge_port *skge, u64 *data);
64 static void yukon_get_stats(struct skge_port *skge, u64 *data);
65 static void yukon_init(struct skge_hw *hw, int port);
66 -static void yukon_reset(struct skge_hw *hw, int port);
67 static void genesis_mac_init(struct skge_hw *hw, int port);
68 -static void genesis_reset(struct skge_hw *hw, int port);
69 static void genesis_link_up(struct skge_port *skge);
70
71 /* Avoid conditionals by using array */
72 @@ -129,7 +129,7 @@ static void skge_get_regs(struct net_dev
73 regs->len - B3_RI_WTO_R1);
74 }
75
76 -/* Wake on Lan only supported on Yukon chps with rev 1 or above */
77 +/* Wake on Lan only supported on Yukon chips with rev 1 or above */
78 static int wol_supported(const struct skge_hw *hw)
79 {
80 return !((hw->chip_id == CHIP_ID_GENESIS ||
81 @@ -169,8 +169,8 @@ static int skge_set_wol(struct net_devic
82 return 0;
83 }
84
85 -/* Determine supported/adverised modes based on hardware.
86 - * Note: ethtoool ADVERTISED_xxx == SUPPORTED_xxx
87 +/* Determine supported/advertised modes based on hardware.
88 + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
89 */
90 static u32 skge_supported_modes(const struct skge_hw *hw)
91 {
92 @@ -275,10 +275,9 @@ static int skge_set_settings(struct net_
93 skge->autoneg = ecmd->autoneg;
94 skge->advertising = ecmd->advertising;
95
96 - if (netif_running(dev)) {
97 - skge_down(dev);
98 - skge_up(dev);
99 - }
100 + if (netif_running(dev))
101 + skge_phy_reset(skge);
102 +
103 return (0);
104 }
105
106 @@ -398,6 +397,7 @@ static int skge_set_ring_param(struct ne
107 struct ethtool_ringparam *p)
108 {
109 struct skge_port *skge = netdev_priv(dev);
110 + int err;
111
112 if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
113 p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE)
114 @@ -408,7 +408,11 @@ static int skge_set_ring_param(struct ne
115
116 if (netif_running(dev)) {
117 skge_down(dev);
118 - skge_up(dev);
119 + err = skge_up(dev);
120 + if (err)
121 + dev_close(dev);
122 + else
123 + dev->set_multicast_list(dev);
124 }
125
126 return 0;
127 @@ -429,21 +433,11 @@ static void skge_set_msglevel(struct net
128 static int skge_nway_reset(struct net_device *dev)
129 {
130 struct skge_port *skge = netdev_priv(dev);
131 - struct skge_hw *hw = skge->hw;
132 - int port = skge->port;
133
134 if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev))
135 return -EINVAL;
136
137 - spin_lock_bh(&hw->phy_lock);
138 - if (hw->chip_id == CHIP_ID_GENESIS) {
139 - genesis_reset(hw, port);
140 - genesis_mac_init(hw, port);
141 - } else {
142 - yukon_reset(hw, port);
143 - yukon_init(hw, port);
144 - }
145 - spin_unlock_bh(&hw->phy_lock);
146 + skge_phy_reset(skge);
147 return 0;
148 }
149
150 @@ -515,10 +509,8 @@ static int skge_set_pauseparam(struct ne
151 else
152 skge->flow_control = FLOW_MODE_NONE;
153
154 - if (netif_running(dev)) {
155 - skge_down(dev);
156 - skge_up(dev);
157 - }
158 + if (netif_running(dev))
159 + skge_phy_reset(skge);
160 return 0;
161 }
162
163 @@ -531,13 +523,13 @@ static inline u32 hwkhz(const struct skg
164 return 78215; /* or: 78.125 MHz */
165 }
166
167 -/* Chip hz to microseconds */
168 +/* Chip HZ to microseconds */
169 static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks)
170 {
171 return (ticks * 1000) / hwkhz(hw);
172 }
173
174 -/* Microseconds to chip hz */
175 +/* Microseconds to chip HZ */
176 static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec)
177 {
178 return hwkhz(hw) * usec / 1000;
179 @@ -730,6 +722,7 @@ static struct ethtool_ops skge_ethtool_o
180 .phys_id = skge_phys_id,
181 .get_stats_count = skge_get_stats_count,
182 .get_ethtool_stats = skge_get_ethtool_stats,
183 + .get_perm_addr = ethtool_op_get_perm_addr,
184 };
185
186 /*
187 @@ -882,32 +875,36 @@ static void skge_link_down(struct skge_p
188 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
189 }
190
191 -static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
192 +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
193 {
194 int i;
195 - u16 v;
196
197 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
198 - v = xm_read16(hw, port, XM_PHY_DATA);
199 + *val = xm_read16(hw, port, XM_PHY_DATA);
200
201 - /* Need to wait for external PHY */
202 for (i = 0; i < PHY_RETRIES; i++) {
203 - udelay(1);
204 - if (xm_read16(hw, port, XM_MMU_CMD)
205 - & XM_MMU_PHY_RDY)
206 + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY)
207 goto ready;
208 + udelay(1);
209 }
210
211 - printk(KERN_WARNING PFX "%s: phy read timed out\n",
212 - hw->dev[port]->name);
213 - return 0;
214 + return -ETIMEDOUT;
215 ready:
216 - v = xm_read16(hw, port, XM_PHY_DATA);
217 + *val = xm_read16(hw, port, XM_PHY_DATA);
218 +
219 + return 0;
220 +}
221
222 +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
223 +{
224 + u16 v = 0;
225 + if (__xm_phy_read(hw, port, reg, &v))
226 + printk(KERN_WARNING PFX "%s: phy read timed out\n",
227 + hw->dev[port]->name);
228 return v;
229 }
230
231 -static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
232 +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
233 {
234 int i;
235
236 @@ -917,19 +914,16 @@ static void xm_phy_write(struct skge_hw
237 goto ready;
238 udelay(1);
239 }
240 - printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
241 - hw->dev[port]->name);
242 -
243 + return -EIO;
244
245 ready:
246 xm_write16(hw, port, XM_PHY_DATA, val);
247 for (i = 0; i < PHY_RETRIES; i++) {
248 - udelay(1);
249 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
250 - return;
251 + return 0;
252 + udelay(1);
253 }
254 - printk(KERN_WARNING PFX "%s: phy write timed out\n",
255 - hw->dev[port]->name);
256 + return -ETIMEDOUT;
257 }
258
259 static void genesis_init(struct skge_hw *hw)
260 @@ -1164,7 +1158,7 @@ static void bcom_phy_init(struct skge_po
261 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
262 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
263
264 - /* Use link status change interrrupt */
265 + /* Use link status change interrupt */
266 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
267
268 bcom_check_link(hw, port);
269 @@ -1179,13 +1173,17 @@ static void genesis_mac_init(struct skge
270 u32 r;
271 const u8 zero[6] = { 0 };
272
273 - /* Clear MIB counters */
274 - xm_write16(hw, port, XM_STAT_CMD,
275 - XM_SC_CLR_RXC | XM_SC_CLR_TXC);
276 - /* Clear two times according to Errata #3 */
277 - xm_write16(hw, port, XM_STAT_CMD,
278 - XM_SC_CLR_RXC | XM_SC_CLR_TXC);
279 + for (i = 0; i < 10; i++) {
280 + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
281 + MFF_SET_MAC_RST);
282 + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)
283 + goto reset_ok;
284 + udelay(1);
285 + }
286 +
287 + printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
288
289 + reset_ok:
290 /* Unreset the XMAC. */
291 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
292
293 @@ -1202,9 +1200,9 @@ static void genesis_mac_init(struct skge
294 r |= GP_DIR_2|GP_IO_2;
295
296 skge_write32(hw, B2_GP_IO, r);
297 - skge_read32(hw, B2_GP_IO);
298
299 - /* Enable GMII interfac */
300 +
301 + /* Enable GMII interface */
302 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
303
304 bcom_phy_init(skge, jumbo);
305 @@ -1216,6 +1214,13 @@ static void genesis_mac_init(struct skge
306 for (i = 1; i < 16; i++)
307 xm_outaddr(hw, port, XM_EXM(i), zero);
308
309 + /* Clear MIB counters */
310 + xm_write16(hw, port, XM_STAT_CMD,
311 + XM_SC_CLR_RXC | XM_SC_CLR_TXC);
312 + /* Clear two times according to Errata #3 */
313 + xm_write16(hw, port, XM_STAT_CMD,
314 + XM_SC_CLR_RXC | XM_SC_CLR_TXC);
315 +
316 /* configure Rx High Water Mark (XM_RX_HI_WM) */
317 xm_write16(hw, port, XM_RX_HI_WM, 1450);
318
319 @@ -1255,7 +1260,7 @@ static void genesis_mac_init(struct skge
320 * that jumbo frames larger than 8192 bytes will be
321 * truncated. Disabling all bad frame filtering causes
322 * the RX FIFO to operate in streaming mode, in which
323 - * case the XMAC will start transfering frames out of the
324 + * case the XMAC will start transferring frames out of the
325 * RX FIFO as soon as the FIFO threshold is reached.
326 */
327 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
328 @@ -1322,7 +1327,7 @@ static void genesis_stop(struct skge_por
329 port == 0 ? PA_CLR_TO_TX1 : PA_CLR_TO_TX2);
330
331 /*
332 - * If the transfer stucks at the MAC the STOP command will not
333 + * If the transfer sticks at the MAC the STOP command will not
334 * terminate if we don't flush the XMAC's transmit FIFO !
335 */
336 xm_write32(hw, port, XM_MODE,
337 @@ -1399,42 +1404,6 @@ static void genesis_mac_intr(struct skge
338 }
339 }
340
341 -static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
342 -{
343 - int i;
344 -
345 - gma_write16(hw, port, GM_SMI_DATA, val);
346 - gma_write16(hw, port, GM_SMI_CTRL,
347 - GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
348 - for (i = 0; i < PHY_RETRIES; i++) {
349 - udelay(1);
350 -
351 - if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
352 - break;
353 - }
354 -}
355 -
356 -static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
357 -{
358 - int i;
359 -
360 - gma_write16(hw, port, GM_SMI_CTRL,
361 - GM_SMI_CT_PHY_AD(hw->phy_addr)
362 - | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
363 -
364 - for (i = 0; i < PHY_RETRIES; i++) {
365 - udelay(1);
366 - if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
367 - goto ready;
368 - }
369 -
370 - printk(KERN_WARNING PFX "%s: phy read timeout\n",
371 - hw->dev[port]->name);
372 - return 0;
373 - ready:
374 - return gma_read16(hw, port, GM_SMI_DATA);
375 -}
376 -
377 static void genesis_link_up(struct skge_port *skge)
378 {
379 struct skge_hw *hw = skge->hw;
380 @@ -1548,7 +1517,55 @@ static inline void bcom_phy_intr(struct
381
382 }
383
384 -/* Marvell Phy Initailization */
385 +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
386 +{
387 + int i;
388 +
389 + gma_write16(hw, port, GM_SMI_DATA, val);
390 + gma_write16(hw, port, GM_SMI_CTRL,
391 + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
392 + for (i = 0; i < PHY_RETRIES; i++) {
393 + udelay(1);
394 +
395 + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
396 + return 0;
397 + }
398 +
399 + printk(KERN_WARNING PFX "%s: phy write timeout\n",
400 + hw->dev[port]->name);
401 + return -EIO;
402 +}
403 +
404 +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
405 +{
406 + int i;
407 +
408 + gma_write16(hw, port, GM_SMI_CTRL,
409 + GM_SMI_CT_PHY_AD(hw->phy_addr)
410 + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
411 +
412 + for (i = 0; i < PHY_RETRIES; i++) {
413 + udelay(1);
414 + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
415 + goto ready;
416 + }
417 +
418 + return -ETIMEDOUT;
419 + ready:
420 + *val = gma_read16(hw, port, GM_SMI_DATA);
421 + return 0;
422 +}
423 +
424 +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
425 +{
426 + u16 v = 0;
427 + if (__gm_phy_read(hw, port, reg, &v))
428 + printk(KERN_WARNING PFX "%s: phy read timeout\n",
429 + hw->dev[port]->name);
430 + return v;
431 +}
432 +
433 +/* Marvell Phy Initialization */
434 static void yukon_init(struct skge_hw *hw, int port)
435 {
436 struct skge_port *skge = netdev_priv(hw->dev[port]);
437 @@ -1696,6 +1713,7 @@ static void yukon_mac_init(struct skge_h
438 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
439 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
440 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
441 +
442 if (skge->autoneg == AUTONEG_DISABLE) {
443 reg = GM_GPCR_AU_ALL_DIS;
444 gma_write16(hw, port, GM_GP_CTRL,
445 @@ -1703,16 +1721,23 @@ static void yukon_mac_init(struct skge_h
446
447 switch (skge->speed) {
448 case SPEED_1000:
449 + reg &= ~GM_GPCR_SPEED_100;
450 reg |= GM_GPCR_SPEED_1000;
451 - /* fallthru */
452 + break;
453 case SPEED_100:
454 + reg &= ~GM_GPCR_SPEED_1000;
455 reg |= GM_GPCR_SPEED_100;
456 + break;
457 + case SPEED_10:
458 + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100);
459 + break;
460 }
461
462 if (skge->duplex == DUPLEX_FULL)
463 reg |= GM_GPCR_DUP_FULL;
464 } else
465 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
466 +
467 switch (skge->flow_control) {
468 case FLOW_MODE_NONE:
469 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
470 @@ -1793,6 +1818,25 @@ static void yukon_mac_init(struct skge_h
471 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
472 }
473
474 +/* Go into power down mode */
475 +static void yukon_suspend(struct skge_hw *hw, int port)
476 +{
477 + u16 ctrl;
478 +
479 + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL);
480 + ctrl |= PHY_M_PC_POL_R_DIS;
481 + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl);
482 +
483 + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
484 + ctrl |= PHY_CT_RESET;
485 + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
486 +
487 + /* switch IEEE compatible power down mode on */
488 + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
489 + ctrl |= PHY_CT_PDOWN;
490 + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
491 +}
492 +
493 static void yukon_stop(struct skge_port *skge)
494 {
495 struct skge_hw *hw = skge->hw;
496 @@ -1806,14 +1850,7 @@ static void yukon_stop(struct skge_port
497 & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA));
498 gma_read16(hw, port, GM_GP_CTRL);
499
500 - if (hw->chip_id == CHIP_ID_YUKON_LITE &&
501 - hw->chip_rev >= CHIP_REV_YU_LITE_A3) {
502 - u32 io = skge_read32(hw, B2_GP_IO);
503 -
504 - io |= GP_DIR_9 | GP_IO_9;
505 - skge_write32(hw, B2_GP_IO, io);
506 - skge_read32(hw, B2_GP_IO);
507 - }
508 + yukon_suspend(hw, port);
509
510 /* set GPHY Control reset */
511 skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
512 @@ -1912,7 +1949,6 @@ static void yukon_link_down(struct skge_
513
514 }
515
516 - yukon_reset(hw, port);
517 skge_link_down(skge);
518
519 yukon_init(hw, port);
520 @@ -1996,6 +2032,67 @@ static void yukon_phy_intr(struct skge_p
521 /* XXX restart autonegotiation? */
522 }
523
524 +static void skge_phy_reset(struct skge_port *skge)
525 +{
526 + struct skge_hw *hw = skge->hw;
527 + int port = skge->port;
528 +
529 + netif_stop_queue(skge->netdev);
530 + netif_carrier_off(skge->netdev);
531 +
532 + spin_lock_bh(&hw->phy_lock);
533 + if (hw->chip_id == CHIP_ID_GENESIS)
534 + genesis_mac_init(hw, port);
535 + else
536 + yukon_init(hw, port);
537 + spin_unlock_bh(&hw->phy_lock);
538 +}
539 +
540 +/* Basic MII support */
541 +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
542 +{
543 + struct mii_ioctl_data *data = if_mii(ifr);
544 + struct skge_port *skge = netdev_priv(dev);
545 + struct skge_hw *hw = skge->hw;
546 + int err = -EOPNOTSUPP;
547 +
548 + if (!netif_running(dev))
549 + return -ENODEV; /* Phy still in reset */
550 +
551 + switch(cmd) {
552 + case SIOCGMIIPHY:
553 + data->phy_id = hw->phy_addr;
554 +
555 + /* fallthru */
556 + case SIOCGMIIREG: {
557 + u16 val = 0;
558 + spin_lock_bh(&hw->phy_lock);
559 + if (hw->chip_id == CHIP_ID_GENESIS)
560 + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
561 + else
562 + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
563 + spin_unlock_bh(&hw->phy_lock);
564 + data->val_out = val;
565 + break;
566 + }
567 +
568 + case SIOCSMIIREG:
569 + if (!capable(CAP_NET_ADMIN))
570 + return -EPERM;
571 +
572 + spin_lock_bh(&hw->phy_lock);
573 + if (hw->chip_id == CHIP_ID_GENESIS)
574 + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
575 + data->val_in);
576 + else
577 + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
578 + data->val_in);
579 + spin_unlock_bh(&hw->phy_lock);
580 + break;
581 + }
582 + return err;
583 +}
584 +
585 static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
586 {
587 u32 end;
588 @@ -2085,10 +2182,12 @@ static int skge_up(struct net_device *de
589 skge->tx_avail = skge->tx_ring.count - 1;
590
591 /* Enable IRQ from port */
592 + spin_lock_irq(&hw->hw_lock);
593 hw->intr_mask |= portirqmask[port];
594 skge_write32(hw, B0_IMSK, hw->intr_mask);
595 + spin_unlock_irq(&hw->hw_lock);
596
597 - /* Initialze MAC */
598 + /* Initialize MAC */
599 spin_lock_bh(&hw->phy_lock);
600 if (hw->chip_id == CHIP_ID_GENESIS)
601 genesis_mac_init(hw, port);
602 @@ -2119,6 +2218,7 @@ static int skge_up(struct net_device *de
603 kfree(skge->rx_ring.start);
604 free_pci_mem:
605 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
606 + skge->mem = NULL;
607
608 return err;
609 }
610 @@ -2129,6 +2229,9 @@ static int skge_down(struct net_device *
611 struct skge_hw *hw = skge->hw;
612 int port = skge->port;
613
614 + if (skge->mem == NULL)
615 + return 0;
616 +
617 if (netif_msg_ifdown(skge))
618 printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
619
620 @@ -2140,8 +2243,10 @@ static int skge_down(struct net_device *
621 else
622 yukon_stop(skge);
623
624 + spin_lock_irq(&hw->hw_lock);
625 hw->intr_mask &= ~portirqmask[skge->port];
626 skge_write32(hw, B0_IMSK, hw->intr_mask);
627 + spin_unlock_irq(&hw->hw_lock);
628
629 /* Stop transmitter */
630 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
631 @@ -2185,6 +2290,7 @@ static int skge_down(struct net_device *
632 kfree(skge->rx_ring.start);
633 kfree(skge->tx_ring.start);
634 pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma);
635 + skge->mem = NULL;
636 return 0;
637 }
638
639 @@ -2212,11 +2318,13 @@ static int skge_xmit_frame(struct sk_buf
640 }
641
642 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
643 - netif_stop_queue(dev);
644 - spin_unlock_irqrestore(&skge->tx_lock, flags);
645 + if (!netif_queue_stopped(dev)) {
646 + netif_stop_queue(dev);
647
648 - printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
649 - dev->name);
650 + printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
651 + dev->name);
652 + }
653 + spin_unlock_irqrestore(&skge->tx_lock, flags);
654 return NETDEV_TX_BUSY;
655 }
656
657 @@ -2232,14 +2340,12 @@ static int skge_xmit_frame(struct sk_buf
658 td->dma_hi = map >> 32;
659
660 if (skb->ip_summed == CHECKSUM_HW) {
661 - const struct iphdr *ip
662 - = (const struct iphdr *) (skb->data + ETH_HLEN);
663 int offset = skb->h.raw - skb->data;
664
665 /* This seems backwards, but it is what the sk98lin
666 * does. Looks like hardware is wrong?
667 */
668 - if (ip->protocol == IPPROTO_UDP
669 + if (skb->h.ipiph->protocol == IPPROTO_UDP
670 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
671 control = BMU_TCP_CHECK;
672 else
673 @@ -2345,18 +2451,23 @@ static void skge_tx_timeout(struct net_d
674
675 static int skge_change_mtu(struct net_device *dev, int new_mtu)
676 {
677 - int err = 0;
678 - int running = netif_running(dev);
679 + int err;
680
681 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
682 return -EINVAL;
683
684 + if (!netif_running(dev)) {
685 + dev->mtu = new_mtu;
686 + return 0;
687 + }
688 +
689 + skge_down(dev);
690
691 - if (running)
692 - skge_down(dev);
693 dev->mtu = new_mtu;
694 - if (running)
695 - skge_up(dev);
696 +
697 + err = skge_up(dev);
698 + if (err)
699 + dev_close(dev);
700
701 return err;
702 }
703 @@ -2408,7 +2519,7 @@ static void yukon_set_multicast(struct n
704 reg = gma_read16(hw, port, GM_RX_CTRL);
705 reg |= GM_RXCR_UCF_ENA;
706
707 - if (dev->flags & IFF_PROMISC) /* promiscious */
708 + if (dev->flags & IFF_PROMISC) /* promiscuous */
709 reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
710 else if (dev->flags & IFF_ALLMULTI) /* all multicast */
711 memset(filter, 0xff, sizeof(filter));
712 @@ -2559,7 +2670,7 @@ static int skge_poll(struct net_device *
713 unsigned int to_do = min(dev->quota, *budget);
714 unsigned int work_done = 0;
715
716 - for (e = ring->to_clean; work_done < to_do; e = e->next) {
717 + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
718 struct skge_rx_desc *rd = e->desc;
719 struct sk_buff *skb;
720 u32 control;
721 @@ -2583,8 +2694,7 @@ static int skge_poll(struct net_device *
722
723 /* restart receiver */
724 wmb();
725 - skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
726 - CSR_START | CSR_IRQ_CL_F);
727 + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START);
728
729 *budget -= work_done;
730 dev->quota -= work_done;
731 @@ -2592,11 +2702,12 @@ static int skge_poll(struct net_device *
732 if (work_done >= to_do)
733 return 1; /* not done */
734
735 - local_irq_disable();
736 + spin_lock_irq(&hw->hw_lock);
737 __netif_rx_complete(dev);
738 - hw->intr_mask |= portirqmask[skge->port];
739 - skge_write32(hw, B0_IMSK, hw->intr_mask);
740 - local_irq_enable();
741 + hw->intr_mask |= portirqmask[skge->port];
742 + skge_write32(hw, B0_IMSK, hw->intr_mask);
743 + spin_unlock_irq(&hw->hw_lock);
744 +
745 return 0;
746 }
747
748 @@ -2608,7 +2719,7 @@ static inline void skge_tx_intr(struct n
749 struct skge_element *e;
750
751 spin_lock(&skge->tx_lock);
752 - for (e = ring->to_clean; e != ring->to_use; e = e->next) {
753 + for (e = ring->to_clean; prefetch(e->next), e != ring->to_use; e = e->next) {
754 struct skge_tx_desc *td = e->desc;
755 u32 control;
756
757 @@ -2731,7 +2842,7 @@ static void skge_error_irq(struct skge_h
758 }
759
760 /*
761 - * Interrrupt from PHY are handled in tasklet (soft irq)
762 + * Interrupt from PHY are handled in tasklet (soft irq)
763 * because accessing phy registers requires spin wait which might
764 * cause excess interrupt latency.
765 */
766 @@ -2755,10 +2866,10 @@ static void skge_extirq(unsigned long da
767 }
768 spin_unlock(&hw->phy_lock);
769
770 - local_irq_disable();
771 + spin_lock_irq(&hw->hw_lock);
772 hw->intr_mask |= IS_EXT_REG;
773 skge_write32(hw, B0_IMSK, hw->intr_mask);
774 - local_irq_enable();
775 + spin_unlock_irq(&hw->hw_lock);
776 }
777
778 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
779 @@ -2769,13 +2880,15 @@ static irqreturn_t skge_intr(int irq, vo
780 if (status == 0 || status == ~0) /* hotplug or shared irq */
781 return IRQ_NONE;
782
783 - status &= hw->intr_mask;
784 + spin_lock(&hw->hw_lock);
785 if (status & IS_R1_F) {
786 + skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
787 hw->intr_mask &= ~IS_R1_F;
788 netif_rx_schedule(hw->dev[0]);
789 }
790
791 if (status & IS_R2_F) {
792 + skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
793 hw->intr_mask &= ~IS_R2_F;
794 netif_rx_schedule(hw->dev[1]);
795 }
796 @@ -2819,6 +2932,7 @@ static irqreturn_t skge_intr(int irq, vo
797 }
798
799 skge_write32(hw, B0_IMSK, hw->intr_mask);
800 + spin_unlock(&hw->hw_lock);
801
802 return IRQ_HANDLED;
803 }
804 @@ -2892,6 +3006,7 @@ static const char *skge_board_name(const
805 */
806 static int skge_reset(struct skge_hw *hw)
807 {
808 + u32 reg;
809 u16 ctst;
810 u8 t8, mac_cfg, pmd_type, phy_type;
811 int i;
812 @@ -2970,6 +3085,7 @@ static int skge_reset(struct skge_hw *hw
813 /* switch power to VCC (WA for VAUX problem) */
814 skge_write8(hw, B0_POWER_CTRL,
815 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
816 +
817 /* avoid boards with stuck Hardware error bits */
818 if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) &&
819 (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) {
820 @@ -2977,6 +3093,14 @@ static int skge_reset(struct skge_hw *hw
821 hw->intr_mask &= ~IS_HW_ERR;
822 }
823
824 + /* Clear PHY COMA */
825 + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
826 + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg);
827 + reg &= ~PCI_PHY_COMA;
828 + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg);
829 + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
830 +
831 +
832 for (i = 0; i < hw->ports; i++) {
833 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
834 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
835 @@ -3047,6 +3171,7 @@ static struct net_device *skge_devinit(s
836 SET_NETDEV_DEV(dev, &hw->pdev->dev);
837 dev->open = skge_up;
838 dev->stop = skge_down;
839 + dev->do_ioctl = skge_ioctl;
840 dev->hard_start_xmit = skge_xmit_frame;
841 dev->get_stats = skge_get_stats;
842 if (hw->chip_id == CHIP_ID_GENESIS)
843 @@ -3096,6 +3221,7 @@ static struct net_device *skge_devinit(s
844
845 /* read the mac address */
846 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
847 + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
848
849 /* device is off until link detection */
850 netif_carrier_off(dev);
851 @@ -3145,7 +3271,7 @@ static int __devinit skge_probe(struct p
852 }
853
854 #ifdef __BIG_ENDIAN
855 - /* byte swap decriptors in hardware */
856 + /* byte swap descriptors in hardware */
857 {
858 u32 reg;
859
860 @@ -3156,16 +3282,16 @@ static int __devinit skge_probe(struct p
861 #endif
862
863 err = -ENOMEM;
864 - hw = kmalloc(sizeof(*hw), GFP_KERNEL);
865 + hw = kzalloc(sizeof(*hw), GFP_KERNEL);
866 if (!hw) {
867 printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
868 pci_name(pdev));
869 goto err_out_free_regions;
870 }
871
872 - memset(hw, 0, sizeof(*hw));
873 hw->pdev = pdev;
874 spin_lock_init(&hw->phy_lock);
875 + spin_lock_init(&hw->hw_lock);
876 tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
877
878 hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
879 @@ -3186,7 +3312,7 @@ static int __devinit skge_probe(struct p
880 if (err)
881 goto err_out_free_irq;
882
883 - printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
884 + printk(KERN_INFO PFX DRV_VERSION " addr 0x%lx irq %d chip %s rev %d\n",
885 pci_resource_start(pdev, 0), pdev->irq,
886 skge_board_name(hw), hw->chip_rev);
887
888 @@ -3311,8 +3437,8 @@ static int skge_resume(struct pci_dev *p
889 struct net_device *dev = hw->dev[i];
890 if (dev) {
891 netif_device_attach(dev);
892 - if (netif_running(dev))
893 - skge_up(dev);
894 + if (netif_running(dev) && skge_up(dev))
895 + dev_close(dev);
896 }
897 }
898 return 0;

  ViewVC Help
Powered by ViewVC 1.1.20