Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (82 commits)
[NET]: Make sure sockets implement splice_read
netconsole: avoid null pointer dereference at show_local_mac()
[IPV6]: Fix reversed local_df test in ip6_fragment
[XFRM]: Avoid bogus BUG() when throwing new policy away.
[AF_KEY]: Fix bug in spdadd
[NETFILTER] nf_conntrack_proto_tcp.c: Mistyped state corrected.
net: xfrm statistics depend on INET
[NETFILTER]: make secmark_tg_destroy() static
[INET]: Unexport inet_listen_wlock
[INET]: Unexport __inet_hash_connect
[NET]: Improve cache line coherency of ingress qdisc
[NET]: Fix race in dev_close(). (Bug 9750)
[IPSEC]: Fix bogus usage of u64 on input sequence number
[RTNETLINK]: Send a single notification on device state changes.
[NETLABLE]: Hide netlbl_unlabel_audit_addr6 under ifdef CONFIG_IPV6.
[NETLABEL]: Don't produce unused variables when IPv6 is off.
[NETLABEL]: Compilation for CONFIG_AUDIT=n case.
[GENETLINK]: Relax dances with genl_lock.
[NETLABEL]: Fix lookup logic of netlbl_domhsh_search_def.
[IPV6]: remove unused method declaration (net/ndisc.h).
...

+5664 -1954
+1
drivers/bluetooth/hci_ldisc.c
··· 208 208 return 0; 209 209 210 210 hci_uart_flush(hdev); 211 + hdev->flush = NULL; 211 212 return 0; 212 213 } 213 214
+1 -1
drivers/net/8139too.c
··· 168 168 * Warning: 64K ring has hardware issues and may lock up. 169 169 */ 170 170 #if defined(CONFIG_SH_DREAMCAST) 171 - #define RX_BUF_IDX 1 /* 16K ring */ 171 + #define RX_BUF_IDX 0 /* 8K ring */ 172 172 #else 173 173 #define RX_BUF_IDX 2 /* 32K ring */ 174 174 #endif
+18
drivers/net/Kconfig
··· 931 931 Enable the verify after the buffer write useful for debugging purpose. 932 932 If unsure, say N. 933 933 934 + config DM9000_DEBUGLEVEL 935 + int "DM9000 maximum debug level" 936 + depends on DM9000 937 + default 4 938 + help 939 + The maximum level of debugging code compiled into the DM9000 940 + driver. 941 + 934 942 config SMC911X 935 943 tristate "SMSC LAN911[5678] support" 936 944 select CRC32 ··· 2359 2351 2360 2352 To compile this driver as a module, choose M here: the 2361 2353 module will be called ps3_gelic. 2354 + 2355 + config GELIC_WIRELESS 2356 + bool "PS3 Wireless support" 2357 + depends on GELIC_NET 2358 + help 2359 + This option adds the support for the wireless feature of PS3. 2360 + If you have the wireless-less model of PS3 or have no plan to 2361 + use wireless feature, disabling this option saves memory. As 2362 + the driver automatically distinguishes the models, you can 2363 + safely enable this option even if you have a wireless-less model. 2362 2364 2363 2365 config GIANFAR 2364 2366 tristate "Gianfar Ethernet"
+2 -1
drivers/net/Makefile
··· 70 70 spidernet-y += spider_net.o spider_net_ethtool.o 71 71 obj-$(CONFIG_SPIDER_NET) += spidernet.o sungem_phy.o 72 72 obj-$(CONFIG_GELIC_NET) += ps3_gelic.o 73 - ps3_gelic-objs += ps3_gelic_net.o 73 + gelic_wireless-$(CONFIG_GELIC_WIRELESS) += ps3_gelic_wireless.o 74 + ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y) 74 75 obj-$(CONFIG_TC35815) += tc35815.o 75 76 obj-$(CONFIG_SKGE) += skge.o 76 77 obj-$(CONFIG_SKY2) += sky2.o
+1 -1
drivers/net/cxgb3/l2t.c
··· 404 404 if (neigh->nud_state & NUD_FAILED) { 405 405 arpq = e->arpq_head; 406 406 e->arpq_head = e->arpq_tail = NULL; 407 - } else if (neigh_is_connected(neigh)) 407 + } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) 408 408 setup_l2e_send_pending(dev, NULL, e); 409 409 } else { 410 410 e->state = neigh_is_connected(neigh) ?
+15 -20
drivers/net/cxgb3/sge.c
··· 1059 1059 htonl(V_WR_TID(q->token))); 1060 1060 } 1061 1061 1062 + static inline void t3_stop_queue(struct net_device *dev, struct sge_qset *qs, 1063 + struct sge_txq *q) 1064 + { 1065 + netif_stop_queue(dev); 1066 + set_bit(TXQ_ETH, &qs->txq_stopped); 1067 + q->stops++; 1068 + } 1069 + 1062 1070 /** 1063 1071 * eth_xmit - add a packet to the Ethernet Tx queue 1064 1072 * @skb: the packet ··· 1098 1090 ndesc = calc_tx_descs(skb); 1099 1091 1100 1092 if (unlikely(credits < ndesc)) { 1101 - if (!netif_queue_stopped(dev)) { 1102 - netif_stop_queue(dev); 1103 - set_bit(TXQ_ETH, &qs->txq_stopped); 1104 - q->stops++; 1105 - dev_err(&adap->pdev->dev, 1106 - "%s: Tx ring %u full while queue awake!\n", 1107 - dev->name, q->cntxt_id & 7); 1108 - } 1093 + t3_stop_queue(dev, qs, q); 1094 + dev_err(&adap->pdev->dev, 1095 + "%s: Tx ring %u full while queue awake!\n", 1096 + dev->name, q->cntxt_id & 7); 1109 1097 spin_unlock(&q->lock); 1110 1098 return NETDEV_TX_BUSY; 1111 1099 } 1112 1100 1113 1101 q->in_use += ndesc; 1114 - if (unlikely(credits - ndesc < q->stop_thres)) { 1115 - q->stops++; 1116 - netif_stop_queue(dev); 1117 - set_bit(TXQ_ETH, &qs->txq_stopped); 1118 - #if !USE_GTS 1119 - if (should_restart_tx(q) && 1120 - test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1121 - q->restarts++; 1122 - netif_wake_queue(dev); 1123 - } 1124 - #endif 1125 - } 1102 + if (unlikely(credits - ndesc < q->stop_thres)) 1103 + if (USE_GTS || !should_restart_tx(q)) 1104 + t3_stop_queue(dev, qs, q); 1126 1105 1127 1106 gen = q->gen; 1128 1107 q->unacked += ndesc;
+433 -237
drivers/net/dm9000.c
··· 1 1 /* 2 - * dm9000.c: Version 1.2 03/18/2003 3 - * 4 - * A Davicom DM9000 ISA NIC fast Ethernet driver for Linux. 2 + * Davicom DM9000 Fast Ethernet driver for Linux. 5 3 * Copyright (C) 1997 Sten Wang 6 4 * 7 5 * This program is free software; you can redistribute it and/or ··· 12 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 15 * GNU General Public License for more details. 14 16 * 15 - * (C)Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. 17 + * (C) Copyright 1997-1998 DAVICOM Semiconductor,Inc. All Rights Reserved. 16 18 * 17 - * V0.11 06/20/2001 REG_0A bit3=1, default enable BP with DA match 18 - * 06/22/2001 Support DM9801 progrmming 19 - * E3: R25 = ((R24 + NF) & 0x00ff) | 0xf000 20 - * E4: R25 = ((R24 + NF) & 0x00ff) | 0xc200 21 - * R17 = (R17 & 0xfff0) | NF + 3 22 - * E5: R25 = ((R24 + NF - 3) & 0x00ff) | 0xc200 23 - * R17 = (R17 & 0xfff0) | NF 24 - * 25 - * v1.00 modify by simon 2001.9.5 26 - * change for kernel 2.4.x 27 - * 28 - * v1.1 11/09/2001 fix force mode bug 29 - * 30 - * v1.2 03/18/2003 Weilun Huang <weilun_huang@davicom.com.tw>: 31 - * Fixed phy reset. 32 - * Added tx/rx 32 bit mode. 33 - * Cleaned up for kernel merge. 
34 - * 35 - * 03/03/2004 Sascha Hauer <s.hauer@pengutronix.de> 36 - * Port to 2.6 kernel 37 - * 38 - * 24-Sep-2004 Ben Dooks <ben@simtec.co.uk> 39 - * Cleanup of code to remove ifdefs 40 - * Allowed platform device data to influence access width 41 - * Reformatting areas of code 42 - * 43 - * 17-Mar-2005 Sascha Hauer <s.hauer@pengutronix.de> 44 - * * removed 2.4 style module parameters 45 - * * removed removed unused stat counter and fixed 46 - * net_device_stats 47 - * * introduced tx_timeout function 48 - * * reworked locking 49 - * 50 - * 01-Jul-2005 Ben Dooks <ben@simtec.co.uk> 51 - * * fixed spinlock call without pointer 52 - * * ensure spinlock is initialised 19 + * Additional updates, Copyright: 20 + * Ben Dooks <ben@simtec.co.uk> 21 + * Sascha Hauer <s.hauer@pengutronix.de> 53 22 */ 54 23 55 24 #include <linux/module.h> ··· 28 63 #include <linux/spinlock.h> 29 64 #include <linux/crc32.h> 30 65 #include <linux/mii.h> 66 + #include <linux/ethtool.h> 31 67 #include <linux/dm9000.h> 32 68 #include <linux/delay.h> 33 69 #include <linux/platform_device.h> ··· 46 80 47 81 #define CARDNAME "dm9000" 48 82 #define PFX CARDNAME ": " 49 - 50 - #define DM9000_TIMER_WUT jiffies+(HZ*2) /* timer wakeup time : 2 second */ 51 - 52 - #define DM9000_DEBUG 0 53 - 54 - #if DM9000_DEBUG > 2 55 - #define PRINTK3(args...) printk(CARDNAME ": " args) 56 - #else 57 - #define PRINTK3(args...) do { } while(0) 58 - #endif 59 - 60 - #if DM9000_DEBUG > 1 61 - #define PRINTK2(args...) printk(CARDNAME ": " args) 62 - #else 63 - #define PRINTK2(args...) do { } while(0) 64 - #endif 65 - 66 - #if DM9000_DEBUG > 0 67 - #define PRINTK1(args...) printk(CARDNAME ": " args) 68 - #define PRINTK(args...) printk(CARDNAME ": " args) 69 - #else 70 - #define PRINTK1(args...) do { } while(0) 71 - #define PRINTK(args...) 
printk(KERN_DEBUG args) 72 - #endif 83 + #define DRV_VERSION "1.30" 73 84 74 85 #ifdef CONFIG_BLACKFIN 75 86 #define readsb insb ··· 55 112 #define writesb outsb 56 113 #define writesw outsw 57 114 #define writesl outsl 58 - #define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQF_TRIGGER_HIGH) 115 + #define DEFAULT_TRIGGER IRQF_TRIGGER_HIGH 59 116 #else 60 - #define DM9000_IRQ_FLAGS (IRQF_SHARED | IRQT_RISING) 117 + #define DEFAULT_TRIGGER (0) 61 118 #endif 62 119 63 120 /* ··· 66 123 static int watchdog = 5000; 67 124 module_param(watchdog, int, 0400); 68 125 MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds"); 126 + 127 + /* DM9000 register address locking. 128 + * 129 + * The DM9000 uses an address register to control where data written 130 + * to the data register goes. This means that the address register 131 + * must be preserved over interrupts or similar calls. 132 + * 133 + * During interrupt and other critical calls, a spinlock is used to 134 + * protect the system, but the calls themselves save the address 135 + * in the address register in case they are interrupting another 136 + * access to the device. 137 + * 138 + * For general accesses a lock is provided so that calls which are 139 + * allowed to sleep are serialised so that the address register does 140 + * not need to be saved. This lock also serves to serialise access 141 + * to the EEPROM and PHY access registers which are shared between 142 + * these two devices. 
143 + */ 69 144 70 145 /* Structure/enum declaration ------------------------------- */ 71 146 typedef struct board_info { ··· 98 137 u16 dbug_cnt; 99 138 u8 io_mode; /* 0:word, 2:byte */ 100 139 u8 phy_addr; 140 + unsigned int flags; 141 + unsigned int in_suspend :1; 142 + 143 + int debug_level; 101 144 102 145 void (*inblk)(void __iomem *port, void *data, int length); 103 146 void (*outblk)(void __iomem *port, void *data, int length); 104 147 void (*dumpblk)(void __iomem *port, int length); 148 + 149 + struct device *dev; /* parent device */ 105 150 106 151 struct resource *addr_res; /* resources found */ 107 152 struct resource *data_res; ··· 115 148 struct resource *data_req; 116 149 struct resource *irq_res; 117 150 118 - struct timer_list timer; 119 - unsigned char srom[128]; 151 + struct mutex addr_lock; /* phy and eeprom access lock */ 152 + 120 153 spinlock_t lock; 121 154 122 155 struct mii_if_info mii; 123 156 u32 msg_enable; 124 157 } board_info_t; 125 158 159 + /* debug code */ 160 + 161 + #define dm9000_dbg(db, lev, msg...) 
do { \ 162 + if ((lev) < CONFIG_DM9000_DEBUGLEVEL && \ 163 + (lev) < db->debug_level) { \ 164 + dev_dbg(db->dev, msg); \ 165 + } \ 166 + } while (0) 167 + 168 + static inline board_info_t *to_dm9000_board(struct net_device *dev) 169 + { 170 + return dev->priv; 171 + } 172 + 126 173 /* function declaration ------------------------------------- */ 127 174 static int dm9000_probe(struct platform_device *); 128 175 static int dm9000_open(struct net_device *); 129 176 static int dm9000_start_xmit(struct sk_buff *, struct net_device *); 130 177 static int dm9000_stop(struct net_device *); 178 + static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd); 131 179 132 - 133 - static void dm9000_timer(unsigned long); 134 180 static void dm9000_init_dm9000(struct net_device *); 135 181 136 182 static irqreturn_t dm9000_interrupt(int, void *); ··· 151 171 static int dm9000_phy_read(struct net_device *dev, int phyaddr_unsused, int reg); 152 172 static void dm9000_phy_write(struct net_device *dev, int phyaddr_unused, int reg, 153 173 int value); 154 - static u16 read_srom_word(board_info_t *, int); 174 + 175 + static void dm9000_read_eeprom(board_info_t *, int addr, u8 *to); 176 + static void dm9000_write_eeprom(board_info_t *, int addr, u8 *dp); 155 177 static void dm9000_rx(struct net_device *); 156 178 static void dm9000_hash_table(struct net_device *); 157 179 158 - //#define DM9000_PROGRAM_EEPROM 159 - #ifdef DM9000_PROGRAM_EEPROM 160 - static void program_eeprom(board_info_t * db); 161 - #endif 162 180 /* DM9000 network board routine ---------------------------- */ 163 181 164 182 static void 165 183 dm9000_reset(board_info_t * db) 166 184 { 167 - PRINTK1("dm9000x: resetting\n"); 185 + dev_dbg(db->dev, "resetting device\n"); 186 + 168 187 /* RESET device */ 169 188 writeb(DM9000_NCR, db->io_addr); 170 189 udelay(200); ··· 279 300 db->inblk = dm9000_inblk_8bit; 280 301 break; 281 302 282 - case 2: 283 - db->dumpblk = dm9000_dumpblk_16bit; 284 - db->outblk = 
dm9000_outblk_16bit; 285 - db->inblk = dm9000_inblk_16bit; 286 - break; 287 303 288 304 case 3: 289 - printk(KERN_ERR PFX ": 3 byte IO, falling back to 16bit\n"); 305 + dev_dbg(db->dev, ": 3 byte IO, falling back to 16bit\n"); 306 + case 2: 290 307 db->dumpblk = dm9000_dumpblk_16bit; 291 308 db->outblk = dm9000_outblk_16bit; 292 309 db->inblk = dm9000_inblk_16bit; ··· 333 358 } 334 359 #endif 335 360 361 + static int dm9000_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 362 + { 363 + board_info_t *dm = to_dm9000_board(dev); 364 + 365 + if (!netif_running(dev)) 366 + return -EINVAL; 367 + 368 + return generic_mii_ioctl(&dm->mii, if_mii(req), cmd, NULL); 369 + } 370 + 371 + /* ethtool ops */ 372 + 373 + static void dm9000_get_drvinfo(struct net_device *dev, 374 + struct ethtool_drvinfo *info) 375 + { 376 + board_info_t *dm = to_dm9000_board(dev); 377 + 378 + strcpy(info->driver, CARDNAME); 379 + strcpy(info->version, DRV_VERSION); 380 + strcpy(info->bus_info, to_platform_device(dm->dev)->name); 381 + } 382 + 383 + static u32 dm9000_get_msglevel(struct net_device *dev) 384 + { 385 + board_info_t *dm = to_dm9000_board(dev); 386 + 387 + return dm->msg_enable; 388 + } 389 + 390 + static void dm9000_set_msglevel(struct net_device *dev, u32 value) 391 + { 392 + board_info_t *dm = to_dm9000_board(dev); 393 + 394 + dm->msg_enable = value; 395 + } 396 + 397 + static int dm9000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 398 + { 399 + board_info_t *dm = to_dm9000_board(dev); 400 + 401 + mii_ethtool_gset(&dm->mii, cmd); 402 + return 0; 403 + } 404 + 405 + static int dm9000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 406 + { 407 + board_info_t *dm = to_dm9000_board(dev); 408 + 409 + return mii_ethtool_sset(&dm->mii, cmd); 410 + } 411 + 412 + static int dm9000_nway_reset(struct net_device *dev) 413 + { 414 + board_info_t *dm = to_dm9000_board(dev); 415 + return mii_nway_restart(&dm->mii); 416 + } 417 + 418 + static u32 
dm9000_get_link(struct net_device *dev) 419 + { 420 + board_info_t *dm = to_dm9000_board(dev); 421 + return mii_link_ok(&dm->mii); 422 + } 423 + 424 + #define DM_EEPROM_MAGIC (0x444D394B) 425 + 426 + static int dm9000_get_eeprom_len(struct net_device *dev) 427 + { 428 + return 128; 429 + } 430 + 431 + static int dm9000_get_eeprom(struct net_device *dev, 432 + struct ethtool_eeprom *ee, u8 *data) 433 + { 434 + board_info_t *dm = to_dm9000_board(dev); 435 + int offset = ee->offset; 436 + int len = ee->len; 437 + int i; 438 + 439 + /* EEPROM access is aligned to two bytes */ 440 + 441 + if ((len & 1) != 0 || (offset & 1) != 0) 442 + return -EINVAL; 443 + 444 + if (dm->flags & DM9000_PLATF_NO_EEPROM) 445 + return -ENOENT; 446 + 447 + ee->magic = DM_EEPROM_MAGIC; 448 + 449 + for (i = 0; i < len; i += 2) 450 + dm9000_read_eeprom(dm, (offset + i) / 2, data + i); 451 + 452 + return 0; 453 + } 454 + 455 + static int dm9000_set_eeprom(struct net_device *dev, 456 + struct ethtool_eeprom *ee, u8 *data) 457 + { 458 + board_info_t *dm = to_dm9000_board(dev); 459 + int offset = ee->offset; 460 + int len = ee->len; 461 + int i; 462 + 463 + /* EEPROM access is aligned to two bytes */ 464 + 465 + if ((len & 1) != 0 || (offset & 1) != 0) 466 + return -EINVAL; 467 + 468 + if (dm->flags & DM9000_PLATF_NO_EEPROM) 469 + return -ENOENT; 470 + 471 + if (ee->magic != DM_EEPROM_MAGIC) 472 + return -EINVAL; 473 + 474 + for (i = 0; i < len; i += 2) 475 + dm9000_write_eeprom(dm, (offset + i) / 2, data + i); 476 + 477 + return 0; 478 + } 479 + 480 + static const struct ethtool_ops dm9000_ethtool_ops = { 481 + .get_drvinfo = dm9000_get_drvinfo, 482 + .get_settings = dm9000_get_settings, 483 + .set_settings = dm9000_set_settings, 484 + .get_msglevel = dm9000_get_msglevel, 485 + .set_msglevel = dm9000_set_msglevel, 486 + .nway_reset = dm9000_nway_reset, 487 + .get_link = dm9000_get_link, 488 + .get_eeprom_len = dm9000_get_eeprom_len, 489 + .get_eeprom = dm9000_get_eeprom, 490 + .set_eeprom = 
dm9000_set_eeprom, 491 + }; 492 + 493 + 336 494 /* dm9000_release_board 337 495 * 338 496 * release a board, and any mapped resources ··· 509 401 struct dm9000_plat_data *pdata = pdev->dev.platform_data; 510 402 struct board_info *db; /* Point a board information structure */ 511 403 struct net_device *ndev; 404 + const unsigned char *mac_src; 512 405 unsigned long base; 513 406 int ret = 0; 514 407 int iosize; ··· 519 410 /* Init network device */ 520 411 ndev = alloc_etherdev(sizeof (struct board_info)); 521 412 if (!ndev) { 522 - printk("%s: could not allocate device.\n", CARDNAME); 413 + dev_err(&pdev->dev, "could not allocate device.\n"); 523 414 return -ENOMEM; 524 415 } 525 416 526 417 SET_NETDEV_DEV(ndev, &pdev->dev); 527 418 528 - PRINTK2("dm9000_probe()"); 419 + dev_dbg(&pdev->dev, "dm9000_probe()"); 529 420 530 421 /* setup board info structure */ 531 422 db = (struct board_info *) ndev->priv; 532 423 memset(db, 0, sizeof (*db)); 533 424 425 + db->dev = &pdev->dev; 426 + 534 427 spin_lock_init(&db->lock); 428 + mutex_init(&db->addr_lock); 535 429 536 430 if (pdev->num_resources < 2) { 537 431 ret = -ENODEV; ··· 562 450 563 451 if (db->addr_res == NULL || db->data_res == NULL || 564 452 db->irq_res == NULL) { 565 - printk(KERN_ERR PFX "insufficient resources\n"); 453 + dev_err(db->dev, "insufficient resources\n"); 566 454 ret = -ENOENT; 567 455 goto out; 568 456 } ··· 572 460 pdev->name); 573 461 574 462 if (db->addr_req == NULL) { 575 - printk(KERN_ERR PFX "cannot claim address reg area\n"); 463 + dev_err(db->dev, "cannot claim address reg area\n"); 576 464 ret = -EIO; 577 465 goto out; 578 466 } ··· 580 468 db->io_addr = ioremap(db->addr_res->start, i); 581 469 582 470 if (db->io_addr == NULL) { 583 - printk(KERN_ERR "failed to ioremap address reg\n"); 471 + dev_err(db->dev, "failed to ioremap address reg\n"); 584 472 ret = -EINVAL; 585 473 goto out; 586 474 } ··· 590 478 pdev->name); 591 479 592 480 if (db->data_req == NULL) { 593 - printk(KERN_ERR PFX 
"cannot claim data reg area\n"); 481 + dev_err(db->dev, "cannot claim data reg area\n"); 594 482 ret = -EIO; 595 483 goto out; 596 484 } ··· 598 486 db->io_data = ioremap(db->data_res->start, iosize); 599 487 600 488 if (db->io_data == NULL) { 601 - printk(KERN_ERR "failed to ioremap data reg\n"); 489 + dev_err(db->dev,"failed to ioremap data reg\n"); 602 490 ret = -EINVAL; 603 491 goto out; 604 492 } ··· 637 525 638 526 if (pdata->dumpblk != NULL) 639 527 db->dumpblk = pdata->dumpblk; 528 + 529 + db->flags = pdata->flags; 640 530 } 641 531 642 532 dm9000_reset(db); 643 533 644 534 /* try two times, DM9000 sometimes gets the first read wrong */ 645 - for (i = 0; i < 2; i++) { 535 + for (i = 0; i < 8; i++) { 646 536 id_val = ior(db, DM9000_VIDL); 647 537 id_val |= (u32)ior(db, DM9000_VIDH) << 8; 648 538 id_val |= (u32)ior(db, DM9000_PIDL) << 16; ··· 652 538 653 539 if (id_val == DM9000_ID) 654 540 break; 655 - printk("%s: read wrong id 0x%08x\n", CARDNAME, id_val); 541 + dev_err(db->dev, "read wrong id 0x%08x\n", id_val); 656 542 } 657 543 658 544 if (id_val != DM9000_ID) { 659 - printk("%s: wrong id: 0x%08x\n", CARDNAME, id_val); 545 + dev_err(db->dev, "wrong id: 0x%08x\n", id_val); 660 546 ret = -ENODEV; 661 547 goto out; 662 548 } ··· 672 558 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 673 559 ndev->stop = &dm9000_stop; 674 560 ndev->set_multicast_list = &dm9000_hash_table; 561 + ndev->ethtool_ops = &dm9000_ethtool_ops; 562 + ndev->do_ioctl = &dm9000_ioctl; 563 + 675 564 #ifdef CONFIG_NET_POLL_CONTROLLER 676 565 ndev->poll_controller = &dm9000_poll_controller; 677 566 #endif 678 567 679 - #ifdef DM9000_PROGRAM_EEPROM 680 - program_eeprom(db); 681 - #endif 682 568 db->msg_enable = NETIF_MSG_LINK; 683 569 db->mii.phy_id_mask = 0x1f; 684 570 db->mii.reg_num_mask = 0x1f; ··· 688 574 db->mii.mdio_read = dm9000_phy_read; 689 575 db->mii.mdio_write = dm9000_phy_write; 690 576 691 - /* Read SROM content */ 692 - for (i = 0; i < 64; i++) 693 - ((u16 *) db->srom)[i] 
= read_srom_word(db, i); 577 + mac_src = "eeprom"; 694 578 695 - /* Set Node Address */ 696 - for (i = 0; i < 6; i++) 697 - ndev->dev_addr[i] = db->srom[i]; 579 + /* try reading the node address from the attached EEPROM */ 580 + for (i = 0; i < 6; i += 2) 581 + dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i); 698 582 699 583 if (!is_valid_ether_addr(ndev->dev_addr)) { 700 584 /* try reading from mac */ 701 - 585 + 586 + mac_src = "chip"; 702 587 for (i = 0; i < 6; i++) 703 588 ndev->dev_addr[i] = ior(db, i+DM9000_PAR); 704 589 } 705 590 706 591 if (!is_valid_ether_addr(ndev->dev_addr)) 707 - printk("%s: Invalid ethernet MAC address. Please " 708 - "set using ifconfig\n", ndev->name); 592 + dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please " 593 + "set using ifconfig\n", ndev->name); 709 594 710 595 platform_set_drvdata(pdev, ndev); 711 596 ret = register_netdev(ndev); 712 597 713 598 if (ret == 0) { 714 599 DECLARE_MAC_BUF(mac); 715 - printk("%s: dm9000 at %p,%p IRQ %d MAC: %s\n", 600 + printk("%s: dm9000 at %p,%p IRQ %d MAC: %s (%s)\n", 716 601 ndev->name, db->io_addr, db->io_data, ndev->irq, 717 - print_mac(mac, ndev->dev_addr)); 602 + print_mac(mac, ndev->dev_addr), mac_src); 718 603 } 719 604 return 0; 720 605 721 606 out: 722 - printk("%s: not found (%d).\n", CARDNAME, ret); 607 + dev_err(db->dev, "not found (%d).\n", ret); 723 608 724 609 dm9000_release_board(pdev, db); 725 610 free_netdev(ndev); ··· 734 621 dm9000_open(struct net_device *dev) 735 622 { 736 623 board_info_t *db = (board_info_t *) dev->priv; 624 + unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK; 737 625 738 - PRINTK2("entering dm9000_open\n"); 626 + if (netif_msg_ifup(db)) 627 + dev_dbg(db->dev, "enabling %s\n", dev->name); 739 628 740 - if (request_irq(dev->irq, &dm9000_interrupt, DM9000_IRQ_FLAGS, dev->name, dev)) 629 + /* If there is no IRQ type specified, default to something that 630 + * may work, and tell the user that this is a problem */ 631 + 632 + if 
(irqflags == IRQF_TRIGGER_NONE) { 633 + dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 634 + irqflags = DEFAULT_TRIGGER; 635 + } 636 + 637 + irqflags |= IRQF_SHARED; 638 + 639 + if (request_irq(dev->irq, &dm9000_interrupt, irqflags, dev->name, dev)) 741 640 return -EAGAIN; 742 641 743 642 /* Initialize DM9000 board */ ··· 758 633 759 634 /* Init driver variable */ 760 635 db->dbug_cnt = 0; 761 - 762 - /* set and active a timer process */ 763 - init_timer(&db->timer); 764 - db->timer.expires = DM9000_TIMER_WUT; 765 - db->timer.data = (unsigned long) dev; 766 - db->timer.function = &dm9000_timer; 767 - add_timer(&db->timer); 768 636 769 637 mii_check_media(&db->mii, netif_msg_link(db), 1); 770 638 netif_start_queue(dev); ··· 773 655 { 774 656 board_info_t *db = (board_info_t *) dev->priv; 775 657 776 - PRINTK1("entering %s\n",__FUNCTION__); 658 + dm9000_dbg(db, 1, "entering %s\n", __func__); 777 659 778 660 /* I/O mode */ 779 661 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ ··· 782 664 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 783 665 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 784 666 iow(db, DM9000_GPR, 0); /* Enable PHY */ 667 + 668 + if (db->flags & DM9000_PLATF_EXT_PHY) 669 + iow(db, DM9000_NCR, NCR_EXT_PHY); 785 670 786 671 /* Program operating register */ 787 672 iow(db, DM9000_TCR, 0); /* TX Polling clear */ ··· 819 698 unsigned long flags; 820 699 board_info_t *db = (board_info_t *) dev->priv; 821 700 822 - PRINTK3("dm9000_start_xmit\n"); 701 + dm9000_dbg(db, 3, "%s:\n", __func__); 823 702 824 703 if (db->tx_pkt_cnt > 1) 825 704 return 1; ··· 836 715 /* TX control: First packet immediately send, second packet queue */ 837 716 if (db->tx_pkt_cnt == 1) { 838 717 /* Set TX length to DM9000 */ 839 - iow(db, DM9000_TXPLL, skb->len & 0xff); 840 - iow(db, DM9000_TXPLH, (skb->len >> 8) & 0xff); 718 + iow(db, DM9000_TXPLL, skb->len); 719 + iow(db, DM9000_TXPLH, skb->len >> 8); 841 720 842 721 /* 
Issue TX polling command */ 843 722 iow(db, DM9000_TCR, TCR_TXREQ); /* Cleared after TX complete */ ··· 878 757 { 879 758 board_info_t *db = (board_info_t *) ndev->priv; 880 759 881 - PRINTK1("entering %s\n",__FUNCTION__); 882 - 883 - /* deleted timer */ 884 - del_timer(&db->timer); 760 + if (netif_msg_ifdown(db)) 761 + dev_dbg(db->dev, "shutting down %s\n", ndev->name); 885 762 886 763 netif_stop_queue(ndev); 887 764 netif_carrier_off(ndev); ··· 907 788 db->tx_pkt_cnt--; 908 789 dev->stats.tx_packets++; 909 790 791 + if (netif_msg_tx_done(db)) 792 + dev_dbg(db->dev, "tx done, NSR %02x\n", tx_status); 793 + 910 794 /* Queue packet check & send */ 911 795 if (db->tx_pkt_cnt > 0) { 912 - iow(db, DM9000_TXPLL, db->queue_pkt_len & 0xff); 913 - iow(db, DM9000_TXPLH, (db->queue_pkt_len >> 8) & 0xff); 796 + iow(db, DM9000_TXPLL, db->queue_pkt_len); 797 + iow(db, DM9000_TXPLH, db->queue_pkt_len >> 8); 914 798 iow(db, DM9000_TCR, TCR_TXREQ); 915 799 dev->trans_start = jiffies; 916 800 } ··· 925 803 dm9000_interrupt(int irq, void *dev_id) 926 804 { 927 805 struct net_device *dev = dev_id; 928 - board_info_t *db; 806 + board_info_t *db = (board_info_t *) dev->priv; 929 807 int int_status; 930 808 u8 reg_save; 931 809 932 - PRINTK3("entering %s\n",__FUNCTION__); 933 - 934 - if (!dev) { 935 - PRINTK1("dm9000_interrupt() without DEVICE arg\n"); 936 - return IRQ_HANDLED; 937 - } 810 + dm9000_dbg(db, 3, "entering %s\n", __func__); 938 811 939 812 /* A real interrupt coming */ 940 - db = (board_info_t *) dev->priv; 813 + 941 814 spin_lock(&db->lock); 942 815 943 816 /* Save previous register address */ ··· 944 827 /* Got DM9000 interrupt status */ 945 828 int_status = ior(db, DM9000_ISR); /* Got ISR */ 946 829 iow(db, DM9000_ISR, int_status); /* Clear ISR status */ 830 + 831 + if (netif_msg_intr(db)) 832 + dev_dbg(db->dev, "interrupt status %02x\n", int_status); 947 833 948 834 /* Received the coming packet */ 949 835 if (int_status & ISR_PRS) ··· 967 847 return IRQ_HANDLED; 968 
848 } 969 849 970 - /* 971 - * A periodic timer routine 972 - * Dynamic media sense, allocated Rx buffer... 973 - */ 974 - static void 975 - dm9000_timer(unsigned long data) 976 - { 977 - struct net_device *dev = (struct net_device *) data; 978 - board_info_t *db = (board_info_t *) dev->priv; 979 - 980 - PRINTK3("dm9000_timer()\n"); 981 - 982 - mii_check_media(&db->mii, netif_msg_link(db), 0); 983 - 984 - /* Set timer again */ 985 - db->timer.expires = DM9000_TIMER_WUT; 986 - add_timer(&db->timer); 987 - } 988 - 989 850 struct dm9000_rxhdr { 990 - u16 RxStatus; 851 + u8 RxPktReady; 852 + u8 RxStatus; 991 853 u16 RxLen; 992 854 } __attribute__((__packed__)); 993 855 ··· 995 893 996 894 /* Status check: this byte must be 0 or 1 */ 997 895 if (rxbyte > DM9000_PKT_RDY) { 998 - printk("status check failed: %d\n", rxbyte); 896 + dev_warn(db->dev, "status check fail: %d\n", rxbyte); 999 897 iow(db, DM9000_RCR, 0x00); /* Stop Device */ 1000 898 iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */ 1001 899 return; ··· 1010 908 1011 909 (db->inblk)(db->io_data, &rxhdr, sizeof(rxhdr)); 1012 910 1013 - RxLen = rxhdr.RxLen; 911 + RxLen = le16_to_cpu(rxhdr.RxLen); 912 + 913 + if (netif_msg_rx_status(db)) 914 + dev_dbg(db->dev, "RX: status %02x, length %04x\n", 915 + rxhdr.RxStatus, RxLen); 1014 916 1015 917 /* Packet Status check */ 1016 918 if (RxLen < 0x40) { 1017 919 GoodPacket = false; 1018 - PRINTK1("Bad Packet received (runt)\n"); 920 + if (netif_msg_rx_err(db)) 921 + dev_dbg(db->dev, "RX: Bad Packet (runt)\n"); 1019 922 } 1020 923 1021 924 if (RxLen > DM9000_PKT_MAX) { 1022 - PRINTK1("RST: RX Len:%x\n", RxLen); 925 + dev_dbg(db->dev, "RST: RX Len:%x\n", RxLen); 1023 926 } 1024 927 1025 - if (rxhdr.RxStatus & 0xbf00) { 928 + if (rxhdr.RxStatus & 0xbf) { 1026 929 GoodPacket = false; 1027 - if (rxhdr.RxStatus & 0x100) { 1028 - PRINTK1("fifo error\n"); 930 + if (rxhdr.RxStatus & 0x01) { 931 + if (netif_msg_rx_err(db)) 932 + dev_dbg(db->dev, "fifo error\n"); 1029 933 
dev->stats.rx_fifo_errors++; 1030 934 } 1031 - if (rxhdr.RxStatus & 0x200) { 1032 - PRINTK1("crc error\n"); 935 + if (rxhdr.RxStatus & 0x02) { 936 + if (netif_msg_rx_err(db)) 937 + dev_dbg(db->dev, "crc error\n"); 1033 938 dev->stats.rx_crc_errors++; 1034 939 } 1035 - if (rxhdr.RxStatus & 0x8000) { 1036 - PRINTK1("length error\n"); 940 + if (rxhdr.RxStatus & 0x80) { 941 + if (netif_msg_rx_err(db)) 942 + dev_dbg(db->dev, "length error\n"); 1037 943 dev->stats.rx_length_errors++; 1038 944 } 1039 945 } ··· 1070 960 } while (rxbyte == DM9000_PKT_RDY); 1071 961 } 1072 962 1073 - /* 1074 - * Read a word data from SROM 1075 - */ 1076 - static u16 1077 - read_srom_word(board_info_t * db, int offset) 963 + static unsigned int 964 + dm9000_read_locked(board_info_t *db, int reg) 1078 965 { 1079 - iow(db, DM9000_EPAR, offset); 1080 - iow(db, DM9000_EPCR, EPCR_ERPRR); 1081 - mdelay(8); /* according to the datasheet 200us should be enough, 1082 - but it doesn't work */ 1083 - iow(db, DM9000_EPCR, 0x0); 1084 - return (ior(db, DM9000_EPDRL) + (ior(db, DM9000_EPDRH) << 8)); 966 + unsigned long flags; 967 + unsigned int ret; 968 + 969 + spin_lock_irqsave(&db->lock, flags); 970 + ret = ior(db, reg); 971 + spin_unlock_irqrestore(&db->lock, flags); 972 + 973 + return ret; 1085 974 } 1086 975 1087 - #ifdef DM9000_PROGRAM_EEPROM 976 + static int dm9000_wait_eeprom(board_info_t *db) 977 + { 978 + unsigned int status; 979 + int timeout = 8; /* wait max 8msec */ 980 + 981 + /* The DM9000 data sheets say we should be able to 982 + * poll the ERRE bit in EPCR to wait for the EEPROM 983 + * operation. From testing several chips, this bit 984 + * does not seem to work. 985 + * 986 + * We attempt to use the bit, but fall back to the 987 + * timeout (which is why we do not return an error 988 + * on expiry) to say that the EEPROM operation has 989 + * completed. 
990 + */ 991 + 992 + while (1) { 993 + status = dm9000_read_locked(db, DM9000_EPCR); 994 + 995 + if ((status & EPCR_ERRE) == 0) 996 + break; 997 + 998 + if (timeout-- < 0) { 999 + dev_dbg(db->dev, "timeout waiting EEPROM\n"); 1000 + break; 1001 + } 1002 + } 1003 + 1004 + return 0; 1005 + } 1006 + 1007 + /* 1008 + * Read a word data from EEPROM 1009 + */ 1010 + static void 1011 + dm9000_read_eeprom(board_info_t *db, int offset, u8 *to) 1012 + { 1013 + unsigned long flags; 1014 + 1015 + if (db->flags & DM9000_PLATF_NO_EEPROM) { 1016 + to[0] = 0xff; 1017 + to[1] = 0xff; 1018 + return; 1019 + } 1020 + 1021 + mutex_lock(&db->addr_lock); 1022 + 1023 + spin_lock_irqsave(&db->lock, flags); 1024 + 1025 + iow(db, DM9000_EPAR, offset); 1026 + iow(db, DM9000_EPCR, EPCR_ERPRR); 1027 + 1028 + spin_unlock_irqrestore(&db->lock, flags); 1029 + 1030 + dm9000_wait_eeprom(db); 1031 + 1032 + /* delay for at-least 150uS */ 1033 + msleep(1); 1034 + 1035 + spin_lock_irqsave(&db->lock, flags); 1036 + 1037 + iow(db, DM9000_EPCR, 0x0); 1038 + 1039 + to[0] = ior(db, DM9000_EPDRL); 1040 + to[1] = ior(db, DM9000_EPDRH); 1041 + 1042 + spin_unlock_irqrestore(&db->lock, flags); 1043 + 1044 + mutex_unlock(&db->addr_lock); 1045 + } 1046 + 1088 1047 /* 1089 1048 * Write a word data to SROM 1090 1049 */ 1091 1050 static void 1092 - write_srom_word(board_info_t * db, int offset, u16 val) 1051 + dm9000_write_eeprom(board_info_t *db, int offset, u8 *data) 1093 1052 { 1053 + unsigned long flags; 1054 + 1055 + if (db->flags & DM9000_PLATF_NO_EEPROM) 1056 + return; 1057 + 1058 + mutex_lock(&db->addr_lock); 1059 + 1060 + spin_lock_irqsave(&db->lock, flags); 1094 1061 iow(db, DM9000_EPAR, offset); 1095 - iow(db, DM9000_EPDRH, ((val >> 8) & 0xff)); 1096 - iow(db, DM9000_EPDRL, (val & 0xff)); 1062 + iow(db, DM9000_EPDRH, data[1]); 1063 + iow(db, DM9000_EPDRL, data[0]); 1097 1064 iow(db, DM9000_EPCR, EPCR_WEP | EPCR_ERPRW); 1098 - mdelay(8); /* same shit */ 1065 + spin_unlock_irqrestore(&db->lock, flags); 1066 + 
1067 + dm9000_wait_eeprom(db); 1068 + 1069 + mdelay(1); /* wait at least 150uS to clear */ 1070 + 1071 + spin_lock_irqsave(&db->lock, flags); 1099 1072 iow(db, DM9000_EPCR, 0); 1100 - } 1073 + spin_unlock_irqrestore(&db->lock, flags); 1101 1074 1102 - /* 1103 - * Only for development: 1104 - * Here we write static data to the eeprom in case 1105 - * we don't have valid content on a new board 1106 - */ 1107 - static void 1108 - program_eeprom(board_info_t * db) 1109 - { 1110 - u16 eeprom[] = { 0x0c00, 0x007f, 0x1300, /* MAC Address */ 1111 - 0x0000, /* Autoload: accept nothing */ 1112 - 0x0a46, 0x9000, /* Vendor / Product ID */ 1113 - 0x0000, /* pin control */ 1114 - 0x0000, 1115 - }; /* Wake-up mode control */ 1116 - int i; 1117 - for (i = 0; i < 8; i++) 1118 - write_srom_word(db, i, eeprom[i]); 1119 - } 1120 - #endif 1121 - 1122 - 1123 - /* 1124 - * Calculate the CRC valude of the Rx packet 1125 - * flag = 1 : return the reverse CRC (for the received packet CRC) 1126 - * 0 : return the normal CRC (for Hash Table index) 1127 - */ 1128 - 1129 - static unsigned long 1130 - cal_CRC(unsigned char *Data, unsigned int Len, u8 flag) 1131 - { 1132 - 1133 - u32 crc = ether_crc_le(Len, Data); 1134 - 1135 - if (flag) 1136 - return ~crc; 1137 - 1138 - return crc; 1075 + mutex_unlock(&db->addr_lock); 1139 1076 } 1140 1077 1141 1078 /* ··· 1194 1037 board_info_t *db = (board_info_t *) dev->priv; 1195 1038 struct dev_mc_list *mcptr = dev->mc_list; 1196 1039 int mc_cnt = dev->mc_count; 1040 + int i, oft; 1197 1041 u32 hash_val; 1198 - u16 i, oft, hash_table[4]; 1042 + u16 hash_table[4]; 1199 1043 unsigned long flags; 1200 1044 1201 - PRINTK2("dm9000_hash_table()\n"); 1045 + dm9000_dbg(db, 1, "entering %s\n", __func__); 1202 1046 1203 - spin_lock_irqsave(&db->lock,flags); 1047 + spin_lock_irqsave(&db->lock, flags); 1204 1048 1205 - for (i = 0, oft = 0x10; i < 6; i++, oft++) 1049 + for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) 1206 1050 iow(db, oft, dev->dev_addr[i]); 1207 1051 
1208 1052 /* Clear Hash Table */ ··· 1215 1057 1216 1058 /* the multicast address in Hash Table : 64 bits */ 1217 1059 for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) { 1218 - hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; 1060 + hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; 1219 1061 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1220 1062 } 1221 1063 1222 1064 /* Write the hash table to MAC MD table */ 1223 - for (i = 0, oft = 0x16; i < 4; i++) { 1224 - iow(db, oft++, hash_table[i] & 0xff); 1225 - iow(db, oft++, (hash_table[i] >> 8) & 0xff); 1065 + for (i = 0, oft = DM9000_MAR; i < 4; i++) { 1066 + iow(db, oft++, hash_table[i]); 1067 + iow(db, oft++, hash_table[i] >> 8); 1226 1068 } 1227 1069 1228 - spin_unlock_irqrestore(&db->lock,flags); 1070 + spin_unlock_irqrestore(&db->lock, flags); 1229 1071 } 1230 1072 1073 + 1074 + /* 1075 + * Sleep, either by using msleep() or if we are suspending, then 1076 + * use mdelay() to sleep. 1077 + */ 1078 + static void dm9000_msleep(board_info_t *db, unsigned int ms) 1079 + { 1080 + if (db->in_suspend) 1081 + mdelay(ms); 1082 + else 1083 + msleep(ms); 1084 + } 1231 1085 1232 1086 /* 1233 1087 * Read a word from phyxcer ··· 1252 1082 unsigned int reg_save; 1253 1083 int ret; 1254 1084 1085 + mutex_lock(&db->addr_lock); 1086 + 1255 1087 spin_lock_irqsave(&db->lock,flags); 1256 1088 1257 1089 /* Save previous register address */ ··· 1263 1091 iow(db, DM9000_EPAR, DM9000_PHY | reg); 1264 1092 1265 1093 iow(db, DM9000_EPCR, 0xc); /* Issue phyxcer read command */ 1266 - udelay(100); /* Wait read complete */ 1094 + 1095 + writeb(reg_save, db->io_addr); 1096 + spin_unlock_irqrestore(&db->lock,flags); 1097 + 1098 + dm9000_msleep(db, 1); /* Wait read complete */ 1099 + 1100 + spin_lock_irqsave(&db->lock,flags); 1101 + reg_save = readb(db->io_addr); 1102 + 1267 1103 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */ 1268 1104 1269 1105 /* The read data keeps on REG_0D & REG_0E */ ··· 1279 1099 1280 
1100 /* restore the previous address */ 1281 1101 writeb(reg_save, db->io_addr); 1282 - 1283 1102 spin_unlock_irqrestore(&db->lock,flags); 1284 1103 1104 + mutex_unlock(&db->addr_lock); 1285 1105 return ret; 1286 1106 } 1287 1107 ··· 1295 1115 unsigned long flags; 1296 1116 unsigned long reg_save; 1297 1117 1118 + mutex_lock(&db->addr_lock); 1119 + 1298 1120 spin_lock_irqsave(&db->lock,flags); 1299 1121 1300 1122 /* Save previous register address */ ··· 1306 1124 iow(db, DM9000_EPAR, DM9000_PHY | reg); 1307 1125 1308 1126 /* Fill the written data into REG_0D & REG_0E */ 1309 - iow(db, DM9000_EPDRL, (value & 0xff)); 1310 - iow(db, DM9000_EPDRH, ((value >> 8) & 0xff)); 1127 + iow(db, DM9000_EPDRL, value); 1128 + iow(db, DM9000_EPDRH, value >> 8); 1311 1129 1312 1130 iow(db, DM9000_EPCR, 0xa); /* Issue phyxcer write command */ 1313 - udelay(500); /* Wait write complete */ 1131 + 1132 + writeb(reg_save, db->io_addr); 1133 + spin_unlock_irqrestore(&db->lock, flags); 1134 + 1135 + dm9000_msleep(db, 1); /* Wait write complete */ 1136 + 1137 + spin_lock_irqsave(&db->lock,flags); 1138 + reg_save = readb(db->io_addr); 1139 + 1314 1140 iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */ 1315 1141 1316 1142 /* restore the previous address */ 1317 1143 writeb(reg_save, db->io_addr); 1318 1144 1319 - spin_unlock_irqrestore(&db->lock,flags); 1145 + spin_unlock_irqrestore(&db->lock, flags); 1146 + mutex_unlock(&db->addr_lock); 1320 1147 } 1321 1148 1322 1149 static int 1323 1150 dm9000_drv_suspend(struct platform_device *dev, pm_message_t state) 1324 1151 { 1325 1152 struct net_device *ndev = platform_get_drvdata(dev); 1153 + board_info_t *db; 1326 1154 1327 1155 if (ndev) { 1156 + db = (board_info_t *) ndev->priv; 1157 + db->in_suspend = 1; 1158 + 1328 1159 if (netif_running(ndev)) { 1329 1160 netif_device_detach(ndev); 1330 1161 dm9000_shutdown(ndev); ··· 1360 1165 1361 1166 netif_device_attach(ndev); 1362 1167 } 1168 + 1169 + db->in_suspend = 0; 1363 1170 } 1364 1171 
return 0; 1365 1172 } ··· 1377 1180 dm9000_release_board(pdev, (board_info_t *) ndev->priv); 1378 1181 free_netdev(ndev); /* free device structure */ 1379 1182 1380 - PRINTK1("clean_module() exit\n"); 1381 - 1183 + dev_dbg(&pdev->dev, "released and freed device\n"); 1382 1184 return 0; 1383 1185 } 1384 1186 ··· 1395 1199 static int __init 1396 1200 dm9000_init(void) 1397 1201 { 1398 - printk(KERN_INFO "%s Ethernet Driver\n", CARDNAME); 1202 + printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION); 1399 1203 1400 1204 return platform_driver_register(&dm9000_driver); /* search board and register */ 1401 1205 }
+5 -13
drivers/net/e1000/e1000_main.c
··· 926 926 { 927 927 struct net_device *netdev; 928 928 struct e1000_adapter *adapter; 929 - unsigned long mmio_start, mmio_len; 930 - unsigned long flash_start, flash_len; 931 929 932 930 static int cards_found = 0; 933 931 static int global_quad_port_a = 0; /* global ksp3 port a indication */ ··· 968 970 adapter->hw.back = adapter; 969 971 adapter->msg_enable = (1 << debug) - 1; 970 972 971 - mmio_start = pci_resource_start(pdev, BAR_0); 972 - mmio_len = pci_resource_len(pdev, BAR_0); 973 - 974 973 err = -EIO; 975 - adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); 974 + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0), 975 + pci_resource_len(pdev, BAR_0)); 976 976 if (!adapter->hw.hw_addr) 977 977 goto err_ioremap; 978 978 ··· 1005 1009 #endif 1006 1010 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); 1007 1011 1008 - netdev->mem_start = mmio_start; 1009 - netdev->mem_end = mmio_start + mmio_len; 1010 - netdev->base_addr = adapter->hw.io_base; 1011 - 1012 1012 adapter->bd_number = cards_found; 1013 1013 1014 1014 /* setup the private structure */ ··· 1017 1025 * because it depends on mac_type */ 1018 1026 if ((adapter->hw.mac_type == e1000_ich8lan) && 1019 1027 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { 1020 - flash_start = pci_resource_start(pdev, 1); 1021 - flash_len = pci_resource_len(pdev, 1); 1022 - adapter->hw.flash_address = ioremap(flash_start, flash_len); 1028 + adapter->hw.flash_address = 1029 + ioremap(pci_resource_start(pdev, 1), 1030 + pci_resource_len(pdev, 1)); 1023 1031 if (!adapter->hw.flash_address) 1024 1032 goto err_flashmap; 1025 1033 }
+80 -52
drivers/net/forcedeth.c
··· 166 166 * Hardware access: 167 167 */ 168 168 169 - #define DEV_NEED_TIMERIRQ 0x0001 /* set the timer irq flag in the irq mask */ 170 - #define DEV_NEED_LINKTIMER 0x0002 /* poll link settings. Relies on the timer irq */ 171 - #define DEV_HAS_LARGEDESC 0x0004 /* device supports jumbo frames and needs packet format 2 */ 172 - #define DEV_HAS_HIGH_DMA 0x0008 /* device supports 64bit dma */ 173 - #define DEV_HAS_CHECKSUM 0x0010 /* device supports tx and rx checksum offloads */ 174 - #define DEV_HAS_VLAN 0x0020 /* device supports vlan tagging and striping */ 175 - #define DEV_HAS_MSI 0x0040 /* device supports MSI */ 176 - #define DEV_HAS_MSI_X 0x0080 /* device supports MSI-X */ 177 - #define DEV_HAS_POWER_CNTRL 0x0100 /* device supports power savings */ 178 - #define DEV_HAS_PAUSEFRAME_TX 0x0200 /* device supports tx pause frames */ 179 - #define DEV_HAS_STATISTICS_V1 0x0400 /* device supports hw statistics version 1 */ 180 - #define DEV_HAS_STATISTICS_V2 0x0800 /* device supports hw statistics version 2 */ 181 - #define DEV_HAS_TEST_EXTENDED 0x1000 /* device supports extended diagnostic test */ 182 - #define DEV_HAS_MGMT_UNIT 0x2000 /* device supports management unit */ 183 - #define DEV_HAS_CORRECT_MACADDR 0x4000 /* device supports correct mac address order */ 169 + #define DEV_NEED_TIMERIRQ 0x00001 /* set the timer irq flag in the irq mask */ 170 + #define DEV_NEED_LINKTIMER 0x00002 /* poll link settings. 
Relies on the timer irq */ 171 + #define DEV_HAS_LARGEDESC 0x00004 /* device supports jumbo frames and needs packet format 2 */ 172 + #define DEV_HAS_HIGH_DMA 0x00008 /* device supports 64bit dma */ 173 + #define DEV_HAS_CHECKSUM 0x00010 /* device supports tx and rx checksum offloads */ 174 + #define DEV_HAS_VLAN 0x00020 /* device supports vlan tagging and striping */ 175 + #define DEV_HAS_MSI 0x00040 /* device supports MSI */ 176 + #define DEV_HAS_MSI_X 0x00080 /* device supports MSI-X */ 177 + #define DEV_HAS_POWER_CNTRL 0x00100 /* device supports power savings */ 178 + #define DEV_HAS_STATISTICS_V1 0x00200 /* device supports hw statistics version 1 */ 179 + #define DEV_HAS_STATISTICS_V2 0x00400 /* device supports hw statistics version 2 */ 180 + #define DEV_HAS_TEST_EXTENDED 0x00800 /* device supports extended diagnostic test */ 181 + #define DEV_HAS_MGMT_UNIT 0x01000 /* device supports management unit */ 182 + #define DEV_HAS_CORRECT_MACADDR 0x02000 /* device supports correct mac address order */ 183 + #define DEV_HAS_COLLISION_FIX 0x04000 /* device supports tx collision fix */ 184 + #define DEV_HAS_PAUSEFRAME_TX_V1 0x08000 /* device supports tx pause frames version 1 */ 185 + #define DEV_HAS_PAUSEFRAME_TX_V2 0x10000 /* device supports tx pause frames version 2 */ 186 + #define DEV_HAS_PAUSEFRAME_TX_V3 0x20000 /* device supports tx pause frames version 3 */ 184 187 185 188 enum { 186 189 NvRegIrqStatus = 0x000, ··· 269 266 #define NVREG_RNDSEED_FORCE3 0x7400 270 267 271 268 NvRegTxDeferral = 0xA0, 272 - #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 273 - #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 274 - #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 269 + #define NVREG_TX_DEFERRAL_DEFAULT 0x15050f 270 + #define NVREG_TX_DEFERRAL_RGMII_10_100 0x16070f 271 + #define NVREG_TX_DEFERRAL_RGMII_1000 0x14050f 272 + #define NVREG_TX_DEFERRAL_RGMII_STRETCH_10 0x16190f 273 + #define NVREG_TX_DEFERRAL_RGMII_STRETCH_100 0x16300f 274 + #define NVREG_TX_DEFERRAL_MII_STRETCH 
0x152000 275 275 NvRegRxDeferral = 0xA4, 276 276 #define NVREG_RX_DEFERRAL_DEFAULT 0x16 277 277 NvRegMacAddrA = 0xA8, ··· 324 318 NvRegTxRingPhysAddrHigh = 0x148, 325 319 NvRegRxRingPhysAddrHigh = 0x14C, 326 320 NvRegTxPauseFrame = 0x170, 327 - #define NVREG_TX_PAUSEFRAME_DISABLE 0x01ff0080 328 - #define NVREG_TX_PAUSEFRAME_ENABLE 0x01800010 321 + #define NVREG_TX_PAUSEFRAME_DISABLE 0x0fff0080 322 + #define NVREG_TX_PAUSEFRAME_ENABLE_V1 0x01800010 323 + #define NVREG_TX_PAUSEFRAME_ENABLE_V2 0x056003f0 324 + #define NVREG_TX_PAUSEFRAME_ENABLE_V3 0x09f00880 329 325 NvRegMIIStatus = 0x180, 330 326 #define NVREG_MIISTAT_ERROR 0x0001 331 327 #define NVREG_MIISTAT_LINKCHANGE 0x0008 ··· 2759 2751 if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) { 2760 2752 u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX; 2761 2753 if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) { 2762 - writel(NVREG_TX_PAUSEFRAME_ENABLE, base + NvRegTxPauseFrame); 2754 + u32 pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V1; 2755 + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) 2756 + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V2; 2757 + if (np->driver_data & DEV_HAS_PAUSEFRAME_TX_V3) 2758 + pause_enable = NVREG_TX_PAUSEFRAME_ENABLE_V3; 2759 + writel(pause_enable, base + NvRegTxPauseFrame); 2763 2760 writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1); 2764 2761 np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 2765 2762 } else { ··· 2798 2785 int retval = 0; 2799 2786 u32 control_1000, status_1000, phyreg, pause_flags, txreg; 2800 2787 u32 txrxFlags = 0; 2788 + u32 phy_exp; 2801 2789 2802 2790 /* BMSR_LSTATUS is latched, read it twice: 2803 2791 * we want the current value. 
··· 2926 2912 phyreg |= PHY_1000; 2927 2913 writel(phyreg, base + NvRegPhyInterface); 2928 2914 2915 + phy_exp = mii_rw(dev, np->phyaddr, MII_EXPANSION, MII_READ) & EXPANSION_NWAY; /* autoneg capable */ 2929 2916 if (phyreg & PHY_RGMII) { 2930 - if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) 2917 + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_1000) { 2931 2918 txreg = NVREG_TX_DEFERRAL_RGMII_1000; 2932 - else 2933 - txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2919 + } else { 2920 + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) { 2921 + if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_10) 2922 + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_10; 2923 + else 2924 + txreg = NVREG_TX_DEFERRAL_RGMII_STRETCH_100; 2925 + } else { 2926 + txreg = NVREG_TX_DEFERRAL_RGMII_10_100; 2927 + } 2928 + } 2934 2929 } else { 2935 - txreg = NVREG_TX_DEFERRAL_DEFAULT; 2930 + if (!phy_exp && !np->duplex && (np->driver_data & DEV_HAS_COLLISION_FIX)) 2931 + txreg = NVREG_TX_DEFERRAL_MII_STRETCH; 2932 + else 2933 + txreg = NVREG_TX_DEFERRAL_DEFAULT; 2936 2934 } 2937 2935 writel(txreg, base + NvRegTxDeferral); 2938 2936 ··· 5181 5155 } 5182 5156 5183 5157 np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG; 5184 - if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) { 5158 + if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) || 5159 + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) || 5160 + (id->driver_data & DEV_HAS_PAUSEFRAME_TX_V3)) { 5185 5161 np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE | NV_PAUSEFRAME_TX_REQ; 5186 5162 } 5187 5163 ··· 5587 5559 }, 5588 5560 { /* MCP55 Ethernet Controller */ 5589 5561 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14), 5590 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5562 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5591 5563 }, 5592 5564 { /* MCP55 Ethernet Controller */ 5593 5565 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15), 5594 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5566 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT, 5595 5567 }, 5596 5568 { /* MCP61 Ethernet Controller */ 5597 5569 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_16), 5598 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5570 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5599 5571 }, 5600 5572 { /* MCP61 Ethernet Controller */ 5601 5573 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_17), 5602 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5574 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5603 5575 }, 5604 5576 { /* MCP61 Ethernet Controller */ 5605 5577 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_18), 5606 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5578 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5607 5579 }, 5608 5580 { /* MCP61 Ethernet Controller */ 5609 5581 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_19), 5610 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5582 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5611 5583 }, 5612 5584 { /* MCP65 Ethernet Controller */ 5613 5585 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_20), 5614 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5586 + .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5615 5587 }, 5616 5588 { /* MCP65 Ethernet Controller */ 5617 5589 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_21), 5618 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5590 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5619 5591 }, 5620 5592 { /* MCP65 Ethernet Controller */ 5621 5593 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_22), 5622 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5594 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5623 5595 }, 5624 5596 { /* MCP65 Ethernet Controller */ 5625 5597 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_23), 5626 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5598 + .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5627 5599 }, 5628 5600 { /* MCP67 Ethernet Controller */ 5629 5601 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_24), 5630 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5602 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5631 5603 }, 5632 5604 { /* MCP67 Ethernet Controller */ 5633 5605 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_25), 5634 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5606 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5635 5607 }, 5636 5608 { /* MCP67 Ethernet Controller */ 5637 5609 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_26), 5638 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5610 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5639 5611 }, 5640 5612 { /* MCP67 Ethernet Controller */ 5641 5613 
PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_27), 5642 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5614 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5643 5615 }, 5644 5616 { /* MCP73 Ethernet Controller */ 5645 5617 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_28), 5646 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5618 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5647 5619 }, 5648 5620 { /* MCP73 Ethernet Controller */ 5649 5621 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_29), 5650 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5622 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5651 5623 }, 5652 5624 { /* MCP73 Ethernet Controller */ 5653 5625 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_30), 5654 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5626 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5655 5627 }, 5656 5628 { /* MCP73 Ethernet Controller */ 5657 5629 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_31), 5658 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5630 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_HIGH_DMA|DEV_HAS_POWER_CNTRL|DEV_HAS_MSI|DEV_HAS_PAUSEFRAME_TX_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5659 5631 }, 5660 5632 { /* MCP77 Ethernet Controller */ 5661 5633 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_32), 5662 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5634 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5663 5635 }, 5664 5636 { /* MCP77 Ethernet Controller */ 5665 5637 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_33), 5666 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5638 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5667 5639 }, 5668 5640 { /* MCP77 Ethernet Controller */ 5669 5641 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_34), 5670 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5642 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5671 5643 }, 5672 5644 { /* MCP77 Ethernet Controller */ 5673 5645 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_35), 5674 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5646 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V2|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5675 5647 }, 5676 5648 { /* MCP79 Ethernet Controller */ 5677 5649 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_36), 5678 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5650 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5679 5651 }, 5680 5652 { /* MCP79 Ethernet Controller */ 5681 5653 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_37), 5682 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5654 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5683 5655 }, 5684 5656 { /* MCP79 Ethernet Controller */ 5685 5657 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_38), 5686 - .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5658 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5687 5659 }, 5688 5660 { /* MCP79 Ethernet Controller */ 5689 5661 PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_39), 5690 - .driver_data = 
DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR, 5662 + .driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_MSI|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX_V3|DEV_HAS_STATISTICS_V2|DEV_HAS_TEST_EXTENDED|DEV_HAS_MGMT_UNIT|DEV_HAS_CORRECT_MACADDR|DEV_HAS_COLLISION_FIX, 5691 5663 }, 5692 5664 {0,}, 5693 5665 };
+2 -2
drivers/net/netconsole.c
··· 309 309 struct net_device *dev = nt->np.dev; 310 310 311 311 DECLARE_MAC_BUF(mac); 312 - return snprintf(buf, PAGE_SIZE, "%s\n", 313 - print_mac(mac, dev->dev_addr)); 312 + return snprintf(buf, PAGE_SIZE, "%s\n", dev ? 313 + print_mac(mac, dev->dev_addr) : "ff:ff:ff:ff:ff:ff"); 314 314 } 315 315 316 316 static ssize_t show_remote_mac(struct netconsole_target *nt, char *buf)
+551 -589
drivers/net/ni52.c
··· 33 33 * I have also done a look in the following sources: (mail me if you need them) 34 34 * crynwr-packet-driver by Russ Nelson 35 35 * Garret A. Wollman's (fourth) i82586-driver for BSD 36 - * (before getting an i82596 (yes 596 not 586) manual, the existing drivers helped 37 - * me a lot to understand this tricky chip.) 36 + * (before getting an i82596 (yes 596 not 586) manual, the existing drivers 37 + * helped me a lot to understand this tricky chip.) 38 38 * 39 39 * Known Problems: 40 40 * The internal sysbus seems to be slow. So we often lose packets because of 41 41 * overruns while receiving from a fast remote host. 42 - * This can slow down TCP connections. Maybe the newer ni5210 cards are better. 43 - * my experience is, that if a machine sends with more than about 500-600K/s 44 - * the fifo/sysbus overflows. 42 + * This can slow down TCP connections. Maybe the newer ni5210 cards are 43 + * better. My experience is, that if a machine sends with more than about 44 + * 500-600K/s the fifo/sysbus overflows. 45 45 * 46 46 * IMPORTANT NOTE: 47 47 * On fast networks, it's a (very) good idea to have 16K shared memory. With 48 - * 8K, we can store only 4 receive frames, so it can (easily) happen that a remote 49 - * machine 'overruns' our system. 48 + * 8K, we can store only 4 receive frames, so it can (easily) happen that a 49 + * remote machine 'overruns' our system. 
50 50 * 51 51 * Known i82586/card problems (I'm sure, there are many more!): 52 52 * Running the NOP-mode, the i82586 sometimes seems to forget to report ··· 60 60 * 61 61 * results from ftp performance tests with Linux 1.2.5 62 62 * send and receive about 350-400 KByte/s (peak up to 460 kbytes/s) 63 - * sending in NOP-mode: peak performance up to 530K/s (but better don't run this mode) 63 + * sending in NOP-mode: peak performance up to 530K/s (but better don't 64 + * run this mode) 64 65 */ 65 66 66 67 /* ··· 95 94 * 96 95 * 26.March.94: patches for Linux 1.0 and iomem-auto-probe (MH) 97 96 * 98 - * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, too (MH) 97 + * 30.Sep.93: Added nop-chain .. driver now runs with only one Xmit-Buff, 98 + * too (MH) 99 99 * 100 100 * < 30.Sep.93: first versions 101 101 */ ··· 104 102 static int debuglevel; /* debug-printk 0: off 1: a few 2: more */ 105 103 static int automatic_resume; /* experimental .. better should be zero */ 106 104 static int rfdadd; /* rfdadd=1 may be better for 8K MEM cards */ 107 - static int fifo=0x8; /* don't change */ 105 + static int fifo = 0x8; /* don't change */ 108 106 109 107 #include <linux/module.h> 110 108 #include <linux/kernel.h> ··· 129 127 #define DEBUG /* debug on */ 130 128 #define SYSBUSVAL 1 /* 8 Bit */ 131 129 132 - #define ni_attn586() {outb(0,dev->base_addr+NI52_ATTENTION);} 133 - #define ni_reset586() {outb(0,dev->base_addr+NI52_RESET);} 134 - #define ni_disint() {outb(0,dev->base_addr+NI52_INTDIS);} 135 - #define ni_enaint() {outb(0,dev->base_addr+NI52_INTENA);} 130 + #define ni_attn586() { outb(0, dev->base_addr + NI52_ATTENTION); } 131 + #define ni_reset586() { outb(0, dev->base_addr + NI52_RESET); } 132 + #define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } 133 + #define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } 136 134 137 - #define make32(ptr16) (p->memtop + (short) (ptr16) ) 138 - #define make24(ptr32) ( ((char *) (ptr32)) - p->base) 
139 - #define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32) - (unsigned long) p->memtop )) 135 + #define make32(ptr16) (p->memtop + (short) (ptr16)) 136 + #define make24(ptr32) ((unsigned long)(ptr32)) - p->base 137 + #define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32)\ 138 + - (unsigned long) p->memtop)) 140 139 141 140 /******************* how to calculate the buffers ***************************** 142 141 ··· 162 159 163 160 /**************************************************************************/ 164 161 165 - /* different DELAYs */ 166 - #define DELAY(x) mdelay(32 * x); 167 - #define DELAY_16(); { udelay(16); } 168 - #define DELAY_18(); { udelay(4); } 169 - 170 - /* wait for command with timeout: */ 171 - #define WAIT_4_SCB_CMD() \ 172 - { int i; \ 173 - for(i=0;i<16384;i++) { \ 174 - if(!p->scb->cmd_cuc) break; \ 175 - DELAY_18(); \ 176 - if(i == 16383) { \ 177 - printk("%s: scb_cmd timed out: %04x,%04x .. disabling i82586!!\n",dev->name,p->scb->cmd_cuc,p->scb->cus); \ 178 - if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } } 179 - 180 - #define WAIT_4_SCB_CMD_RUC() { int i; \ 181 - for(i=0;i<16384;i++) { \ 182 - if(!p->scb->cmd_ruc) break; \ 183 - DELAY_18(); \ 184 - if(i == 16383) { \ 185 - printk("%s: scb_cmd (ruc) timed out: %04x,%04x .. 
disabling i82586!!\n",dev->name,p->scb->cmd_ruc,p->scb->rus); \ 186 - if(!p->reseted) { p->reseted = 1; ni_reset586(); } } } } 187 - 188 - #define WAIT_4_STAT_COMPL(addr) { int i; \ 189 - for(i=0;i<32767;i++) { \ 190 - if((addr)->cmd_status & STAT_COMPL) break; \ 191 - DELAY_16(); DELAY_16(); } } 192 162 193 163 #define NI52_TOTAL_SIZE 16 194 164 #define NI52_ADDR0 0x02 195 165 #define NI52_ADDR1 0x07 196 166 #define NI52_ADDR2 0x01 197 167 198 - static int ni52_probe1(struct net_device *dev,int ioaddr); 199 - static irqreturn_t ni52_interrupt(int irq,void *dev_id); 168 + static int ni52_probe1(struct net_device *dev, int ioaddr); 169 + static irqreturn_t ni52_interrupt(int irq, void *dev_id); 200 170 static int ni52_open(struct net_device *dev); 201 171 static int ni52_close(struct net_device *dev); 202 - static int ni52_send_packet(struct sk_buff *,struct net_device *); 172 + static int ni52_send_packet(struct sk_buff *, struct net_device *); 203 173 static struct net_device_stats *ni52_get_stats(struct net_device *dev); 204 174 static void set_multicast_list(struct net_device *dev); 205 175 static void ni52_timeout(struct net_device *dev); 206 - #if 0 207 - static void ni52_dump(struct net_device *,void *); 208 - #endif 209 176 210 177 /* helper-functions */ 211 178 static int init586(struct net_device *dev); 212 - static int check586(struct net_device *dev,char *where,unsigned size); 179 + static int check586(struct net_device *dev, char *where, unsigned size); 213 180 static void alloc586(struct net_device *dev); 214 181 static void startrecv586(struct net_device *dev); 215 - static void *alloc_rfa(struct net_device *dev,void *ptr); 182 + static void *alloc_rfa(struct net_device *dev, void *ptr); 216 183 static void ni52_rcv_int(struct net_device *dev); 217 184 static void ni52_xmt_int(struct net_device *dev); 218 185 static void ni52_rnr_int(struct net_device *dev); 219 186 220 - struct priv 221 - { 187 + struct priv { 222 188 struct net_device_stats stats; 
223 189 unsigned long base; 224 190 char *memtop; 225 - long int lock; 226 - int reseted; 227 - volatile struct rfd_struct *rfd_last,*rfd_top,*rfd_first; 228 - volatile struct scp_struct *scp; /* volatile is important */ 229 - volatile struct iscp_struct *iscp; /* volatile is important */ 230 - volatile struct scb_struct *scb; /* volatile is important */ 231 - volatile struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 191 + spinlock_t spinlock; 192 + int reset; 193 + struct rfd_struct *rfd_last, *rfd_top, *rfd_first; 194 + struct scp_struct *scp; 195 + struct iscp_struct *iscp; 196 + struct scb_struct *scb; 197 + struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 232 198 #if (NUM_XMIT_BUFFS == 1) 233 - volatile struct transmit_cmd_struct *xmit_cmds[2]; 234 - volatile struct nop_cmd_struct *nop_cmds[2]; 199 + struct transmit_cmd_struct *xmit_cmds[2]; 200 + struct nop_cmd_struct *nop_cmds[2]; 235 201 #else 236 - volatile struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 237 - volatile struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 202 + struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 203 + struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 238 204 #endif 239 - volatile int nop_point,num_recv_buffs; 240 - volatile char *xmit_cbuffs[NUM_XMIT_BUFFS]; 241 - volatile int xmit_count,xmit_last; 205 + int nop_point, num_recv_buffs; 206 + char *xmit_cbuffs[NUM_XMIT_BUFFS]; 207 + int xmit_count, xmit_last; 242 208 }; 209 + 210 + /* wait for command with timeout: */ 211 + static void wait_for_scb_cmd(struct net_device *dev) 212 + { 213 + struct priv *p = dev->priv; 214 + int i; 215 + for (i = 0; i < 16384; i++) { 216 + if (readb(&p->scb->cmd_cuc) == 0) 217 + break; 218 + udelay(4); 219 + if (i == 16383) { 220 + printk(KERN_ERR "%s: scb_cmd timed out: %04x,%04x .. 
disabling i82586!!\n", 221 + dev->name, readb(&p->scb->cmd_cuc), readb(&p->scb->cus)); 222 + if (!p->reset) { 223 + p->reset = 1; 224 + ni_reset586(); 225 + } 226 + } 227 + } 228 + } 229 + 230 + static void wait_for_scb_cmd_ruc(struct net_device *dev) 231 + { 232 + struct priv *p = dev->priv; 233 + int i; 234 + for (i = 0; i < 16384; i++) { 235 + if (readb(&p->scb->cmd_ruc) == 0) 236 + break; 237 + udelay(4); 238 + if (i == 16383) { 239 + printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n", 240 + dev->name, p->scb->cmd_ruc, p->scb->rus); 241 + if (!p->reset) { 242 + p->reset = 1; 243 + ni_reset586(); 244 + } 245 + } 246 + } 247 + } 248 + 249 + static void wait_for_stat_compl(void *p) 250 + { 251 + struct nop_cmd_struct *addr = p; 252 + int i; 253 + for (i = 0; i < 32767; i++) { 254 + if (readw(&((addr)->cmd_status)) & STAT_COMPL) 255 + break; 256 + udelay(32); 257 + } 258 + } 243 259 244 260 /********************************************** 245 261 * close device ··· 266 244 static int ni52_close(struct net_device *dev) 267 245 { 268 246 free_irq(dev->irq, dev); 269 - 270 247 ni_reset586(); /* the hard way to stop the receiver */ 271 - 272 248 netif_stop_queue(dev); 273 - 274 249 return 0; 275 250 } 276 251 ··· 284 265 startrecv586(dev); 285 266 ni_enaint(); 286 267 287 - ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev); 288 - if (ret) 289 - { 268 + ret = request_irq(dev->irq, &ni52_interrupt, 0, dev->name, dev); 269 + if (ret) { 290 270 ni_reset586(); 291 271 return ret; 292 272 } 293 - 294 273 netif_start_queue(dev); 295 - 296 274 return 0; /* most done by init */ 297 275 } 298 276 299 277 /********************************************** 300 278 * Check to see if there's an 82586 out there. 
301 279 */ 302 - static int check586(struct net_device *dev,char *where,unsigned size) 280 + static int check586(struct net_device *dev, char *where, unsigned size) 303 281 { 304 282 struct priv pb; 305 283 struct priv *p = /* (struct priv *) dev->priv*/ &pb; 306 284 char *iscp_addrs[2]; 307 285 int i; 308 286 309 - p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) + size - 0x01000000; 287 + p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) 288 + + size - 0x01000000; 310 289 p->memtop = isa_bus_to_virt((unsigned long)where) + size; 311 290 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); 312 - memset((char *)p->scp,0, sizeof(struct scp_struct)); 313 - for(i=0;i<sizeof(struct scp_struct);i++) /* memory was writeable? */ 314 - if(((char *)p->scp)[i]) 291 + memset_io((char *)p->scp, 0, sizeof(struct scp_struct)); 292 + for (i = 0; i < sizeof(struct scp_struct); i++) 293 + /* memory was writeable? */ 294 + if (readb((char *)p->scp + i)) 315 295 return 0; 316 - p->scp->sysbus = SYSBUSVAL; /* 1 = 8Bit-Bus, 0 = 16 Bit */ 317 - if(p->scp->sysbus != SYSBUSVAL) 296 + writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ 297 + if (readb(&p->scp->sysbus) != SYSBUSVAL) 318 298 return 0; 319 299 320 300 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where); 321 - iscp_addrs[1]= (char *) p->scp - sizeof(struct iscp_struct); 301 + iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct); 322 302 323 - for(i=0;i<2;i++) 324 - { 303 + for (i = 0; i < 2; i++) { 325 304 p->iscp = (struct iscp_struct *) iscp_addrs[i]; 326 - memset((char *)p->iscp,0, sizeof(struct iscp_struct)); 305 + memset_io((char *)p->iscp, 0, sizeof(struct iscp_struct)); 327 306 328 - p->scp->iscp = make24(p->iscp); 329 - p->iscp->busy = 1; 307 + writel(make24(p->iscp), &p->scp->iscp); 308 + writeb(1, &p->iscp->busy); 330 309 331 310 ni_reset586(); 332 311 ni_attn586(); 333 - DELAY(1); /* wait a while... 
*/ 334 - 335 - if(p->iscp->busy) /* i82586 clears 'busy' after successful init */ 312 + mdelay(32); /* wait a while... */ 313 + /* i82586 clears 'busy' after successful init */ 314 + if (readb(&p->iscp->busy)) 336 315 return 0; 337 316 } 338 317 return 1; ··· 344 327 struct priv *p = (struct priv *) dev->priv; 345 328 346 329 ni_reset586(); 347 - DELAY(1); 330 + mdelay(32); 331 + 332 + spin_lock_init(&p->spinlock); 348 333 349 334 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS); 350 335 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start); 351 - p->iscp = (struct iscp_struct *) ((char *)p->scp - sizeof(struct iscp_struct)); 336 + p->iscp = (struct iscp_struct *) 337 + ((char *)p->scp - sizeof(struct iscp_struct)); 352 338 353 - memset((char *) p->iscp,0,sizeof(struct iscp_struct)); 354 - memset((char *) p->scp ,0,sizeof(struct scp_struct)); 339 + memset_io(p->iscp, 0, sizeof(struct iscp_struct)); 340 + memset_io(p->scp , 0, sizeof(struct scp_struct)); 355 341 356 - p->scp->iscp = make24(p->iscp); 357 - p->scp->sysbus = SYSBUSVAL; 358 - p->iscp->scb_offset = make16(p->scb); 342 + writel(make24(p->iscp), &p->scp->iscp); 343 + writeb(SYSBUSVAL, &p->scp->sysbus); 344 + writew(make16(p->scb), &p->iscp->scb_offset); 359 345 360 - p->iscp->busy = 1; 346 + writeb(1, &p->iscp->busy); 361 347 ni_reset586(); 362 348 ni_attn586(); 363 349 364 - DELAY(1); 350 + mdelay(32); 365 351 366 - if(p->iscp->busy) 367 - printk("%s: Init-Problems (alloc).\n",dev->name); 352 + if (readb(&p->iscp->busy)) 353 + printk(KERN_ERR "%s: Init-Problems (alloc).\n", dev->name); 368 354 369 - p->reseted = 0; 355 + p->reset = 0; 370 356 371 - memset((char *)p->scb,0,sizeof(struct scb_struct)); 357 + memset_io((char *)p->scb, 0, sizeof(struct scb_struct)); 372 358 } 373 359 374 360 /* set: io,irq,memstart,memend or set it when calling insmod */ 375 - static int irq=9; 376 - static int io=0x300; 361 + static int irq = 9; 362 + static int io = 0x300; 377 363 static long memstart; 
/* e.g 0xd0000 */ 378 364 static long memend; /* e.g 0xd4000 */ 379 365 ··· 433 413 return ERR_PTR(err); 434 414 } 435 415 436 - static int __init ni52_probe1(struct net_device *dev,int ioaddr) 416 + static int __init ni52_probe1(struct net_device *dev, int ioaddr) 437 417 { 438 418 int i, size, retval; 439 419 ··· 445 425 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) 446 426 return -EBUSY; 447 427 448 - if( !(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || 428 + if (!(inb(ioaddr+NI52_MAGIC1) == NI52_MAGICVAL1) || 449 429 !(inb(ioaddr+NI52_MAGIC2) == NI52_MAGICVAL2)) { 450 430 retval = -ENODEV; 451 431 goto out; 452 432 } 453 433 454 - for(i=0;i<ETH_ALEN;i++) 434 + for (i = 0; i < ETH_ALEN; i++) 455 435 dev->dev_addr[i] = inb(dev->base_addr+i); 456 436 457 - if(dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 437 + if (dev->dev_addr[0] != NI52_ADDR0 || dev->dev_addr[1] != NI52_ADDR1 458 438 || dev->dev_addr[2] != NI52_ADDR2) { 459 439 retval = -ENODEV; 460 440 goto out; 461 441 } 462 442 463 - printk(KERN_INFO "%s: NI5210 found at %#3lx, ",dev->name,dev->base_addr); 443 + printk(KERN_INFO "%s: NI5210 found at %#3lx, ", 444 + dev->name, dev->base_addr); 464 445 465 446 /* 466 447 * check (or search) IO-Memory, 8K and 16K 467 448 */ 468 449 #ifdef MODULE 469 450 size = dev->mem_end - dev->mem_start; 470 - if(size != 0x2000 && size != 0x4000) { 471 - printk("\n%s: Illegal memory size %d. Allowed is 0x2000 or 0x4000 bytes.\n",dev->name,size); 451 + if (size != 0x2000 && size != 0x4000) { 452 + printk("\n"); 453 + printk(KERN_ERR "%s: Invalid memory size %d. 
Allowed is 0x2000 or 0x4000 bytes.\n", dev->name, size); 472 454 retval = -ENODEV; 473 455 goto out; 474 456 } 475 - if(!check586(dev,(char *) dev->mem_start,size)) { 476 - printk("?memcheck, Can't find memory at 0x%lx with size %d!\n",dev->mem_start,size); 457 + if (!check586(dev, (char *)dev->mem_start, size)) { 458 + printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); 477 459 retval = -ENODEV; 478 460 goto out; 479 461 } 480 462 #else 481 - if(dev->mem_start != 0) /* no auto-mem-probe */ 482 - { 463 + if (dev->mem_start != 0) { 464 + /* no auto-mem-probe */ 483 465 size = 0x4000; /* check for 16K mem */ 484 - if(!check586(dev,(char *) dev->mem_start,size)) { 466 + if (!check586(dev, (char *) dev->mem_start, size)) { 485 467 size = 0x2000; /* check for 8K mem */ 486 - if(!check586(dev,(char *) dev->mem_start,size)) { 487 - printk("?memprobe, Can't find memory at 0x%lx!\n",dev->mem_start); 468 + if (!check586(dev, (char *)dev->mem_start, size)) { 469 + printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); 488 470 retval = -ENODEV; 489 471 goto out; 490 472 } 491 473 } 492 - } 493 - else 494 - { 495 - static long memaddrs[] = { 0xc8000,0xca000,0xcc000,0xce000,0xd0000,0xd2000, 496 - 0xd4000,0xd6000,0xd8000,0xda000,0xdc000, 0 }; 497 - for(i=0;;i++) 498 - { 499 - if(!memaddrs[i]) { 500 - printk("?memprobe, Can't find io-memory!\n"); 474 + } else { 475 + static const unsigned long memaddrs[] = { 476 + 0xc8000, 0xca000, 0xcc000, 0xce000, 0xd0000, 0xd2000, 477 + 0xd4000, 0xd6000, 0xd8000, 0xda000, 0xdc000, 0 478 + }; 479 + for (i = 0;; i++) { 480 + if (!memaddrs[i]) { 481 + printk(KERN_ERR "?memprobe, Can't find io-memory!\n"); 501 482 retval = -ENODEV; 502 483 goto out; 503 484 } 504 485 dev->mem_start = memaddrs[i]; 505 486 size = 0x2000; /* check for 8K mem */ 506 - if(check586(dev,(char *)dev->mem_start,size)) /* 8K-check */ 487 + if (check586(dev, (char *)dev->mem_start, size)) 488 + /* 8K-check */ 
507 489 break; 508 490 size = 0x4000; /* check for 16K mem */ 509 - if(check586(dev,(char *)dev->mem_start,size)) /* 16K-check */ 491 + if (check586(dev, (char *)dev->mem_start, size)) 492 + /* 16K-check */ 510 493 break; 511 494 } 512 495 } 513 - dev->mem_end = dev->mem_start + size; /* set mem_end showed by 'ifconfig' */ 496 + /* set mem_end showed by 'ifconfig' */ 497 + dev->mem_end = dev->mem_start + size; 514 498 #endif 515 499 516 - memset((char *) dev->priv,0,sizeof(struct priv)); 500 + memset((char *)dev->priv, 0, sizeof(struct priv)); 517 501 518 - ((struct priv *) (dev->priv))->memtop = isa_bus_to_virt(dev->mem_start) + size; 519 - ((struct priv *) (dev->priv))->base = (unsigned long) isa_bus_to_virt(dev->mem_start) + size - 0x01000000; 502 + ((struct priv *)(dev->priv))->memtop = 503 + isa_bus_to_virt(dev->mem_start) + size; 504 + ((struct priv *)(dev->priv))->base = (unsigned long) 505 + isa_bus_to_virt(dev->mem_start) + size - 0x01000000; 520 506 alloc586(dev); 521 507 522 508 /* set number of receive-buffs according to memsize */ 523 - if(size == 0x2000) 509 + if (size == 0x2000) 524 510 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 525 511 else 526 512 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 527 513 528 - printk("Memaddr: 0x%lx, Memsize: %d, ",dev->mem_start,size); 514 + printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", 515 + dev->mem_start, size); 529 516 530 - if(dev->irq < 2) 531 - { 517 + if (dev->irq < 2) { 532 518 unsigned long irq_mask; 533 519 534 520 irq_mask = probe_irq_on(); ··· 543 517 544 518 mdelay(20); 545 519 dev->irq = probe_irq_off(irq_mask); 546 - if(!dev->irq) 547 - { 520 + if (!dev->irq) { 548 521 printk("?autoirq, Failed to detect IRQ line!\n"); 549 522 retval = -EAGAIN; 550 523 goto out; 551 524 } 552 - printk("IRQ %d (autodetected).\n",dev->irq); 553 - } 554 - else { 555 - if(dev->irq == 2) 525 + printk("IRQ %d (autodetected).\n", dev->irq); 526 + } else { 527 + if (dev->irq == 2) 
556 528 dev->irq = 9; 557 - printk("IRQ %d (assigned and not checked!).\n",dev->irq); 529 + printk("IRQ %d (assigned and not checked!).\n", dev->irq); 558 530 } 559 531 560 532 dev->open = ni52_open; ··· 579 555 static int init586(struct net_device *dev) 580 556 { 581 557 void *ptr; 582 - int i,result=0; 583 - struct priv *p = (struct priv *) dev->priv; 584 - volatile struct configure_cmd_struct *cfg_cmd; 585 - volatile struct iasetup_cmd_struct *ias_cmd; 586 - volatile struct tdr_cmd_struct *tdr_cmd; 587 - volatile struct mcsetup_cmd_struct *mc_cmd; 588 - struct dev_mc_list *dmi=dev->mc_list; 589 - int num_addrs=dev->mc_count; 558 + int i, result = 0; 559 + struct priv *p = (struct priv *)dev->priv; 560 + struct configure_cmd_struct *cfg_cmd; 561 + struct iasetup_cmd_struct *ias_cmd; 562 + struct tdr_cmd_struct *tdr_cmd; 563 + struct mcsetup_cmd_struct *mc_cmd; 564 + struct dev_mc_list *dmi = dev->mc_list; 565 + int num_addrs = dev->mc_count; 590 566 591 567 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 592 568 593 569 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ 594 - cfg_cmd->cmd_status = 0; 595 - cfg_cmd->cmd_cmd = CMD_CONFIGURE | CMD_LAST; 596 - cfg_cmd->cmd_link = 0xffff; 570 + writew(0, &cfg_cmd->cmd_status); 571 + writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); 572 + writew(0xFFFF, &cfg_cmd->cmd_link); 597 573 598 - cfg_cmd->byte_cnt = 0x0a; /* number of cfg bytes */ 599 - cfg_cmd->fifo = fifo; /* fifo-limit (8=tx:32/rx:64) */ 600 - cfg_cmd->sav_bf = 0x40; /* hold or discard bad recv frames (bit 7) */ 601 - cfg_cmd->adr_len = 0x2e; /* addr_len |!src_insert |pre-len |loopback */ 602 - cfg_cmd->priority = 0x00; 603 - cfg_cmd->ifs = 0x60; 604 - cfg_cmd->time_low = 0x00; 605 - cfg_cmd->time_high = 0xf2; 606 - cfg_cmd->promisc = 0; 607 - if(dev->flags & IFF_ALLMULTI) { 574 + /* number of cfg bytes */ 575 + writeb(0x0a, &cfg_cmd->byte_cnt); 576 + /* fifo-limit (8=tx:32/rx:64) */ 577 + writeb(fifo, &cfg_cmd->fifo); 578 
+ /* hold or discard bad recv frames (bit 7) */ 579 + writeb(0x40, &cfg_cmd->sav_bf); 580 + /* addr_len |!src_insert |pre-len |loopback */ 581 + writeb(0x2e, &cfg_cmd->adr_len); 582 + writeb(0x00, &cfg_cmd->priority); 583 + writeb(0x60, &cfg_cmd->ifs);; 584 + writeb(0x00, &cfg_cmd->time_low); 585 + writeb(0xf2, &cfg_cmd->time_high); 586 + writeb(0x00, &cfg_cmd->promisc);; 587 + if (dev->flags & IFF_ALLMULTI) { 608 588 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 609 - if(num_addrs > len) { 610 - printk("%s: switching to promisc. mode\n",dev->name); 611 - dev->flags|=IFF_PROMISC; 589 + if (num_addrs > len) { 590 + printk(KERN_ERR "%s: switching to promisc. mode\n", 591 + dev->name); 592 + dev->flags |= IFF_PROMISC; 612 593 } 613 594 } 614 - if(dev->flags&IFF_PROMISC) 615 - { 616 - cfg_cmd->promisc=1; 617 - dev->flags|=IFF_PROMISC; 618 - } 619 - cfg_cmd->carr_coll = 0x00; 595 + if (dev->flags & IFF_PROMISC) 596 + writeb(0x01, &cfg_cmd->promisc); 597 + writeb(0x00, &cfg_cmd->carr_coll); 598 + writew(make16(cfg_cmd), &p->scb->cbl_offset); 599 + writew(0, &p->scb->cmd_ruc); 620 600 621 - p->scb->cbl_offset = make16(cfg_cmd); 622 - p->scb->cmd_ruc = 0; 623 - 624 - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 601 + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ 625 602 ni_attn586(); 626 603 627 - WAIT_4_STAT_COMPL(cfg_cmd); 604 + wait_for_stat_compl(cfg_cmd); 628 605 629 - if((cfg_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_COMPL|STAT_OK)) 630 - { 631 - printk("%s: configure command failed: %x\n",dev->name,cfg_cmd->cmd_status); 606 + if ((readw(&cfg_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != 607 + (STAT_COMPL|STAT_OK)) { 608 + printk(KERN_ERR "%s: configure command failed: %x\n", 609 + dev->name, readw(&cfg_cmd->cmd_status)); 632 610 return 1; 633 611 } 634 612 ··· 640 614 641 615 ias_cmd = (struct iasetup_cmd_struct *)ptr; 642 616 643 - ias_cmd->cmd_status = 0; 644 - ias_cmd->cmd_cmd = CMD_IASETUP | CMD_LAST; 645 - ias_cmd->cmd_link = 
0xffff; 617 + writew(0, &ias_cmd->cmd_status); 618 + writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); 619 + writew(0xffff, &ias_cmd->cmd_link); 646 620 647 - memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN); 621 + memcpy_toio((char *)&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); 648 622 649 - p->scb->cbl_offset = make16(ias_cmd); 623 + writew(make16(ias_cmd), &p->scb->cbl_offset); 650 624 651 - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 625 + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ 652 626 ni_attn586(); 653 627 654 - WAIT_4_STAT_COMPL(ias_cmd); 628 + wait_for_stat_compl(ias_cmd); 655 629 656 - if((ias_cmd->cmd_status & (STAT_OK|STAT_COMPL)) != (STAT_OK|STAT_COMPL)) { 657 - printk("%s (ni52): individual address setup command failed: %04x\n",dev->name,ias_cmd->cmd_status); 630 + if ((readw(&ias_cmd->cmd_status) & (STAT_OK|STAT_COMPL)) != 631 + (STAT_OK|STAT_COMPL)) { 632 + printk(KERN_ERR "%s (ni52): individual address setup command failed: %04x\n", dev->name, readw(&ias_cmd->cmd_status)); 658 633 return 1; 659 634 } 660 635 ··· 665 638 666 639 tdr_cmd = (struct tdr_cmd_struct *)ptr; 667 640 668 - tdr_cmd->cmd_status = 0; 669 - tdr_cmd->cmd_cmd = CMD_TDR | CMD_LAST; 670 - tdr_cmd->cmd_link = 0xffff; 671 - tdr_cmd->status = 0; 641 + writew(0, &tdr_cmd->cmd_status); 642 + writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); 643 + writew(0xffff, &tdr_cmd->cmd_link); 644 + writew(0, &tdr_cmd->status); 672 645 673 - p->scb->cbl_offset = make16(tdr_cmd); 674 - p->scb->cmd_cuc = CUC_START; /* cmd.-unit start */ 646 + writew(make16(tdr_cmd), &p->scb->cbl_offset); 647 + writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ 675 648 ni_attn586(); 676 649 677 - WAIT_4_STAT_COMPL(tdr_cmd); 650 + wait_for_stat_compl(tdr_cmd); 678 651 679 - if(!(tdr_cmd->cmd_status & STAT_COMPL)) 680 - { 681 - printk("%s: Problems while running the TDR.\n",dev->name); 682 - } 683 - else 684 - { 685 - DELAY_16(); /* wait for result */ 686 - result = 
tdr_cmd->status; 687 - 688 - p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 652 + if (!(readw(&tdr_cmd->cmd_status) & STAT_COMPL)) 653 + printk(KERN_ERR "%s: Problems while running the TDR.\n", 654 + dev->name); 655 + else { 656 + udelay(16); 657 + result = readw(&tdr_cmd->status); 658 + writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); 689 659 ni_attn586(); /* ack the interrupts */ 690 660 691 - if(result & TDR_LNK_OK) 661 + if (result & TDR_LNK_OK) 692 662 ; 693 - else if(result & TDR_XCVR_PRB) 694 - printk("%s: TDR: Transceiver problem. Check the cable(s)!\n",dev->name); 695 - else if(result & TDR_ET_OPN) 696 - printk("%s: TDR: No correct termination %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 697 - else if(result & TDR_ET_SRT) 698 - { 699 - if (result & TDR_TIMEMASK) /* time == 0 -> strange :-) */ 700 - printk("%s: TDR: Detected a short circuit %d clocks away.\n",dev->name,result & TDR_TIMEMASK); 701 - } 702 - else 703 - printk("%s: TDR: Unknown status %04x\n",dev->name,result); 663 + else if (result & TDR_XCVR_PRB) 664 + printk(KERN_ERR "%s: TDR: Transceiver problem. 
Check the cable(s)!\n", 665 + dev->name); 666 + else if (result & TDR_ET_OPN) 667 + printk(KERN_ERR "%s: TDR: No correct termination %d clocks away.\n", 668 + dev->name, result & TDR_TIMEMASK); 669 + else if (result & TDR_ET_SRT) { 670 + /* time == 0 -> strange :-) */ 671 + if (result & TDR_TIMEMASK) 672 + printk(KERN_ERR "%s: TDR: Detected a short circuit %d clocks away.\n", 673 + dev->name, result & TDR_TIMEMASK); 674 + } else 675 + printk(KERN_ERR "%s: TDR: Unknown status %04x\n", 676 + dev->name, result); 704 677 } 705 678 706 679 /* 707 680 * Multicast setup 708 681 */ 709 - if(num_addrs && !(dev->flags & IFF_PROMISC) ) 710 - { 682 + if (num_addrs && !(dev->flags & IFF_PROMISC)) { 711 683 mc_cmd = (struct mcsetup_cmd_struct *) ptr; 712 - mc_cmd->cmd_status = 0; 713 - mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST; 714 - mc_cmd->cmd_link = 0xffff; 715 - mc_cmd->mc_cnt = num_addrs * 6; 684 + writew(0, &mc_cmd->cmd_status); 685 + writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); 686 + writew(0xffff, &mc_cmd->cmd_link); 687 + writew(num_addrs * 6, &mc_cmd->mc_cnt); 716 688 717 - for(i=0;i<num_addrs;i++,dmi=dmi->next) 718 - memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6); 689 + for (i = 0; i < num_addrs; i++, dmi = dmi->next) 690 + memcpy_toio((char *) mc_cmd->mc_list[i], 691 + dmi->dmi_addr, 6); 719 692 720 - p->scb->cbl_offset = make16(mc_cmd); 721 - p->scb->cmd_cuc = CUC_START; 693 + writew(make16(mc_cmd), &p->scb->cbl_offset); 694 + writeb(CUC_START, &p->scb->cmd_cuc); 722 695 ni_attn586(); 723 696 724 - WAIT_4_STAT_COMPL(mc_cmd); 697 + wait_for_stat_compl(mc_cmd); 725 698 726 - if( (mc_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) 727 - printk("%s: Can't apply multicast-address-list.\n",dev->name); 699 + if ((readw(&mc_cmd->cmd_status) & (STAT_COMPL|STAT_OK)) 700 + != (STAT_COMPL|STAT_OK)) 701 + printk(KERN_ERR "%s: Can't apply multicast-address-list.\n", dev->name); 728 702 } 729 703 730 704 /* 731 705 * alloc nop/xmit-cmds 732 706 */ 733 
707 #if (NUM_XMIT_BUFFS == 1) 734 - for(i=0;i<2;i++) 735 - { 736 - p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 737 - p->nop_cmds[i]->cmd_cmd = CMD_NOP; 738 - p->nop_cmds[i]->cmd_status = 0; 739 - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); 708 + for (i = 0; i < 2; i++) { 709 + p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 710 + writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 711 + writew(0, &p->nop_cmds[i]->cmd_status); 712 + writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 740 713 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 741 714 } 742 715 #else 743 - for(i=0;i<NUM_XMIT_BUFFS;i++) 744 - { 745 - p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 746 - p->nop_cmds[i]->cmd_cmd = CMD_NOP; 747 - p->nop_cmds[i]->cmd_status = 0; 748 - p->nop_cmds[i]->cmd_link = make16((p->nop_cmds[i])); 716 + for (i = 0; i < NUM_XMIT_BUFFS; i++) { 717 + p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 718 + writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 719 + writew(0, &p->nop_cmds[i]->cmd_status); 720 + writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 749 721 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 750 722 } 751 723 #endif 752 724 753 - ptr = alloc_rfa(dev,(void *)ptr); /* init receive-frame-area */ 725 + ptr = alloc_rfa(dev, (void *)ptr); /* init receive-frame-area */ 754 726 755 727 /* 756 728 * alloc xmit-buffs / init xmit_cmds 757 729 */ 758 - for(i=0;i<NUM_XMIT_BUFFS;i++) 759 - { 760 - p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; /*transmit cmd/buff 0*/ 730 + for (i = 0; i < NUM_XMIT_BUFFS; i++) { 731 + /* Transmit cmd/buff 0 */ 732 + p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; 761 733 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); 762 734 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ 763 735 ptr = (char *) ptr + XMIT_BUFF_SIZE; 764 736 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 765 737 ptr = (char *) ptr + sizeof(struct tbd_struct); 766 - if((void *)ptr > (void *)p->iscp) 767 - { 768 - printk("%s: 
not enough shared-mem for your configuration!\n",dev->name); 738 + if ((void *)ptr > (void *)p->iscp) { 739 + printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", 740 + dev->name); 769 741 return 1; 770 742 } 771 - memset((char *)(p->xmit_cmds[i]) ,0, sizeof(struct transmit_cmd_struct)); 772 - memset((char *)(p->xmit_buffs[i]),0, sizeof(struct tbd_struct)); 773 - p->xmit_cmds[i]->cmd_link = make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]); 774 - p->xmit_cmds[i]->cmd_status = STAT_COMPL; 775 - p->xmit_cmds[i]->cmd_cmd = CMD_XMIT | CMD_INT; 776 - p->xmit_cmds[i]->tbd_offset = make16((p->xmit_buffs[i])); 777 - p->xmit_buffs[i]->next = 0xffff; 778 - p->xmit_buffs[i]->buffer = make24((p->xmit_cbuffs[i])); 743 + memset_io((char *)(p->xmit_cmds[i]), 0, 744 + sizeof(struct transmit_cmd_struct)); 745 + memset_io((char *)(p->xmit_buffs[i]), 0, 746 + sizeof(struct tbd_struct)); 747 + writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), 748 + &p->xmit_cmds[i]->cmd_link); 749 + writew(STAT_COMPL, &p->xmit_cmds[i]->cmd_status); 750 + writew(CMD_XMIT|CMD_INT, &p->xmit_cmds[i]->cmd_cmd); 751 + writew(make16(p->xmit_buffs[i]), &p->xmit_cmds[i]->tbd_offset); 752 + writew(0xffff, &p->xmit_buffs[i]->next); 753 + writel(make24(p->xmit_cbuffs[i]), &p->xmit_buffs[i]->buffer); 779 754 } 780 755 781 756 p->xmit_count = 0; ··· 790 761 * 'start transmitter' 791 762 */ 792 763 #ifndef NO_NOPCOMMANDS 793 - p->scb->cbl_offset = make16(p->nop_cmds[0]); 794 - p->scb->cmd_cuc = CUC_START; 764 + writew(make16(p->nop_cmds[0]), &p->scb->cbl_offset); 765 + writeb(CUC_START, &p->scb->cmd_cuc); 795 766 ni_attn586(); 796 - WAIT_4_SCB_CMD(); 767 + wait_for_scb_cmd(dev); 797 768 #else 798 - p->xmit_cmds[0]->cmd_link = make16(p->xmit_cmds[0]); 799 - p->xmit_cmds[0]->cmd_cmd = CMD_XMIT | CMD_SUSPEND | CMD_INT; 769 + writew(make16(p->xmit_cmds[0]), &p->xmit_cmds[0]->cmd_link); 770 + writew(CMD_XMIT | CMD_SUSPEND | CMD_INT, &p->xmit_cmds[0]->cmd_cmd); 800 771 #endif 801 772 802 773 /* 803 774 * ack. 
interrupts 804 775 */ 805 - p->scb->cmd_cuc = p->scb->cus & STAT_MASK; 776 + writeb(readb(&p->scb->cus) & STAT_MASK, &p->scb->cmd_cuc); 806 777 ni_attn586(); 807 - DELAY_16(); 778 + udelay(16); 808 779 809 780 ni_enaint(); 810 781 ··· 816 787 * It sets up the Receive Frame Area (RFA). 817 788 */ 818 789 819 - static void *alloc_rfa(struct net_device *dev,void *ptr) 790 + static void *alloc_rfa(struct net_device *dev, void *ptr) 820 791 { 821 - volatile struct rfd_struct *rfd = (struct rfd_struct *)ptr; 822 - volatile struct rbd_struct *rbd; 792 + struct rfd_struct *rfd = (struct rfd_struct *)ptr; 793 + struct rbd_struct *rbd; 823 794 int i; 824 795 struct priv *p = (struct priv *) dev->priv; 825 796 826 - memset((char *) rfd,0,sizeof(struct rfd_struct)*(p->num_recv_buffs+rfdadd)); 797 + memset_io((char *) rfd, 0, 798 + sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); 827 799 p->rfd_first = rfd; 828 800 829 - for(i = 0; i < (p->num_recv_buffs+rfdadd); i++) { 830 - rfd[i].next = make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd) ); 831 - rfd[i].rbd_offset = 0xffff; 801 + for (i = 0; i < (p->num_recv_buffs + rfdadd); i++) { 802 + writew(make16(rfd + (i+1) % (p->num_recv_buffs+rfdadd)), 803 + &rfd[i].next); 804 + writew(0xffff, &rfd[i].rbd_offset); 832 805 } 833 - rfd[p->num_recv_buffs-1+rfdadd].last = RFD_SUSP; /* RU suspend */ 806 + /* RU suspend */ 807 + writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); 834 808 835 - ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd) ); 809 + ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd)); 836 810 837 811 rbd = (struct rbd_struct *) ptr; 838 812 ptr = (void *) (rbd + p->num_recv_buffs); 839 813 840 814 /* clr descriptors */ 841 - memset((char *) rbd,0,sizeof(struct rbd_struct)*(p->num_recv_buffs)); 815 + memset_io((char *)rbd, 0, 816 + sizeof(struct rbd_struct) * (p->num_recv_buffs)); 842 817 843 - for(i=0;i<p->num_recv_buffs;i++) 844 - { 845 - rbd[i].next = make16((rbd + (i+1) % p->num_recv_buffs)); 846 - 
rbd[i].size = RECV_BUFF_SIZE; 847 - rbd[i].buffer = make24(ptr); 818 + for (i = 0; i < p->num_recv_buffs; i++) { 819 + writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); 820 + writew(RECV_BUFF_SIZE, &rbd[i].size); 821 + writel(make24(ptr), &rbd[i].buffer); 848 822 ptr = (char *) ptr + RECV_BUFF_SIZE; 849 823 } 850 - 851 824 p->rfd_top = p->rfd_first; 852 825 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); 853 826 854 - p->scb->rfa_offset = make16(p->rfd_first); 855 - p->rfd_first->rbd_offset = make16(rbd); 827 + writew(make16(p->rfd_first), &p->scb->rfa_offset); 828 + writew(make16(rbd), &p->rfd_first->rbd_offset); 856 829 857 830 return ptr; 858 831 } ··· 864 833 * Interrupt Handler ... 865 834 */ 866 835 867 - static irqreturn_t ni52_interrupt(int irq,void *dev_id) 836 + static irqreturn_t ni52_interrupt(int irq, void *dev_id) 868 837 { 869 838 struct net_device *dev = dev_id; 870 - unsigned short stat; 871 - int cnt=0; 839 + unsigned int stat; 840 + int cnt = 0; 872 841 struct priv *p; 873 842 874 - if (!dev) { 875 - printk ("ni5210-interrupt: irq %d for unknown device.\n",irq); 876 - return IRQ_NONE; 877 - } 878 843 p = (struct priv *) dev->priv; 879 844 880 - if(debuglevel > 1) 845 + if (debuglevel > 1) 881 846 printk("I"); 882 847 883 - WAIT_4_SCB_CMD(); /* wait for last command */ 848 + spin_lock(&p->spinlock); 884 849 885 - while((stat=p->scb->cus & STAT_MASK)) 886 - { 887 - p->scb->cmd_cuc = stat; 850 + wait_for_scb_cmd(dev); /* wait for last command */ 851 + 852 + while ((stat = readb(&p->scb->cus) & STAT_MASK)) { 853 + writeb(stat, &p->scb->cmd_cuc); 888 854 ni_attn586(); 889 855 890 - if(stat & STAT_FR) /* received a frame */ 856 + if (stat & STAT_FR) /* received a frame */ 891 857 ni52_rcv_int(dev); 892 858 893 - if(stat & STAT_RNR) /* RU went 'not ready' */ 894 - { 859 + if (stat & STAT_RNR) { /* RU went 'not ready' */ 895 860 printk("(R)"); 896 - if(p->scb->rus & RU_SUSPEND) /* special case: RU_SUSPEND */ 897 - { 898 - 
WAIT_4_SCB_CMD(); 861 + if (readb(&p->scb->rus) & RU_SUSPEND) { 862 + /* special case: RU_SUSPEND */ 863 + wait_for_scb_cmd(dev); 899 864 p->scb->cmd_ruc = RUC_RESUME; 900 865 ni_attn586(); 901 - WAIT_4_SCB_CMD_RUC(); 902 - } 903 - else 904 - { 905 - printk("%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->rus); 866 + wait_for_scb_cmd_ruc(dev); 867 + } else { 868 + printk(KERN_ERR "%s: Receiver-Unit went 'NOT READY': %04x/%02x.\n", 869 + dev->name, stat, readb(&p->scb->rus)); 906 870 ni52_rnr_int(dev); 907 871 } 908 872 } 909 873 910 - if(stat & STAT_CX) /* command with I-bit set complete */ 874 + /* Command with I-bit set complete */ 875 + if (stat & STAT_CX) 911 876 ni52_xmt_int(dev); 912 877 913 878 #ifndef NO_NOPCOMMANDS 914 - if(stat & STAT_CNA) /* CU went 'not ready' */ 915 - { 916 - if(netif_running(dev)) 917 - printk("%s: oops! CU has left active state. stat: %04x/%02x.\n",dev->name,(int) stat,(int) p->scb->cus); 879 + if (stat & STAT_CNA) { /* CU went 'not ready' */ 880 + if (netif_running(dev)) 881 + printk(KERN_ERR "%s: oops! CU has left active state. stat: %04x/%02x.\n", 882 + dev->name, stat, readb(&p->scb->cus)); 918 883 } 919 884 #endif 920 885 921 - if(debuglevel > 1) 922 - printk("%d",cnt++); 886 + if (debuglevel > 1) 887 + printk("%d", cnt++); 923 888 924 - WAIT_4_SCB_CMD(); /* wait for ack. (ni52_xmt_int can be faster than ack!!) */ 925 - if(p->scb->cmd_cuc) /* timed out? */ 926 - { 927 - printk("%s: Acknowledge timed out.\n",dev->name); 889 + /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ 890 + wait_for_scb_cmd(dev); 891 + if (p->scb->cmd_cuc) { /* timed out? 
*/ 892 + printk(KERN_ERR "%s: Acknowledge timed out.\n", 893 + dev->name); 928 894 ni_disint(); 929 895 break; 930 896 } 931 897 } 898 + spin_unlock(&p->spinlock); 932 899 933 - if(debuglevel > 1) 900 + if (debuglevel > 1) 934 901 printk("i"); 935 902 return IRQ_HANDLED; 936 903 } ··· 939 910 940 911 static void ni52_rcv_int(struct net_device *dev) 941 912 { 942 - int status,cnt=0; 913 + int status, cnt = 0; 943 914 unsigned short totlen; 944 915 struct sk_buff *skb; 945 916 struct rbd_struct *rbd; 946 - struct priv *p = (struct priv *) dev->priv; 917 + struct priv *p = (struct priv *)dev->priv; 947 918 948 - if(debuglevel > 0) 919 + if (debuglevel > 0) 949 920 printk("R"); 950 921 951 - for(;(status = p->rfd_top->stat_high) & RFD_COMPL;) 952 - { 953 - rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 954 - 955 - if(status & RFD_OK) /* frame received without error? */ 956 - { 957 - if( (totlen = rbd->status) & RBD_LAST) /* the first and the last buffer? */ 958 - { 959 - totlen &= RBD_MASK; /* length of this frame */ 960 - rbd->status = 0; 961 - skb = (struct sk_buff *) dev_alloc_skb(totlen+2); 962 - if(skb != NULL) 963 - { 964 - skb_reserve(skb,2); 965 - skb_put(skb,totlen); 966 - skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen); 967 - skb->protocol=eth_type_trans(skb,dev); 968 - netif_rx(skb); 969 - dev->last_rx = jiffies; 970 - p->stats.rx_packets++; 971 - p->stats.rx_bytes += totlen; 972 - } 973 - else 974 - p->stats.rx_dropped++; 975 - } 976 - else 977 - { 978 - int rstat; 979 - /* free all RBD's until RBD_LAST is set */ 980 - totlen = 0; 981 - while(!((rstat=rbd->status) & RBD_LAST)) 982 - { 983 - totlen += rstat & RBD_MASK; 984 - if(!rstat) 985 - { 986 - printk("%s: Whoops .. no end mark in RBD list\n",dev->name); 987 - break; 988 - } 989 - rbd->status = 0; 990 - rbd = (struct rbd_struct *) make32(rbd->next); 991 - } 992 - totlen += rstat & RBD_MASK; 993 - rbd->status = 0; 994 - printk("%s: received oversized frame! 
length: %d\n",dev->name,totlen); 922 + for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { 923 + rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 924 + if (status & RFD_OK) { /* frame received without error? */ 925 + totlen = readw(&rbd->status); 926 + if (totlen & RBD_LAST) { 927 + /* the first and the last buffer? */ 928 + totlen &= RBD_MASK; /* length of this frame */ 929 + writew(0x00, &rbd->status); 930 + skb = (struct sk_buff *)dev_alloc_skb(totlen+2); 931 + if (skb != NULL) { 932 + skb_reserve(skb, 2); 933 + skb_put(skb, totlen); 934 + skb_copy_to_linear_data(skb, (char *)p->base + (unsigned long) rbd->buffer, totlen); 935 + skb->protocol = eth_type_trans(skb, dev); 936 + netif_rx(skb); 937 + dev->last_rx = jiffies; 938 + p->stats.rx_packets++; 939 + p->stats.rx_bytes += totlen; 940 + } else 995 941 p->stats.rx_dropped++; 942 + } else { 943 + int rstat; 944 + /* free all RBD's until RBD_LAST is set */ 945 + totlen = 0; 946 + while (!((rstat = readw(&rbd->status)) & RBD_LAST)) { 947 + totlen += rstat & RBD_MASK; 948 + if (!rstat) { 949 + printk(KERN_ERR "%s: Whoops .. no end mark in RBD list\n", dev->name); 950 + break; 951 + } 952 + writew(0, &rbd->status); 953 + rbd = (struct rbd_struct *) make32(readl(&rbd->next)); 954 + } 955 + totlen += rstat & RBD_MASK; 956 + writew(0, &rbd->status); 957 + printk(KERN_ERR "%s: received oversized frame! length: %d\n", 958 + dev->name, totlen); 959 + p->stats.rx_dropped++; 996 960 } 997 - } 998 - else /* frame !(ok), only with 'save-bad-frames' */ 999 - { 1000 - printk("%s: oops! rfd-error-status: %04x\n",dev->name,status); 961 + } else {/* frame !(ok), only with 'save-bad-frames' */ 962 + printk(KERN_ERR "%s: oops! 
rfd-error-status: %04x\n", 963 + dev->name, status); 1001 964 p->stats.rx_errors++; 1002 965 } 1003 - p->rfd_top->stat_high = 0; 1004 - p->rfd_top->last = RFD_SUSP; /* maybe exchange by RFD_LAST */ 1005 - p->rfd_top->rbd_offset = 0xffff; 1006 - p->rfd_last->last = 0; /* delete RFD_SUSP */ 966 + writeb(0, &p->rfd_top->stat_high); 967 + writeb(RFD_SUSP, &p->rfd_top->last); /* maybe exchange by RFD_LAST */ 968 + writew(0xffff, &p->rfd_top->rbd_offset); 969 + writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ 1007 970 p->rfd_last = p->rfd_top; 1008 971 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ 1009 - p->scb->rfa_offset = make16(p->rfd_top); 972 + writew(make16(p->rfd_top), &p->scb->rfa_offset); 1010 973 1011 - if(debuglevel > 0) 1012 - printk("%d",cnt++); 974 + if (debuglevel > 0) 975 + printk("%d", cnt++); 1013 976 } 1014 977 1015 - if(automatic_resume) 1016 - { 1017 - WAIT_4_SCB_CMD(); 1018 - p->scb->cmd_ruc = RUC_RESUME; 978 + if (automatic_resume) { 979 + wait_for_scb_cmd(dev); 980 + writeb(RUC_RESUME, &p->scb->cmd_ruc); 1019 981 ni_attn586(); 1020 - WAIT_4_SCB_CMD_RUC(); 982 + wait_for_scb_cmd_ruc(dev); 1021 983 } 1022 984 1023 985 #ifdef WAIT_4_BUSY 1024 986 { 1025 987 int i; 1026 - for(i=0;i<1024;i++) 1027 - { 1028 - if(p->rfd_top->status) 988 + for (i = 0; i < 1024; i++) { 989 + if (p->rfd_top->status) 1029 990 break; 1030 - DELAY_16(); 1031 - if(i == 1023) 1032 - printk("%s: RU hasn't fetched next RFD (not busy/complete)\n",dev->name); 991 + udelay(16); 992 + if (i == 1023) 993 + printk(KERN_ERR "%s: RU hasn't fetched next RFD (not busy/complete)\n", dev->name); 1033 994 } 1034 995 } 1035 996 #endif 1036 - 1037 - #if 0 1038 - if(!at_least_one) 1039 - { 1040 - int i; 1041 - volatile struct rfd_struct *rfds=p->rfd_top; 1042 - volatile struct rbd_struct *rbds; 1043 - printk("%s: received a FC intr. 
without having a frame: %04x %d\n",dev->name,status,old_at_least); 1044 - for(i=0;i< (p->num_recv_buffs+4);i++) 1045 - { 1046 - rbds = (struct rbd_struct *) make32(rfds->rbd_offset); 1047 - printk("%04x:%04x ",rfds->status,rbds->status); 1048 - rfds = (struct rfd_struct *) make32(rfds->next); 1049 - } 1050 - printk("\nerrs: %04x %04x stat: %04x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->status); 1051 - printk("\nerrs: %04x %04x rus: %02x, cus: %02x\n",(int)p->scb->rsc_errs,(int)p->scb->ovrn_errs,(int)p->scb->rus,(int)p->scb->cus); 1052 - } 1053 - old_at_least = at_least_one; 1054 - #endif 1055 - 1056 - if(debuglevel > 0) 997 + if (debuglevel > 0) 1057 998 printk("r"); 1058 999 } 1059 1000 ··· 1037 1038 1038 1039 p->stats.rx_errors++; 1039 1040 1040 - WAIT_4_SCB_CMD(); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 1041 - p->scb->cmd_ruc = RUC_ABORT; /* usually the RU is in the 'no resource'-state .. abort it now. */ 1041 + wait_for_scb_cmd(dev); /* wait for the last cmd, WAIT_4_FULLSTAT?? */ 1042 + writeb(RUC_ABORT, &p->scb->cmd_ruc); /* usually the RU is in the 'no resource'-state .. abort it now. */ 1042 1043 ni_attn586(); 1043 - WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. */ 1044 + wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ 1044 1045 1045 - alloc_rfa(dev,(char *)p->rfd_first); 1046 - /* maybe add a check here, before restarting the RU */ 1046 + alloc_rfa(dev, (char *)p->rfd_first); 1047 + /* maybe add a check here, before restarting the RU */ 1047 1048 startrecv586(dev); /* restart RU */ 1048 1049 1049 - printk("%s: Receive-Unit restarted. Status: %04x\n",dev->name,p->scb->rus); 1050 + printk(KERN_ERR "%s: Receive-Unit restarted. 
Status: %04x\n", dev->name, p->scb->rus); 1050 1051 1051 1052 } 1052 1053 ··· 1059 1060 int status; 1060 1061 struct priv *p = (struct priv *) dev->priv; 1061 1062 1062 - if(debuglevel > 0) 1063 + if (debuglevel > 0) 1063 1064 printk("X"); 1064 1065 1065 - status = p->xmit_cmds[p->xmit_last]->cmd_status; 1066 - if(!(status & STAT_COMPL)) 1067 - printk("%s: strange .. xmit-int without a 'COMPLETE'\n",dev->name); 1066 + status = readw(&p->xmit_cmds[p->xmit_last]->cmd_status); 1067 + if (!(status & STAT_COMPL)) 1068 + printk(KERN_ERR "%s: strange .. xmit-int without a 'COMPLETE'\n", dev->name); 1068 1069 1069 - if(status & STAT_OK) 1070 - { 1070 + if (status & STAT_OK) { 1071 1071 p->stats.tx_packets++; 1072 1072 p->stats.collisions += (status & TCMD_MAXCOLLMASK); 1073 - } 1074 - else 1075 - { 1073 + } else { 1076 1074 p->stats.tx_errors++; 1077 - if(status & TCMD_LATECOLL) { 1078 - printk("%s: late collision detected.\n",dev->name); 1075 + if (status & TCMD_LATECOLL) { 1076 + printk(KERN_ERR "%s: late collision detected.\n", 1077 + dev->name); 1079 1078 p->stats.collisions++; 1080 - } 1081 - else if(status & TCMD_NOCARRIER) { 1079 + } else if (status & TCMD_NOCARRIER) { 1082 1080 p->stats.tx_carrier_errors++; 1083 - printk("%s: no carrier detected.\n",dev->name); 1084 - } 1085 - else if(status & TCMD_LOSTCTS) 1086 - printk("%s: loss of CTS detected.\n",dev->name); 1087 - else if(status & TCMD_UNDERRUN) { 1081 + printk(KERN_ERR "%s: no carrier detected.\n", 1082 + dev->name); 1083 + } else if (status & TCMD_LOSTCTS) 1084 + printk(KERN_ERR "%s: loss of CTS detected.\n", 1085 + dev->name); 1086 + else if (status & TCMD_UNDERRUN) { 1088 1087 p->stats.tx_fifo_errors++; 1089 - printk("%s: DMA underrun detected.\n",dev->name); 1090 - } 1091 - else if(status & TCMD_MAXCOLL) { 1092 - printk("%s: Max. 
collisions exceeded.\n",dev->name); 1088 + printk(KERN_ERR "%s: DMA underrun detected.\n", 1089 + dev->name); 1090 + } else if (status & TCMD_MAXCOLL) { 1091 + printk(KERN_ERR "%s: Max. collisions exceeded.\n", 1092 + dev->name); 1093 1093 p->stats.collisions += 16; 1094 1094 } 1095 1095 } 1096 - 1097 1096 #if (NUM_XMIT_BUFFS > 1) 1098 - if( (++p->xmit_last) == NUM_XMIT_BUFFS) 1097 + if ((++p->xmit_last) == NUM_XMIT_BUFFS) 1099 1098 p->xmit_last = 0; 1100 1099 #endif 1101 1100 netif_wake_queue(dev); ··· 1107 1110 { 1108 1111 struct priv *p = (struct priv *) dev->priv; 1109 1112 1110 - WAIT_4_SCB_CMD(); 1111 - WAIT_4_SCB_CMD_RUC(); 1112 - p->scb->rfa_offset = make16(p->rfd_first); 1113 - p->scb->cmd_ruc = RUC_START; 1113 + wait_for_scb_cmd(dev); 1114 + wait_for_scb_cmd_ruc(dev); 1115 + writew(make16(p->rfd_first), &p->scb->rfa_offset); 1116 + writeb(RUC_START, &p->scb->cmd_ruc); 1114 1117 ni_attn586(); /* start cmd. */ 1115 - WAIT_4_SCB_CMD_RUC(); /* wait for accept cmd. (no timeout!!) */ 1118 + wait_for_scb_cmd_ruc(dev); 1119 + /* wait for accept cmd. (no timeout!!) */ 1116 1120 } 1117 1121 1118 1122 static void ni52_timeout(struct net_device *dev) 1119 1123 { 1120 1124 struct priv *p = (struct priv *) dev->priv; 1121 1125 #ifndef NO_NOPCOMMANDS 1122 - if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */ 1123 - { 1126 + if (readb(&p->scb->cus) & CU_ACTIVE) { /* COMMAND-UNIT active? */ 1124 1127 netif_wake_queue(dev); 1125 1128 #ifdef DEBUG 1126 - printk("%s: strange ... timeout with CU active?!?\n",dev->name); 1127 - printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)p->xmit_cmds[0]->cmd_status,(int)p->nop_cmds[0]->cmd_status,(int)p->nop_cmds[1]->cmd_status,(int)p->nop_point); 1129 + printk(KERN_ERR "%s: strange ... 
timeout with CU active?!?\n", 1130 + dev->name); 1131 + printk(KERN_ERR "%s: X0: %04x N0: %04x N1: %04x %d\n", 1132 + dev->name, (int)p->xmit_cmds[0]->cmd_status, 1133 + readw(&p->nop_cmds[0]->cmd_status), 1134 + readw(&p->nop_cmds[1]->cmd_status), 1135 + p->nop_point); 1128 1136 #endif 1129 - p->scb->cmd_cuc = CUC_ABORT; 1137 + writeb(CUC_ABORT, &p->scb->cmd_cuc); 1130 1138 ni_attn586(); 1131 - WAIT_4_SCB_CMD(); 1132 - p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]); 1133 - p->scb->cmd_cuc = CUC_START; 1139 + wait_for_scb_cmd(dev); 1140 + writew(make16(p->nop_cmds[p->nop_point]), &p->scb->cbl_offset); 1141 + writeb(CUC_START, &p->scb->cmd_cuc); 1134 1142 ni_attn586(); 1135 - WAIT_4_SCB_CMD(); 1143 + wait_for_scb_cmd(dev); 1136 1144 dev->trans_start = jiffies; 1137 1145 return 0; 1138 1146 } 1139 1147 #endif 1140 1148 { 1141 1149 #ifdef DEBUG 1142 - printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus); 1143 - printk("%s: command-stats: %04x %04x\n",dev->name,p->xmit_cmds[0]->cmd_status,p->xmit_cmds[1]->cmd_status); 1144 - printk("%s: check, whether you set the right interrupt number!\n",dev->name); 1150 + printk(KERN_ERR "%s: xmitter timed out, try to restart! stat: %02x\n", 1151 + dev->name, readb(&p->scb->cus)); 1152 + printk(KERN_ERR "%s: command-stats: %04x %04x\n", 1153 + dev->name, 1154 + readw(&p->xmit_cmds[0]->cmd_status), 1155 + readw(&p->xmit_cmds[1]->cmd_status)); 1156 + printk(KERN_ERR "%s: check, whether you set the right interrupt number!\n", 1157 + dev->name); 1145 1158 #endif 1146 1159 ni52_close(dev); 1147 1160 ni52_open(dev); ··· 1165 1158 1166 1159 static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev) 1167 1160 { 1168 - int len,i; 1161 + int len, i; 1169 1162 #ifndef NO_NOPCOMMANDS 1170 1163 int next_nop; 1171 1164 #endif 1172 1165 struct priv *p = (struct priv *) dev->priv; 1173 1166 1174 - if(skb->len > XMIT_BUFF_SIZE) 1175 - { 1176 - printk("%s: Sorry, max. framelength is %d bytes. 
The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len); 1167 + if (skb->len > XMIT_BUFF_SIZE) { 1168 + printk(KERN_ERR "%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n", dev->name, XMIT_BUFF_SIZE, skb->len); 1177 1169 return 0; 1178 1170 } 1179 1171 1180 1172 netif_stop_queue(dev); 1181 1173 1182 - #if(NUM_XMIT_BUFFS > 1) 1183 - if(test_and_set_bit(0,(void *) &p->lock)) { 1184 - printk("%s: Queue was locked\n",dev->name); 1185 - return 1; 1174 + skb_copy_from_linear_data(skb, (char *)p->xmit_cbuffs[p->xmit_count], 1175 + skb->len); 1176 + len = skb->len; 1177 + if (len < ETH_ZLEN) { 1178 + len = ETH_ZLEN; 1179 + memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, 1180 + len - skb->len); 1186 1181 } 1187 - else 1188 - #endif 1189 - { 1190 - skb_copy_from_linear_data(skb, (char *) p->xmit_cbuffs[p->xmit_count], skb->len); 1191 - len = skb->len; 1192 - if (len < ETH_ZLEN) { 1193 - len = ETH_ZLEN; 1194 - memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, len - skb->len); 1195 - } 1196 1182 1197 1183 #if (NUM_XMIT_BUFFS == 1) 1198 1184 # ifdef NO_NOPCOMMANDS 1199 1185 1200 1186 #ifdef DEBUG 1201 - if(p->scb->cus & CU_ACTIVE) 1202 - { 1203 - printk("%s: Hmmm .. CU is still running and we wanna send a new packet.\n",dev->name); 1204 - printk("%s: stat: %04x %04x\n",dev->name,p->scb->cus,p->xmit_cmds[0]->cmd_status); 1205 - } 1187 + if (p->scb->cus & CU_ACTIVE) { 1188 + printk(KERN_ERR "%s: Hmmm .. 
CU is still running and we wanna send a new packet.\n", dev->name); 1189 + printk(KERN_ERR "%s: stat: %04x %04x\n", 1190 + dev->name, readb(&p->scb->cus), 1191 + readw(&p->xmit_cmds[0]->cmd_status)); 1192 + } 1206 1193 #endif 1207 - 1208 - p->xmit_buffs[0]->size = TBD_LAST | len; 1209 - for(i=0;i<16;i++) 1210 - { 1211 - p->xmit_cmds[0]->cmd_status = 0; 1212 - WAIT_4_SCB_CMD(); 1213 - if( (p->scb->cus & CU_STATUS) == CU_SUSPEND) 1214 - p->scb->cmd_cuc = CUC_RESUME; 1215 - else 1216 - { 1217 - p->scb->cbl_offset = make16(p->xmit_cmds[0]); 1218 - p->scb->cmd_cuc = CUC_START; 1219 - } 1220 - 1221 - ni_attn586(); 1222 - dev->trans_start = jiffies; 1223 - if(!i) 1224 - dev_kfree_skb(skb); 1225 - WAIT_4_SCB_CMD(); 1226 - if( (p->scb->cus & CU_ACTIVE)) /* test it, because CU sometimes doesn't start immediately */ 1227 - break; 1228 - if(p->xmit_cmds[0]->cmd_status) 1229 - break; 1230 - if(i==15) 1231 - printk("%s: Can't start transmit-command.\n",dev->name); 1194 + writew(TBD_LAST | len, &p->xmit_buffs[0]->size);; 1195 + for (i = 0; i < 16; i++) { 1196 + writew(0, &p->xmit_cmds[0]->cmd_status); 1197 + wait_for_scb_cmd(dev); 1198 + if ((readb(&p->scb->cus) & CU_STATUS) == CU_SUSPEND) 1199 + writeb(CUC_RESUME, &p->scb->cmd_cuc); 1200 + else { 1201 + writew(make16(p->xmit_cmds[0]), &p->scb->cbl_offset); 1202 + writeb(CUC_START, &p->scb->cmd_cuc); 1232 1203 } 1233 - # else 1234 - next_nop = (p->nop_point + 1) & 0x1; 1235 - p->xmit_buffs[0]->size = TBD_LAST | len; 1236 - 1237 - p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link 1238 - = make16((p->nop_cmds[next_nop])); 1239 - p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; 1240 - 1241 - p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); 1204 + ni_attn586(); 1242 1205 dev->trans_start = jiffies; 1243 - p->nop_point = next_nop; 1244 - dev_kfree_skb(skb); 1206 + if (!i) 1207 + dev_kfree_skb(skb); 1208 + wait_for_scb_cmd(dev); 1209 + /* test it, because CU sometimes doesn't start 
immediately */ 1210 + if (readb(&p->scb->cus) & CU_ACTIVE) 1211 + break; 1212 + if (readw(&p->xmit_cmds[0]->cmd_status)) 1213 + break; 1214 + if (i == 15) 1215 + printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name); 1216 + } 1217 + # else 1218 + next_nop = (p->nop_point + 1) & 0x1; 1219 + writew(TBD_LAST | len, &p->xmit_buffs[0]->size); 1220 + writew(make16(p->nop_cmds[next_nop]), &p->xmit_cmds[0]->cmd_link); 1221 + writew(make16(p->nop_cmds[next_nop]), 1222 + &p->nop_cmds[next_nop]->cmd_link); 1223 + writew(0, &p->xmit_cmds[0]->cmd_status); 1224 + writew(0, &p->nop_cmds[next_nop]->cmd_status); 1225 + 1226 + writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link); 1227 + dev->trans_start = jiffies; 1228 + p->nop_point = next_nop; 1229 + dev_kfree_skb(skb); 1245 1230 # endif 1246 1231 #else 1247 - p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len; 1248 - if( (next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS ) 1249 - next_nop = 0; 1250 - 1251 - p->xmit_cmds[p->xmit_count]->cmd_status = 0; 1252 - /* linkpointer of xmit-command already points to next nop cmd */ 1253 - p->nop_cmds[next_nop]->cmd_link = make16((p->nop_cmds[next_nop])); 1254 - p->nop_cmds[next_nop]->cmd_status = 0; 1255 - 1256 - p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); 1257 - dev->trans_start = jiffies; 1258 - p->xmit_count = next_nop; 1259 - 1260 - { 1261 - unsigned long flags; 1262 - save_flags(flags); 1263 - cli(); 1264 - if(p->xmit_count != p->xmit_last) 1265 - netif_wake_queue(dev); 1266 - p->lock = 0; 1267 - restore_flags(flags); 1268 - } 1269 - dev_kfree_skb(skb); 1270 - #endif 1232 + writew(TBD_LAST | len, &p->xmit_buffs[p->xmit_count]->size); 1233 + next_nop = p->xmit_count + 1 1234 + if (next_nop == NUM_XMIT_BUFFS) 1235 + next_nop = 0; 1236 + writew(0, &p->xmit_cmds[p->xmit_count]->cmd_status); 1237 + /* linkpointer of xmit-command already points to next nop cmd */ 1238 + writew(make16(p->nop_cmds[next_nop]), 1239 + 
&p->nop_cmds[next_nop]->cmd_link); 1240 + writew(0, &p->nop_cmds[next_nop]->cmd_status); 1241 + writew(make16(p->xmit_cmds[p->xmit_count]), 1242 + &p->nop_cmds[p->xmit_count]->cmd_link); 1243 + dev->trans_start = jiffies; 1244 + p->xmit_count = next_nop; 1245 + { 1246 + unsigned long flags; 1247 + spin_lock_irqsave(&p->spinlock); 1248 + if (p->xmit_count != p->xmit_last) 1249 + netif_wake_queue(dev); 1250 + spin_unlock_irqrestore(&p->spinlock); 1271 1251 } 1252 + dev_kfree_skb(skb); 1253 + #endif 1272 1254 return 0; 1273 1255 } 1274 1256 ··· 1268 1272 static struct net_device_stats *ni52_get_stats(struct net_device *dev) 1269 1273 { 1270 1274 struct priv *p = (struct priv *) dev->priv; 1271 - unsigned short crc,aln,rsc,ovrn; 1275 + unsigned short crc, aln, rsc, ovrn; 1272 1276 1273 - crc = p->scb->crc_errs; /* get error-statistic from the ni82586 */ 1274 - p->scb->crc_errs = 0; 1275 - aln = p->scb->aln_errs; 1276 - p->scb->aln_errs = 0; 1277 - rsc = p->scb->rsc_errs; 1278 - p->scb->rsc_errs = 0; 1279 - ovrn = p->scb->ovrn_errs; 1280 - p->scb->ovrn_errs = 0; 1277 + /* Get error-statistics from the ni82586 */ 1278 + crc = readw(&p->scb->crc_errs); 1279 + writew(0, &p->scb->crc_errs); 1280 + aln = readw(&p->scb->aln_errs); 1281 + writew(0, &p->scb->aln_errs); 1282 + rsc = readw(&p->scb->rsc_errs); 1283 + writew(0, &p->scb->rsc_errs); 1284 + ovrn = readw(&p->scb->ovrn_errs); 1285 + writew(0, &p->scb->ovrn_errs); 1281 1286 1282 1287 p->stats.rx_crc_errors += crc; 1283 1288 p->stats.rx_fifo_errors += ovrn; ··· 1317 1320 1318 1321 int __init init_module(void) 1319 1322 { 1320 - if(io <= 0x0 || !memend || !memstart || irq < 2) { 1321 - printk("ni52: Autoprobing not allowed for modules.\nni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1323 + if (io <= 0x0 || !memend || !memstart || irq < 2) { 1324 + printk(KERN_ERR "ni52: Autoprobing not allowed for modules.\n"); 1325 + printk(KERN_ERR "ni52: Set symbols 'io' 'irq' 'memstart' and 'memend'\n"); 1322 1326 return 
-ENODEV; 1323 1327 } 1324 1328 dev_ni52 = ni52_probe(-1); ··· 1336 1338 } 1337 1339 #endif /* MODULE */ 1338 1340 1339 - #if 0 1340 - /* 1341 - * DUMP .. we expect a not running CMD unit and enough space 1342 - */ 1343 - void ni52_dump(struct net_device *dev,void *ptr) 1344 - { 1345 - struct priv *p = (struct priv *) dev->priv; 1346 - struct dump_cmd_struct *dump_cmd = (struct dump_cmd_struct *) ptr; 1347 - int i; 1348 - 1349 - p->scb->cmd_cuc = CUC_ABORT; 1350 - ni_attn586(); 1351 - WAIT_4_SCB_CMD(); 1352 - WAIT_4_SCB_CMD_RUC(); 1353 - 1354 - dump_cmd->cmd_status = 0; 1355 - dump_cmd->cmd_cmd = CMD_DUMP | CMD_LAST; 1356 - dump_cmd->dump_offset = make16((dump_cmd + 1)); 1357 - dump_cmd->cmd_link = 0xffff; 1358 - 1359 - p->scb->cbl_offset = make16(dump_cmd); 1360 - p->scb->cmd_cuc = CUC_START; 1361 - ni_attn586(); 1362 - WAIT_4_STAT_COMPL(dump_cmd); 1363 - 1364 - if( (dump_cmd->cmd_status & (STAT_COMPL|STAT_OK)) != (STAT_COMPL|STAT_OK) ) 1365 - printk("%s: Can't get dump information.\n",dev->name); 1366 - 1367 - for(i=0;i<170;i++) { 1368 - printk("%02x ",(int) ((unsigned char *) (dump_cmd + 1))[i]); 1369 - if(i % 24 == 23) 1370 - printk("\n"); 1371 - } 1372 - printk("\n"); 1373 - } 1374 - #endif 1375 1341 MODULE_LICENSE("GPL"); 1376 1342 1377 1343 /*
+79 -79
drivers/net/ni52.h
··· 36 36 37 37 struct scp_struct 38 38 { 39 - unsigned short zero_dum0; /* has to be zero */ 40 - unsigned char sysbus; /* 0=16Bit,1=8Bit */ 41 - unsigned char zero_dum1; /* has to be zero for 586 */ 42 - unsigned short zero_dum2; 43 - unsigned short zero_dum3; 44 - char *iscp; /* pointer to the iscp-block */ 39 + u16 zero_dum0; /* has to be zero */ 40 + u8 sysbus; /* 0=16Bit,1=8Bit */ 41 + u8 zero_dum1; /* has to be zero for 586 */ 42 + u8 zero_dum2; 43 + u8 zero_dum3; 44 + u32 iscp; /* pointer to the iscp-block */ 45 45 }; 46 46 47 47 ··· 50 50 */ 51 51 struct iscp_struct 52 52 { 53 - unsigned char busy; /* 586 clears after successful init */ 54 - unsigned char zero_dummy; /* has to be zero */ 55 - unsigned short scb_offset; /* pointeroffset to the scb_base */ 56 - char *scb_base; /* base-address of all 16-bit offsets */ 53 + u8 busy; /* 586 clears after successful init */ 54 + u8 zero_dummy; /* has to be zero */ 55 + u16 scb_offset; /* pointeroffset to the scb_base */ 56 + u32 scb_base; /* base-address of all 16-bit offsets */ 57 57 }; 58 58 59 59 /* ··· 61 61 */ 62 62 struct scb_struct 63 63 { 64 - unsigned char rus; 65 - unsigned char cus; 66 - unsigned char cmd_ruc; /* command word: RU part */ 67 - unsigned char cmd_cuc; /* command word: CU part & ACK */ 68 - unsigned short cbl_offset; /* pointeroffset, command block list */ 69 - unsigned short rfa_offset; /* pointeroffset, receive frame area */ 70 - unsigned short crc_errs; /* CRC-Error counter */ 71 - unsigned short aln_errs; /* alignmenterror counter */ 72 - unsigned short rsc_errs; /* Resourceerror counter */ 73 - unsigned short ovrn_errs; /* OVerrunerror counter */ 64 + u8 rus; 65 + u8 cus; 66 + u8 cmd_ruc; /* command word: RU part */ 67 + u8 cmd_cuc; /* command word: CU part & ACK */ 68 + u16 cbl_offset; /* pointeroffset, command block list */ 69 + u16 rfa_offset; /* pointeroffset, receive frame area */ 70 + u16 crc_errs; /* CRC-Error counter */ 71 + u16 aln_errs; /* alignmenterror counter */ 72 + u16 
rsc_errs; /* Resourceerror counter */ 73 + u16 ovrn_errs; /* OVerrunerror counter */ 74 74 }; 75 75 76 76 /* ··· 119 119 */ 120 120 struct rfd_struct 121 121 { 122 - unsigned char stat_low; /* status word */ 123 - unsigned char stat_high; /* status word */ 124 - unsigned char rfd_sf; /* 82596 mode only */ 125 - unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */ 126 - unsigned short next; /* linkoffset to next RFD */ 127 - unsigned short rbd_offset; /* pointeroffset to RBD-buffer */ 128 - unsigned char dest[6]; /* ethernet-address, destination */ 129 - unsigned char source[6]; /* ethernet-address, source */ 130 - unsigned short length; /* 802.3 frame-length */ 131 - unsigned short zero_dummy; /* dummy */ 122 + u8 stat_low; /* status word */ 123 + u8 stat_high; /* status word */ 124 + u8 rfd_sf; /* 82596 mode only */ 125 + u8 last; /* Bit15,Last Frame on List / Bit14,suspend */ 126 + u16 next; /* linkoffset to next RFD */ 127 + u16 rbd_offset; /* pointeroffset to RBD-buffer */ 128 + u8 dest[6]; /* ethernet-address, destination */ 129 + u8 source[6]; /* ethernet-address, source */ 130 + u16 length; /* 802.3 frame-length */ 131 + u16 zero_dummy; /* dummy */ 132 132 }; 133 133 134 134 #define RFD_LAST 0x80 /* last: last rfd in the list */ ··· 153 153 */ 154 154 struct rbd_struct 155 155 { 156 - unsigned short status; /* status word,number of used bytes in buff */ 157 - unsigned short next; /* pointeroffset to next RBD */ 158 - char *buffer; /* receive buffer address pointer */ 159 - unsigned short size; /* size of this buffer */ 160 - unsigned short zero_dummy; /* dummy */ 156 + u16 status; /* status word,number of used bytes in buff */ 157 + u16 next; /* pointeroffset to next RBD */ 158 + u32 buffer; /* receive buffer address pointer */ 159 + u16 size; /* size of this buffer */ 160 + u16 zero_dummy; /* dummy */ 161 161 }; 162 162 163 163 #define RBD_LAST 0x8000 /* last buffer */ ··· 195 195 */ 196 196 struct nop_cmd_struct 197 197 { 198 - unsigned short 
cmd_status; /* status of this command */ 199 - unsigned short cmd_cmd; /* the command itself (+bits) */ 200 - unsigned short cmd_link; /* offsetpointer to next command */ 198 + u16 cmd_status; /* status of this command */ 199 + u16 cmd_cmd; /* the command itself (+bits) */ 200 + u16 cmd_link; /* offsetpointer to next command */ 201 201 }; 202 202 203 203 /* ··· 205 205 */ 206 206 struct iasetup_cmd_struct 207 207 { 208 - unsigned short cmd_status; 209 - unsigned short cmd_cmd; 210 - unsigned short cmd_link; 211 - unsigned char iaddr[6]; 208 + u16 cmd_status; 209 + u16 cmd_cmd; 210 + u16 cmd_link; 211 + u8 iaddr[6]; 212 212 }; 213 213 214 214 /* ··· 216 216 */ 217 217 struct configure_cmd_struct 218 218 { 219 - unsigned short cmd_status; 220 - unsigned short cmd_cmd; 221 - unsigned short cmd_link; 222 - unsigned char byte_cnt; /* size of the config-cmd */ 223 - unsigned char fifo; /* fifo/recv monitor */ 224 - unsigned char sav_bf; /* save bad frames (bit7=1)*/ 225 - unsigned char adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ 226 - unsigned char priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ 227 - unsigned char ifs; /* inter frame spacing */ 228 - unsigned char time_low; /* slot time low */ 229 - unsigned char time_high; /* slot time high(0-2) and max. 
retries(4-7) */ 230 - unsigned char promisc; /* promisc-mode(0) , et al (1-7) */ 231 - unsigned char carr_coll; /* carrier(0-3)/collision(4-7) stuff */ 232 - unsigned char fram_len; /* minimal frame len */ 233 - unsigned char dummy; /* dummy */ 219 + u16 cmd_status; 220 + u16 cmd_cmd; 221 + u16 cmd_link; 222 + u8 byte_cnt; /* size of the config-cmd */ 223 + u8 fifo; /* fifo/recv monitor */ 224 + u8 sav_bf; /* save bad frames (bit7=1)*/ 225 + u8 adr_len; /* adr_len(0-2),al_loc(3),pream(4-5),loopbak(6-7)*/ 226 + u8 priority; /* lin_prio(0-2),exp_prio(4-6),bof_metd(7) */ 227 + u8 ifs; /* inter frame spacing */ 228 + u8 time_low; /* slot time low */ 229 + u8 time_high; /* slot time high(0-2) and max. retries(4-7) */ 230 + u8 promisc; /* promisc-mode(0) , et al (1-7) */ 231 + u8 carr_coll; /* carrier(0-3)/collision(4-7) stuff */ 232 + u8 fram_len; /* minimal frame len */ 233 + u8 dummy; /* dummy */ 234 234 }; 235 235 236 236 /* ··· 238 238 */ 239 239 struct mcsetup_cmd_struct 240 240 { 241 - unsigned short cmd_status; 242 - unsigned short cmd_cmd; 243 - unsigned short cmd_link; 244 - unsigned short mc_cnt; /* number of bytes in the MC-List */ 245 - unsigned char mc_list[0][6]; /* pointer to 6 bytes entries */ 241 + u16 cmd_status; 242 + u16 cmd_cmd; 243 + u16 cmd_link; 244 + u16 mc_cnt; /* number of bytes in the MC-List */ 245 + u8 mc_list[0][6]; /* pointer to 6 bytes entries */ 246 246 }; 247 247 248 248 /* ··· 250 250 */ 251 251 struct dump_cmd_struct 252 252 { 253 - unsigned short cmd_status; 254 - unsigned short cmd_cmd; 255 - unsigned short cmd_link; 256 - unsigned short dump_offset; /* pointeroffset to DUMP space */ 253 + u16 cmd_status; 254 + u16 cmd_cmd; 255 + u16 cmd_link; 256 + u16 dump_offset; /* pointeroffset to DUMP space */ 257 257 }; 258 258 259 259 /* ··· 261 261 */ 262 262 struct transmit_cmd_struct 263 263 { 264 - unsigned short cmd_status; 265 - unsigned short cmd_cmd; 266 - unsigned short cmd_link; 267 - unsigned short tbd_offset; /* pointeroffset to 
TBD */ 268 - unsigned char dest[6]; /* destination address of the frame */ 269 - unsigned short length; /* user defined: 802.3 length / Ether type */ 264 + u16 cmd_status; 265 + u16 cmd_cmd; 266 + u16 cmd_link; 267 + u16 tbd_offset; /* pointeroffset to TBD */ 268 + u8 dest[6]; /* destination address of the frame */ 269 + u16 length; /* user defined: 802.3 length / Ether type */ 270 270 }; 271 271 272 272 #define TCMD_ERRMASK 0x0fa0 ··· 281 281 282 282 struct tdr_cmd_struct 283 283 { 284 - unsigned short cmd_status; 285 - unsigned short cmd_cmd; 286 - unsigned short cmd_link; 287 - unsigned short status; 284 + u16 cmd_status; 285 + u16 cmd_cmd; 286 + u16 cmd_link; 287 + u16 status; 288 288 }; 289 289 290 290 #define TDR_LNK_OK 0x8000 /* No link problem identified */ ··· 298 298 */ 299 299 struct tbd_struct 300 300 { 301 - unsigned short size; /* size + EOF-Flag(15) */ 302 - unsigned short next; /* pointeroffset to next TBD */ 303 - char *buffer; /* pointer to buffer */ 301 + u16 size; /* size + EOF-Flag(15) */ 302 + u16 next; /* pointeroffset to next TBD */ 303 + u32 buffer; /* pointer to buffer */ 304 304 }; 305 305 306 306 #define TBD_LAST 0x8000 /* EOF-Flag, indicates last buffer in list */
+26 -22
drivers/net/pcnet32.c
··· 174 174 #define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS)) 175 175 #define RX_MAX_RING_SIZE (1 << (PCNET32_LOG_MAX_RX_BUFFERS)) 176 176 177 - #define PKT_BUF_SZ 1544 177 + #define PKT_BUF_SKB 1544 178 + /* actual buffer length after being aligned */ 179 + #define PKT_BUF_SIZE (PKT_BUF_SKB - NET_IP_ALIGN) 180 + /* chip wants twos complement of the (aligned) buffer length */ 181 + #define NEG_BUF_SIZE (NET_IP_ALIGN - PKT_BUF_SKB) 178 182 179 183 /* Offsets from base I/O address. */ 180 184 #define PCNET32_WIO_RDP 0x10 ··· 608 604 /* now allocate any new buffers needed */ 609 605 for (; new < size; new++ ) { 610 606 struct sk_buff *rx_skbuff; 611 - new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ); 607 + new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB); 612 608 if (!(rx_skbuff = new_skb_list[new])) { 613 609 /* keep the original lists and buffers */ 614 610 if (netif_msg_drv(lp)) ··· 617 613 dev->name); 618 614 goto free_all_new; 619 615 } 620 - skb_reserve(rx_skbuff, 2); 616 + skb_reserve(rx_skbuff, NET_IP_ALIGN); 621 617 622 618 new_dma_addr_list[new] = 623 619 pci_map_single(lp->pci_dev, rx_skbuff->data, 624 - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 620 + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 625 621 new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]); 626 - new_rx_ring[new].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 622 + new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE); 627 623 new_rx_ring[new].status = cpu_to_le16(0x8000); 628 624 } 629 625 /* and free any unneeded buffers */ 630 626 for (; new < lp->rx_ring_size; new++) { 631 627 if (lp->rx_skbuff[new]) { 632 628 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new], 633 - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 629 + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 634 630 dev_kfree_skb(lp->rx_skbuff[new]); 635 631 } 636 632 } ··· 655 651 for (; --new >= lp->rx_ring_size; ) { 656 652 if (new_skb_list[new]) { 657 653 pci_unmap_single(lp->pci_dev, new_dma_addr_list[new], 658 - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 654 + 
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 659 655 dev_kfree_skb(new_skb_list[new]); 660 656 } 661 657 } ··· 682 678 wmb(); /* Make sure adapter sees owner change */ 683 679 if (lp->rx_skbuff[i]) { 684 680 pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i], 685 - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 681 + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 686 682 dev_kfree_skb_any(lp->rx_skbuff[i]); 687 683 } 688 684 lp->rx_skbuff[i] = NULL; ··· 1205 1201 pkt_len = (le32_to_cpu(rxp->msg_length) & 0xfff) - 4; 1206 1202 1207 1203 /* Discard oversize frames. */ 1208 - if (unlikely(pkt_len > PKT_BUF_SZ - 2)) { 1204 + if (unlikely(pkt_len > PKT_BUF_SIZE)) { 1209 1205 if (netif_msg_drv(lp)) 1210 1206 printk(KERN_ERR "%s: Impossible packet size %d!\n", 1211 1207 dev->name, pkt_len); ··· 1222 1218 if (pkt_len > rx_copybreak) { 1223 1219 struct sk_buff *newskb; 1224 1220 1225 - if ((newskb = dev_alloc_skb(PKT_BUF_SZ))) { 1226 - skb_reserve(newskb, 2); 1221 + if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) { 1222 + skb_reserve(newskb, NET_IP_ALIGN); 1227 1223 skb = lp->rx_skbuff[entry]; 1228 1224 pci_unmap_single(lp->pci_dev, 1229 1225 lp->rx_dma_addr[entry], 1230 - PKT_BUF_SZ - 2, 1226 + PKT_BUF_SIZE, 1231 1227 PCI_DMA_FROMDEVICE); 1232 1228 skb_put(skb, pkt_len); 1233 1229 lp->rx_skbuff[entry] = newskb; 1234 1230 lp->rx_dma_addr[entry] = 1235 1231 pci_map_single(lp->pci_dev, 1236 1232 newskb->data, 1237 - PKT_BUF_SZ - 2, 1233 + PKT_BUF_SIZE, 1238 1234 PCI_DMA_FROMDEVICE); 1239 1235 rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]); 1240 1236 rx_in_place = 1; 1241 1237 } else 1242 1238 skb = NULL; 1243 1239 } else { 1244 - skb = dev_alloc_skb(pkt_len + 2); 1240 + skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN); 1245 1241 } 1246 1242 1247 1243 if (skb == NULL) { ··· 1254 1250 } 1255 1251 skb->dev = dev; 1256 1252 if (!rx_in_place) { 1257 - skb_reserve(skb, 2); /* 16 byte align */ 1253 + skb_reserve(skb, NET_IP_ALIGN); 1258 1254 skb_put(skb, pkt_len); /* Make room */ 1259 1255 
pci_dma_sync_single_for_cpu(lp->pci_dev, 1260 1256 lp->rx_dma_addr[entry], ··· 1295 1291 * The docs say that the buffer length isn't touched, but Andrew 1296 1292 * Boyd of QNX reports that some revs of the 79C965 clear it. 1297 1293 */ 1298 - rxp->buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 1294 + rxp->buf_length = cpu_to_le16(NEG_BUF_SIZE); 1299 1295 wmb(); /* Make sure owner changes after others are visible */ 1300 1296 rxp->status = cpu_to_le16(0x8000); 1301 1297 entry = (++lp->cur_rx) & lp->rx_mod_mask; ··· 1778 1774 memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1779 1775 1780 1776 if (pcnet32_debug & NETIF_MSG_PROBE) { 1781 - for (i = 0; i < 6; i++) 1782 - printk(" %2.2x", dev->dev_addr[i]); 1777 + DECLARE_MAC_BUF(mac); 1778 + printk(" %s", print_mac(mac, dev->dev_addr)); 1783 1779 1784 1780 /* Version 0x2623 and 0x2624 */ 1785 1781 if (((chip_version + 1) & 0xfffe) == 0x2624) { ··· 2400 2396 if (rx_skbuff == NULL) { 2401 2397 if (! 2402 2398 (rx_skbuff = lp->rx_skbuff[i] = 2403 - dev_alloc_skb(PKT_BUF_SZ))) { 2399 + dev_alloc_skb(PKT_BUF_SKB))) { 2404 2400 /* there is not much, we can do at this point */ 2405 2401 if (netif_msg_drv(lp)) 2406 2402 printk(KERN_ERR ··· 2408 2404 dev->name); 2409 2405 return -1; 2410 2406 } 2411 - skb_reserve(rx_skbuff, 2); 2407 + skb_reserve(rx_skbuff, NET_IP_ALIGN); 2412 2408 } 2413 2409 2414 2410 rmb(); 2415 2411 if (lp->rx_dma_addr[i] == 0) 2416 2412 lp->rx_dma_addr[i] = 2417 2413 pci_map_single(lp->pci_dev, rx_skbuff->data, 2418 - PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE); 2414 + PKT_BUF_SIZE, PCI_DMA_FROMDEVICE); 2419 2415 lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]); 2420 - lp->rx_ring[i].buf_length = cpu_to_le16(2 - PKT_BUF_SZ); 2416 + lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE); 2421 2417 wmb(); /* Make sure owner changes after all others are visible */ 2422 2418 lp->rx_ring[i].status = cpu_to_le16(0x8000); 2423 2419 }
+2 -2
drivers/net/phy/fixed.c
··· 236 236 static void __exit fixed_mdio_bus_exit(void) 237 237 { 238 238 struct fixed_mdio_bus *fmb = &platform_fmb; 239 - struct fixed_phy *fp; 239 + struct fixed_phy *fp, *tmp; 240 240 241 241 mdiobus_unregister(&fmb->mii_bus); 242 242 platform_device_unregister(pdev); 243 243 244 - list_for_each_entry(fp, &fmb->phys, node) { 244 + list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { 245 245 list_del(&fp->node); 246 246 kfree(fp); 247 247 }
+697 -518
drivers/net/ps3_gelic_net.c
··· 46 46 #include <asm/lv1call.h> 47 47 48 48 #include "ps3_gelic_net.h" 49 + #include "ps3_gelic_wireless.h" 49 50 50 51 #define DRV_NAME "Gelic Network Driver" 51 - #define DRV_VERSION "1.0" 52 + #define DRV_VERSION "2.0" 52 53 53 54 MODULE_AUTHOR("SCE Inc."); 54 55 MODULE_DESCRIPTION("Gelic Network driver"); 55 56 MODULE_LICENSE("GPL"); 56 57 57 - static inline struct device *ctodev(struct gelic_net_card *card) 58 - { 59 - return &card->dev->core; 60 - } 61 - static inline u64 bus_id(struct gelic_net_card *card) 62 - { 63 - return card->dev->bus_id; 64 - } 65 - static inline u64 dev_id(struct gelic_net_card *card) 66 - { 67 - return card->dev->dev_id; 68 - } 58 + 59 + static inline void gelic_card_enable_rxdmac(struct gelic_card *card); 60 + static inline void gelic_card_disable_rxdmac(struct gelic_card *card); 61 + static inline void gelic_card_disable_txdmac(struct gelic_card *card); 62 + static inline void gelic_card_reset_chain(struct gelic_card *card, 63 + struct gelic_descr_chain *chain, 64 + struct gelic_descr *start_descr); 69 65 70 66 /* set irq_mask */ 71 - static int gelic_net_set_irq_mask(struct gelic_net_card *card, u64 mask) 67 + int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) 72 68 { 73 69 int status; 74 70 ··· 72 76 mask, 0); 73 77 if (status) 74 78 dev_info(ctodev(card), 75 - "lv1_net_set_interrupt_mask failed %d\n", status); 79 + "%s failed %d\n", __func__, status); 76 80 return status; 77 81 } 78 - static inline void gelic_net_rx_irq_on(struct gelic_net_card *card) 82 + 83 + static inline void gelic_card_rx_irq_on(struct gelic_card *card) 79 84 { 80 - gelic_net_set_irq_mask(card, card->ghiintmask | GELIC_NET_RXINT); 85 + card->irq_mask |= GELIC_CARD_RXINT; 86 + gelic_card_set_irq_mask(card, card->irq_mask); 81 87 } 82 - static inline void gelic_net_rx_irq_off(struct gelic_net_card *card) 88 + static inline void gelic_card_rx_irq_off(struct gelic_card *card) 83 89 { 84 - gelic_net_set_irq_mask(card, card->ghiintmask & 
~GELIC_NET_RXINT); 90 + card->irq_mask &= ~GELIC_CARD_RXINT; 91 + gelic_card_set_irq_mask(card, card->irq_mask); 85 92 } 93 + 94 + static void gelic_card_get_ether_port_status(struct gelic_card *card, 95 + int inform) 96 + { 97 + u64 v2; 98 + struct net_device *ether_netdev; 99 + 100 + lv1_net_control(bus_id(card), dev_id(card), 101 + GELIC_LV1_GET_ETH_PORT_STATUS, 102 + GELIC_LV1_VLAN_TX_ETHERNET, 0, 0, 103 + &card->ether_port_status, &v2); 104 + 105 + if (inform) { 106 + ether_netdev = card->netdev[GELIC_PORT_ETHERNET]; 107 + if (card->ether_port_status & GELIC_LV1_ETHER_LINK_UP) 108 + netif_carrier_on(ether_netdev); 109 + else 110 + netif_carrier_off(ether_netdev); 111 + } 112 + } 113 + 114 + void gelic_card_up(struct gelic_card *card) 115 + { 116 + pr_debug("%s: called\n", __func__); 117 + down(&card->updown_lock); 118 + if (atomic_inc_return(&card->users) == 1) { 119 + pr_debug("%s: real do\n", __func__); 120 + /* enable irq */ 121 + gelic_card_set_irq_mask(card, card->irq_mask); 122 + /* start rx */ 123 + gelic_card_enable_rxdmac(card); 124 + 125 + napi_enable(&card->napi); 126 + } 127 + up(&card->updown_lock); 128 + pr_debug("%s: done\n", __func__); 129 + } 130 + 131 + void gelic_card_down(struct gelic_card *card) 132 + { 133 + u64 mask; 134 + pr_debug("%s: called\n", __func__); 135 + down(&card->updown_lock); 136 + if (atomic_dec_if_positive(&card->users) == 0) { 137 + pr_debug("%s: real do\n", __func__); 138 + napi_disable(&card->napi); 139 + /* 140 + * Disable irq. 
Wireless interrupts will 141 + * be disabled later if any 142 + */ 143 + mask = card->irq_mask & (GELIC_CARD_WLAN_EVENT_RECEIVED | 144 + GELIC_CARD_WLAN_COMMAND_COMPLETED); 145 + gelic_card_set_irq_mask(card, mask); 146 + /* stop rx */ 147 + gelic_card_disable_rxdmac(card); 148 + gelic_card_reset_chain(card, &card->rx_chain, 149 + card->descr + GELIC_NET_TX_DESCRIPTORS); 150 + /* stop tx */ 151 + gelic_card_disable_txdmac(card); 152 + } 153 + up(&card->updown_lock); 154 + pr_debug("%s: done\n", __func__); 155 + } 156 + 86 157 /** 87 - * gelic_net_get_descr_status -- returns the status of a descriptor 158 + * gelic_descr_get_status -- returns the status of a descriptor 88 159 * @descr: descriptor to look at 89 160 * 90 161 * returns the status as in the dmac_cmd_status field of the descriptor 91 162 */ 92 - static enum gelic_net_descr_status 93 - gelic_net_get_descr_status(struct gelic_net_descr *descr) 163 + static enum gelic_descr_dma_status 164 + gelic_descr_get_status(struct gelic_descr *descr) 94 165 { 95 - u32 cmd_status; 96 - 97 - cmd_status = descr->dmac_cmd_status; 98 - cmd_status >>= GELIC_NET_DESCR_IND_PROC_SHIFT; 99 - return cmd_status; 166 + return be32_to_cpu(descr->dmac_cmd_status) & GELIC_DESCR_DMA_STAT_MASK; 100 167 } 101 168 102 169 /** 103 - * gelic_net_set_descr_status -- sets the status of a descriptor 170 + * gelic_descr_set_status -- sets the status of a descriptor 104 171 * @descr: descriptor to change 105 172 * @status: status to set in the descriptor 106 173 * 107 174 * changes the status to the specified value. 
Doesn't change other bits 108 175 * in the status 109 176 */ 110 - static void gelic_net_set_descr_status(struct gelic_net_descr *descr, 111 - enum gelic_net_descr_status status) 177 + static void gelic_descr_set_status(struct gelic_descr *descr, 178 + enum gelic_descr_dma_status status) 112 179 { 113 - u32 cmd_status; 114 - 115 - /* read the status */ 116 - cmd_status = descr->dmac_cmd_status; 117 - /* clean the upper 4 bits */ 118 - cmd_status &= GELIC_NET_DESCR_IND_PROC_MASKO; 119 - /* add the status to it */ 120 - cmd_status |= ((u32)status) << GELIC_NET_DESCR_IND_PROC_SHIFT; 121 - /* and write it back */ 122 - descr->dmac_cmd_status = cmd_status; 180 + descr->dmac_cmd_status = cpu_to_be32(status | 181 + (be32_to_cpu(descr->dmac_cmd_status) & 182 + ~GELIC_DESCR_DMA_STAT_MASK)); 123 183 /* 124 184 * dma_cmd_status field is used to indicate whether the descriptor 125 185 * is valid or not. ··· 186 134 } 187 135 188 136 /** 189 - * gelic_net_free_chain - free descriptor chain 137 + * gelic_card_free_chain - free descriptor chain 190 138 * @card: card structure 191 139 * @descr_in: address of desc 192 140 */ 193 - static void gelic_net_free_chain(struct gelic_net_card *card, 194 - struct gelic_net_descr *descr_in) 141 + static void gelic_card_free_chain(struct gelic_card *card, 142 + struct gelic_descr *descr_in) 195 143 { 196 - struct gelic_net_descr *descr; 144 + struct gelic_descr *descr; 197 145 198 146 for (descr = descr_in; descr && descr->bus_addr; descr = descr->next) { 199 147 dma_unmap_single(ctodev(card), descr->bus_addr, 200 - GELIC_NET_DESCR_SIZE, DMA_BIDIRECTIONAL); 148 + GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL); 201 149 descr->bus_addr = 0; 202 150 } 203 151 } 204 152 205 153 /** 206 - * gelic_net_init_chain - links descriptor chain 154 + * gelic_card_init_chain - links descriptor chain 207 155 * @card: card structure 208 156 * @chain: address of chain 209 157 * @start_descr: address of descriptor array ··· 214 162 * 215 163 * returns 0 on success, <0 on 
failure 216 164 */ 217 - static int gelic_net_init_chain(struct gelic_net_card *card, 218 - struct gelic_net_descr_chain *chain, 219 - struct gelic_net_descr *start_descr, int no) 165 + static int gelic_card_init_chain(struct gelic_card *card, 166 + struct gelic_descr_chain *chain, 167 + struct gelic_descr *start_descr, int no) 220 168 { 221 169 int i; 222 - struct gelic_net_descr *descr; 170 + struct gelic_descr *descr; 223 171 224 172 descr = start_descr; 225 173 memset(descr, 0, sizeof(*descr) * no); 226 174 227 175 /* set up the hardware pointers in each descriptor */ 228 176 for (i = 0; i < no; i++, descr++) { 229 - gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 177 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); 230 178 descr->bus_addr = 231 179 dma_map_single(ctodev(card), descr, 232 - GELIC_NET_DESCR_SIZE, 180 + GELIC_DESCR_SIZE, 233 181 DMA_BIDIRECTIONAL); 234 182 235 183 if (!descr->bus_addr) ··· 245 193 /* chain bus addr of hw descriptor */ 246 194 descr = start_descr; 247 195 for (i = 0; i < no; i++, descr++) { 248 - descr->next_descr_addr = descr->next->bus_addr; 196 + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); 249 197 } 250 198 251 199 chain->head = start_descr; ··· 260 208 for (i--, descr--; 0 <= i; i--, descr--) 261 209 if (descr->bus_addr) 262 210 dma_unmap_single(ctodev(card), descr->bus_addr, 263 - GELIC_NET_DESCR_SIZE, 211 + GELIC_DESCR_SIZE, 264 212 DMA_BIDIRECTIONAL); 265 213 return -ENOMEM; 266 214 } 267 215 268 216 /** 269 - * gelic_net_prepare_rx_descr - reinitializes a rx descriptor 217 + * gelic_card_reset_chain - reset status of a descriptor chain 218 + * @card: card structure 219 + * @chain: address of chain 220 + * @start_descr: address of descriptor array 221 + * 222 + * Reset the status of dma descriptors to ready state 223 + * and re-initialize the hardware chain for later use 224 + */ 225 + static void gelic_card_reset_chain(struct gelic_card *card, 226 + struct gelic_descr_chain 
*chain, 227 + struct gelic_descr *start_descr) 228 + { 229 + struct gelic_descr *descr; 230 + 231 + for (descr = start_descr; start_descr != descr->next; descr++) { 232 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); 233 + descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr); 234 + } 235 + 236 + chain->head = start_descr; 237 + chain->tail = (descr - 1); 238 + 239 + (descr - 1)->next_descr_addr = 0; 240 + } 241 + /** 242 + * gelic_descr_prepare_rx - reinitializes a rx descriptor 270 243 * @card: card structure 271 244 * @descr: descriptor to re-init 272 245 * ··· 300 223 * allocates a new rx skb, iommu-maps it and attaches it to the descriptor. 301 224 * Activate the descriptor state-wise 302 225 */ 303 - static int gelic_net_prepare_rx_descr(struct gelic_net_card *card, 304 - struct gelic_net_descr *descr) 226 + static int gelic_descr_prepare_rx(struct gelic_card *card, 227 + struct gelic_descr *descr) 305 228 { 306 229 int offset; 307 230 unsigned int bufsize; 308 231 309 - if (gelic_net_get_descr_status(descr) != GELIC_NET_DESCR_NOT_IN_USE) { 232 + if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) 310 233 dev_info(ctodev(card), "%s: ERROR status \n", __func__); 311 - } 312 234 /* we need to round up the buffer size to a multiple of 128 */ 313 235 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); 314 236 315 237 /* and we need to have it 128 byte aligned, therefore we allocate a 316 238 * bit more */ 317 - descr->skb = netdev_alloc_skb(card->netdev, 318 - bufsize + GELIC_NET_RXBUF_ALIGN - 1); 239 + descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1); 319 240 if (!descr->skb) { 320 241 descr->buf_addr = 0; /* tell DMAC don't touch memory */ 321 242 dev_info(ctodev(card), 322 243 "%s:allocate skb failed !!\n", __func__); 323 244 return -ENOMEM; 324 245 } 325 - descr->buf_size = bufsize; 246 + descr->buf_size = cpu_to_be32(bufsize); 326 247 descr->dmac_cmd_status = 0; 327 248 descr->result_size = 0; 328 249 
descr->valid_size = 0; ··· 331 256 if (offset) 332 257 skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset); 333 258 /* io-mmu-map the skb */ 334 - descr->buf_addr = dma_map_single(ctodev(card), descr->skb->data, 335 - GELIC_NET_MAX_MTU, 336 - DMA_FROM_DEVICE); 259 + descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card), 260 + descr->skb->data, 261 + GELIC_NET_MAX_MTU, 262 + DMA_FROM_DEVICE)); 337 263 if (!descr->buf_addr) { 338 264 dev_kfree_skb_any(descr->skb); 339 265 descr->skb = NULL; 340 266 dev_info(ctodev(card), 341 267 "%s:Could not iommu-map rx buffer\n", __func__); 342 - gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 268 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); 343 269 return -ENOMEM; 344 270 } else { 345 - gelic_net_set_descr_status(descr, GELIC_NET_DESCR_CARDOWNED); 271 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED); 346 272 return 0; 347 273 } 348 274 } 349 275 350 276 /** 351 - * gelic_net_release_rx_chain - free all skb of rx descr 277 + * gelic_card_release_rx_chain - free all skb of rx descr 352 278 * @card: card structure 353 279 * 354 280 */ 355 - static void gelic_net_release_rx_chain(struct gelic_net_card *card) 281 + static void gelic_card_release_rx_chain(struct gelic_card *card) 356 282 { 357 - struct gelic_net_descr *descr = card->rx_chain.head; 283 + struct gelic_descr *descr = card->rx_chain.head; 358 284 359 285 do { 360 286 if (descr->skb) { 361 287 dma_unmap_single(ctodev(card), 362 - descr->buf_addr, 288 + be32_to_cpu(descr->buf_addr), 363 289 descr->skb->len, 364 290 DMA_FROM_DEVICE); 365 291 descr->buf_addr = 0; 366 292 dev_kfree_skb_any(descr->skb); 367 293 descr->skb = NULL; 368 - gelic_net_set_descr_status(descr, 369 - GELIC_NET_DESCR_NOT_IN_USE); 294 + gelic_descr_set_status(descr, 295 + GELIC_DESCR_DMA_NOT_IN_USE); 370 296 } 371 297 descr = descr->next; 372 298 } while (descr != card->rx_chain.head); 373 299 } 374 300 375 301 /** 376 - * gelic_net_fill_rx_chain - fills 
descriptors/skbs in the rx chains 302 + * gelic_card_fill_rx_chain - fills descriptors/skbs in the rx chains 377 303 * @card: card structure 378 304 * 379 305 * fills all descriptors in the rx chain: allocates skbs 380 306 * and iommu-maps them. 381 - * returns 0 on success, <0 on failure 307 + * returns 0 on success, < 0 on failure 382 308 */ 383 - static int gelic_net_fill_rx_chain(struct gelic_net_card *card) 309 + static int gelic_card_fill_rx_chain(struct gelic_card *card) 384 310 { 385 - struct gelic_net_descr *descr = card->rx_chain.head; 311 + struct gelic_descr *descr = card->rx_chain.head; 386 312 int ret; 387 313 388 314 do { 389 315 if (!descr->skb) { 390 - ret = gelic_net_prepare_rx_descr(card, descr); 316 + ret = gelic_descr_prepare_rx(card, descr); 391 317 if (ret) 392 318 goto rewind; 393 319 } ··· 397 321 398 322 return 0; 399 323 rewind: 400 - gelic_net_release_rx_chain(card); 324 + gelic_card_release_rx_chain(card); 401 325 return ret; 402 326 } 403 327 404 328 /** 405 - * gelic_net_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 329 + * gelic_card_alloc_rx_skbs - allocates rx skbs in rx descriptor chains 406 330 * @card: card structure 407 331 * 408 - * returns 0 on success, <0 on failure 332 + * returns 0 on success, < 0 on failure 409 333 */ 410 - static int gelic_net_alloc_rx_skbs(struct gelic_net_card *card) 334 + static int gelic_card_alloc_rx_skbs(struct gelic_card *card) 411 335 { 412 - struct gelic_net_descr_chain *chain; 336 + struct gelic_descr_chain *chain; 413 337 int ret; 414 338 chain = &card->rx_chain; 415 - ret = gelic_net_fill_rx_chain(card); 416 - chain->head = card->rx_top->prev; /* point to the last */ 339 + ret = gelic_card_fill_rx_chain(card); 340 + chain->tail = card->rx_top->prev; /* point to the last */ 417 341 return ret; 418 342 } 419 343 420 344 /** 421 - * gelic_net_release_tx_descr - processes a used tx descriptor 345 + * gelic_descr_release_tx - processes a used tx descriptor 422 346 * @card: card 
structure 423 347 * @descr: descriptor to release 424 348 * 425 349 * releases a used tx descriptor (unmapping, freeing of skb) 426 350 */ 427 - static void gelic_net_release_tx_descr(struct gelic_net_card *card, 428 - struct gelic_net_descr *descr) 351 + static void gelic_descr_release_tx(struct gelic_card *card, 352 + struct gelic_descr *descr) 429 353 { 430 354 struct sk_buff *skb = descr->skb; 431 355 432 - BUG_ON(!(descr->data_status & (1 << GELIC_NET_TXDESC_TAIL))); 356 + BUG_ON(!(be32_to_cpu(descr->data_status) & GELIC_DESCR_TX_TAIL)); 433 357 434 - dma_unmap_single(ctodev(card), descr->buf_addr, skb->len, 358 + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), skb->len, 435 359 DMA_TO_DEVICE); 436 360 dev_kfree_skb_any(skb); 437 361 ··· 445 369 descr->skb = NULL; 446 370 447 371 /* set descr status */ 448 - gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 372 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); 449 373 } 450 374 375 + static void gelic_card_stop_queues(struct gelic_card *card) 376 + { 377 + netif_stop_queue(card->netdev[GELIC_PORT_ETHERNET]); 378 + 379 + if (card->netdev[GELIC_PORT_WIRELESS]) 380 + netif_stop_queue(card->netdev[GELIC_PORT_WIRELESS]); 381 + } 382 + static void gelic_card_wake_queues(struct gelic_card *card) 383 + { 384 + netif_wake_queue(card->netdev[GELIC_PORT_ETHERNET]); 385 + 386 + if (card->netdev[GELIC_PORT_WIRELESS]) 387 + netif_wake_queue(card->netdev[GELIC_PORT_WIRELESS]); 388 + } 451 389 /** 452 - * gelic_net_release_tx_chain - processes sent tx descriptors 390 + * gelic_card_release_tx_chain - processes sent tx descriptors 453 391 * @card: adapter structure 454 392 * @stop: net_stop sequence 455 393 * 456 394 * releases the tx descriptors that gelic has finished with 457 395 */ 458 - static void gelic_net_release_tx_chain(struct gelic_net_card *card, int stop) 396 + static void gelic_card_release_tx_chain(struct gelic_card *card, int stop) 459 397 { 460 - struct 
gelic_net_descr_chain *tx_chain; 461 - enum gelic_net_descr_status status; 398 + struct gelic_descr_chain *tx_chain; 399 + enum gelic_descr_dma_status status; 400 + struct net_device *netdev; 462 401 int release = 0; 463 402 464 403 for (tx_chain = &card->tx_chain; 465 404 tx_chain->head != tx_chain->tail && tx_chain->tail; 466 405 tx_chain->tail = tx_chain->tail->next) { 467 - status = gelic_net_get_descr_status(tx_chain->tail); 406 + status = gelic_descr_get_status(tx_chain->tail); 407 + netdev = tx_chain->tail->skb->dev; 468 408 switch (status) { 469 - case GELIC_NET_DESCR_RESPONSE_ERROR: 470 - case GELIC_NET_DESCR_PROTECTION_ERROR: 471 - case GELIC_NET_DESCR_FORCE_END: 409 + case GELIC_DESCR_DMA_RESPONSE_ERROR: 410 + case GELIC_DESCR_DMA_PROTECTION_ERROR: 411 + case GELIC_DESCR_DMA_FORCE_END: 472 412 if (printk_ratelimit()) 473 413 dev_info(ctodev(card), 474 414 "%s: forcing end of tx descriptor " \ 475 415 "with status %x\n", 476 416 __func__, status); 477 - card->netdev->stats.tx_dropped++; 417 + netdev->stats.tx_dropped++; 478 418 break; 479 419 480 - case GELIC_NET_DESCR_COMPLETE: 420 + case GELIC_DESCR_DMA_COMPLETE: 481 421 if (tx_chain->tail->skb) { 482 - card->netdev->stats.tx_packets++; 483 - card->netdev->stats.tx_bytes += 422 + netdev->stats.tx_packets++; 423 + netdev->stats.tx_bytes += 484 424 tx_chain->tail->skb->len; 485 425 } 486 426 break; 487 427 488 - case GELIC_NET_DESCR_CARDOWNED: 428 + case GELIC_DESCR_DMA_CARDOWNED: 489 429 /* pending tx request */ 490 430 default: 491 - /* any other value (== GELIC_NET_DESCR_NOT_IN_USE) */ 431 + /* any other value (== GELIC_DESCR_DMA_NOT_IN_USE) */ 492 432 if (!stop) 493 433 goto out; 494 434 } 495 - gelic_net_release_tx_descr(card, tx_chain->tail); 435 + gelic_descr_release_tx(card, tx_chain->tail); 496 436 release ++; 497 437 } 498 438 out: 499 439 if (!stop && release) 500 - netif_wake_queue(card->netdev); 440 + gelic_card_wake_queues(card); 501 441 } 502 442 503 443 /** ··· 524 432 * netdev interface. 
It also sets up multicast, allmulti and promisc 525 433 * flags appropriately 526 434 */ 527 - static void gelic_net_set_multi(struct net_device *netdev) 435 + void gelic_net_set_multi(struct net_device *netdev) 528 436 { 529 - struct gelic_net_card *card = netdev_priv(netdev); 437 + struct gelic_card *card = netdev_card(netdev); 530 438 struct dev_mc_list *mc; 531 439 unsigned int i; 532 440 uint8_t *p; ··· 548 456 "lv1_net_add_multicast_address failed, %d\n", 549 457 status); 550 458 551 - if (netdev->flags & IFF_ALLMULTI 552 - || netdev->mc_count > GELIC_NET_MC_COUNT_MAX) { /* list max */ 459 + if ((netdev->flags & IFF_ALLMULTI) || 460 + (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) { 553 461 status = lv1_net_add_multicast_address(bus_id(card), 554 462 dev_id(card), 555 463 0, 1); ··· 560 468 return; 561 469 } 562 470 563 - /* set multicast address */ 471 + /* set multicast addresses */ 564 472 for (mc = netdev->mc_list; mc; mc = mc->next) { 565 473 addr = 0; 566 474 p = mc->dmi_addr; ··· 579 487 } 580 488 581 489 /** 582 - * gelic_net_enable_rxdmac - enables the receive DMA controller 490 + * gelic_card_enable_rxdmac - enables the receive DMA controller 583 491 * @card: card structure 584 492 * 585 - * gelic_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 493 + * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN 586 494 * in the GDADMACCNTR register 587 495 */ 588 - static inline void gelic_net_enable_rxdmac(struct gelic_net_card *card) 496 + static inline void gelic_card_enable_rxdmac(struct gelic_card *card) 589 497 { 590 498 int status; 591 499 500 + #ifdef DEBUG 501 + if (gelic_descr_get_status(card->rx_chain.head) != 502 + GELIC_DESCR_DMA_CARDOWNED) { 503 + printk(KERN_ERR "%s: status=%x\n", __func__, 504 + be32_to_cpu(card->rx_chain.head->dmac_cmd_status)); 505 + printk(KERN_ERR "%s: nextphy=%x\n", __func__, 506 + be32_to_cpu(card->rx_chain.head->next_descr_addr)); 507 + printk(KERN_ERR "%s: head=%p\n", __func__, 
508 + card->rx_chain.head); 509 + } 510 + #endif 592 511 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card), 593 - card->rx_chain.tail->bus_addr, 0); 512 + card->rx_chain.head->bus_addr, 0); 594 513 if (status) 595 514 dev_info(ctodev(card), 596 515 "lv1_net_start_rx_dma failed, status=%d\n", status); 597 516 } 598 517 599 518 /** 600 - * gelic_net_disable_rxdmac - disables the receive DMA controller 519 + * gelic_card_disable_rxdmac - disables the receive DMA controller 601 520 * @card: card structure 602 521 * 603 - * gelic_net_disable_rxdmac terminates processing on the DMA controller by 522 + * gelic_card_disable_rxdmac terminates processing on the DMA controller by 604 523 * turing off DMA and issueing a force end 605 524 */ 606 - static inline void gelic_net_disable_rxdmac(struct gelic_net_card *card) 525 + static inline void gelic_card_disable_rxdmac(struct gelic_card *card) 607 526 { 608 527 int status; 609 528 ··· 626 523 } 627 524 628 525 /** 629 - * gelic_net_disable_txdmac - disables the transmit DMA controller 526 + * gelic_card_disable_txdmac - disables the transmit DMA controller 630 527 * @card: card structure 631 528 * 632 - * gelic_net_disable_txdmac terminates processing on the DMA controller by 529 + * gelic_card_disable_txdmac terminates processing on the DMA controller by 633 530 * turing off DMA and issueing a force end 634 531 */ 635 - static inline void gelic_net_disable_txdmac(struct gelic_net_card *card) 532 + static inline void gelic_card_disable_txdmac(struct gelic_card *card) 636 533 { 637 534 int status; 638 535 ··· 649 546 * 650 547 * always returns 0 651 548 */ 652 - static int gelic_net_stop(struct net_device *netdev) 549 + int gelic_net_stop(struct net_device *netdev) 653 550 { 654 - struct gelic_net_card *card = netdev_priv(netdev); 551 + struct gelic_card *card; 655 552 656 - napi_disable(&card->napi); 553 + pr_debug("%s: start\n", __func__); 554 + 657 555 netif_stop_queue(netdev); 658 - 659 - /* turn off DMA, force end */ 
660 - gelic_net_disable_rxdmac(card); 661 - gelic_net_disable_txdmac(card); 662 - 663 - gelic_net_set_irq_mask(card, 0); 664 - 665 - /* disconnect event port */ 666 - free_irq(card->netdev->irq, card->netdev); 667 - ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq); 668 - card->netdev->irq = NO_IRQ; 669 - 670 556 netif_carrier_off(netdev); 671 557 672 - /* release chains */ 673 - gelic_net_release_tx_chain(card, 1); 674 - gelic_net_release_rx_chain(card); 558 + card = netdev_card(netdev); 559 + gelic_card_down(card); 675 560 676 - gelic_net_free_chain(card, card->tx_top); 677 - gelic_net_free_chain(card, card->rx_top); 678 - 561 + pr_debug("%s: done\n", __func__); 679 562 return 0; 680 563 } 681 564 682 565 /** 683 - * gelic_net_get_next_tx_descr - returns the next available tx descriptor 566 + * gelic_card_get_next_tx_descr - returns the next available tx descriptor 684 567 * @card: device structure to get descriptor from 685 568 * 686 569 * returns the address of the next descriptor, or NULL if not available. 687 570 */ 688 - static struct gelic_net_descr * 689 - gelic_net_get_next_tx_descr(struct gelic_net_card *card) 571 + static struct gelic_descr * 572 + gelic_card_get_next_tx_descr(struct gelic_card *card) 690 573 { 691 574 if (!card->tx_chain.head) 692 575 return NULL; 693 576 /* see if the next descriptor is free */ 694 577 if (card->tx_chain.tail != card->tx_chain.head->next && 695 - gelic_net_get_descr_status(card->tx_chain.head) == 696 - GELIC_NET_DESCR_NOT_IN_USE) 578 + gelic_descr_get_status(card->tx_chain.head) == 579 + GELIC_DESCR_DMA_NOT_IN_USE) 697 580 return card->tx_chain.head; 698 581 else 699 582 return NULL; ··· 695 606 * depending on hardware checksum settings. This function assumes a wmb() 696 607 * has executed before. 
697 608 */ 698 - static void gelic_net_set_txdescr_cmdstat(struct gelic_net_descr *descr, 699 - struct sk_buff *skb) 609 + static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr, 610 + struct sk_buff *skb) 700 611 { 701 612 if (skb->ip_summed != CHECKSUM_PARTIAL) 702 - descr->dmac_cmd_status = GELIC_NET_DMAC_CMDSTAT_NOCS | 703 - GELIC_NET_DMAC_CMDSTAT_END_FRAME; 613 + descr->dmac_cmd_status = 614 + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | 615 + GELIC_DESCR_TX_DMA_FRAME_TAIL); 704 616 else { 705 617 /* is packet ip? 706 618 * if yes: tcp? udp? */ 707 619 if (skb->protocol == htons(ETH_P_IP)) { 708 620 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 709 621 descr->dmac_cmd_status = 710 - GELIC_NET_DMAC_CMDSTAT_TCPCS | 711 - GELIC_NET_DMAC_CMDSTAT_END_FRAME; 622 + cpu_to_be32(GELIC_DESCR_DMA_CMD_TCP_CHKSUM | 623 + GELIC_DESCR_TX_DMA_FRAME_TAIL); 712 624 713 625 else if (ip_hdr(skb)->protocol == IPPROTO_UDP) 714 626 descr->dmac_cmd_status = 715 - GELIC_NET_DMAC_CMDSTAT_UDPCS | 716 - GELIC_NET_DMAC_CMDSTAT_END_FRAME; 627 + cpu_to_be32(GELIC_DESCR_DMA_CMD_UDP_CHKSUM | 628 + GELIC_DESCR_TX_DMA_FRAME_TAIL); 717 629 else /* 718 630 * the stack should checksum non-tcp and non-udp 719 631 * packets on his own: NETIF_F_IP_CSUM 720 632 */ 721 633 descr->dmac_cmd_status = 722 - GELIC_NET_DMAC_CMDSTAT_NOCS | 723 - GELIC_NET_DMAC_CMDSTAT_END_FRAME; 634 + cpu_to_be32(GELIC_DESCR_DMA_CMD_NO_CHKSUM | 635 + GELIC_DESCR_TX_DMA_FRAME_TAIL); 724 636 } 725 637 } 726 638 } ··· 752 662 } 753 663 754 664 /** 755 - * gelic_net_prepare_tx_descr_v - get dma address of skb_data 665 + * gelic_descr_prepare_tx - setup a descriptor for sending packets 756 666 * @card: card structure 757 667 * @descr: descriptor structure 758 668 * @skb: packet to use ··· 760 670 * returns 0 on success, <0 on failure. 
761 671 * 762 672 */ 763 - static int gelic_net_prepare_tx_descr_v(struct gelic_net_card *card, 764 - struct gelic_net_descr *descr, 765 - struct sk_buff *skb) 673 + static int gelic_descr_prepare_tx(struct gelic_card *card, 674 + struct gelic_descr *descr, 675 + struct sk_buff *skb) 766 676 { 767 677 dma_addr_t buf; 768 678 769 - if (card->vlan_index != -1) { 679 + if (card->vlan_required) { 770 680 struct sk_buff *skb_tmp; 681 + enum gelic_port_type type; 682 + 683 + type = netdev_port(skb->dev)->type; 771 684 skb_tmp = gelic_put_vlan_tag(skb, 772 - card->vlan_id[card->vlan_index]); 685 + card->vlan[type].tx); 773 686 if (!skb_tmp) 774 687 return -ENOMEM; 775 688 skb = skb_tmp; ··· 787 694 return -ENOMEM; 788 695 } 789 696 790 - descr->buf_addr = buf; 791 - descr->buf_size = skb->len; 697 + descr->buf_addr = cpu_to_be32(buf); 698 + descr->buf_size = cpu_to_be32(skb->len); 792 699 descr->skb = skb; 793 700 descr->data_status = 0; 794 701 descr->next_descr_addr = 0; /* terminate hw descr */ 795 - gelic_net_set_txdescr_cmdstat(descr, skb); 702 + gelic_descr_set_tx_cmdstat(descr, skb); 796 703 797 704 /* bump free descriptor pointer */ 798 705 card->tx_chain.head = descr->next; ··· 800 707 } 801 708 802 709 /** 803 - * gelic_net_kick_txdma - enables TX DMA processing 710 + * gelic_card_kick_txdma - enables TX DMA processing 804 711 * @card: card structure 805 712 * @descr: descriptor address to enable TX processing at 806 713 * 807 714 */ 808 - static int gelic_net_kick_txdma(struct gelic_net_card *card, 809 - struct gelic_net_descr *descr) 715 + static int gelic_card_kick_txdma(struct gelic_card *card, 716 + struct gelic_descr *descr) 810 717 { 811 718 int status = 0; 812 719 813 720 if (card->tx_dma_progress) 814 721 return 0; 815 722 816 - if (gelic_net_get_descr_status(descr) == GELIC_NET_DESCR_CARDOWNED) { 723 + if (gelic_descr_get_status(descr) == GELIC_DESCR_DMA_CARDOWNED) { 817 724 card->tx_dma_progress = 1; 818 725 status = lv1_net_start_tx_dma(bus_id(card), 
dev_id(card), 819 726 descr->bus_addr, 0); ··· 831 738 * 832 739 * returns 0 on success, <0 on failure 833 740 */ 834 - static int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 741 + int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev) 835 742 { 836 - struct gelic_net_card *card = netdev_priv(netdev); 837 - struct gelic_net_descr *descr; 743 + struct gelic_card *card = netdev_card(netdev); 744 + struct gelic_descr *descr; 838 745 int result; 839 746 unsigned long flags; 840 747 841 - spin_lock_irqsave(&card->tx_dma_lock, flags); 748 + spin_lock_irqsave(&card->tx_lock, flags); 842 749 843 - gelic_net_release_tx_chain(card, 0); 750 + gelic_card_release_tx_chain(card, 0); 844 751 845 - descr = gelic_net_get_next_tx_descr(card); 752 + descr = gelic_card_get_next_tx_descr(card); 846 753 if (!descr) { 847 754 /* 848 755 * no more descriptors free 849 756 */ 850 - netif_stop_queue(netdev); 851 - spin_unlock_irqrestore(&card->tx_dma_lock, flags); 757 + gelic_card_stop_queues(card); 758 + spin_unlock_irqrestore(&card->tx_lock, flags); 852 759 return NETDEV_TX_BUSY; 853 760 } 854 761 855 - result = gelic_net_prepare_tx_descr_v(card, descr, skb); 762 + result = gelic_descr_prepare_tx(card, descr, skb); 856 763 if (result) { 857 764 /* 858 765 * DMA map failed. 
As chanses are that failure 859 766 * would continue, just release skb and return 860 767 */ 861 - card->netdev->stats.tx_dropped++; 768 + netdev->stats.tx_dropped++; 862 769 dev_kfree_skb_any(skb); 863 - spin_unlock_irqrestore(&card->tx_dma_lock, flags); 770 + spin_unlock_irqrestore(&card->tx_lock, flags); 864 771 return NETDEV_TX_OK; 865 772 } 866 773 /* 867 774 * link this prepared descriptor to previous one 868 775 * to achieve high performance 869 776 */ 870 - descr->prev->next_descr_addr = descr->bus_addr; 777 + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); 871 778 /* 872 779 * as hardware descriptor is modified in the above lines, 873 780 * ensure that the hardware sees it 874 781 */ 875 782 wmb(); 876 - if (gelic_net_kick_txdma(card, descr)) { 783 + if (gelic_card_kick_txdma(card, descr)) { 877 784 /* 878 785 * kick failed. 879 786 * release descriptors which were just prepared 880 787 */ 881 - card->netdev->stats.tx_dropped++; 882 - gelic_net_release_tx_descr(card, descr); 883 - gelic_net_release_tx_descr(card, descr->next); 788 + netdev->stats.tx_dropped++; 789 + gelic_descr_release_tx(card, descr); 790 + gelic_descr_release_tx(card, descr->next); 884 791 card->tx_chain.tail = descr->next->next; 885 792 dev_info(ctodev(card), "%s: kick failure\n", __func__); 886 793 } else { ··· 888 795 netdev->trans_start = jiffies; 889 796 } 890 797 891 - spin_unlock_irqrestore(&card->tx_dma_lock, flags); 798 + spin_unlock_irqrestore(&card->tx_lock, flags); 892 799 return NETDEV_TX_OK; 893 800 } 894 801 ··· 896 803 * gelic_net_pass_skb_up - takes an skb from a descriptor and passes it on 897 804 * @descr: descriptor to process 898 805 * @card: card structure 806 + * @netdev: net_device structure to be passed packet 899 807 * 900 808 * iommu-unmaps the skb, fills out skb structure and passes the data to the 901 809 * stack. The descriptor state is not changed. 
902 810 */ 903 - static void gelic_net_pass_skb_up(struct gelic_net_descr *descr, 904 - struct gelic_net_card *card) 811 + static void gelic_net_pass_skb_up(struct gelic_descr *descr, 812 + struct gelic_card *card, 813 + struct net_device *netdev) 814 + 905 815 { 906 - struct sk_buff *skb; 907 - struct net_device *netdev; 816 + struct sk_buff *skb = descr->skb; 908 817 u32 data_status, data_error; 909 818 910 - data_status = descr->data_status; 911 - data_error = descr->data_error; 912 - netdev = card->netdev; 819 + data_status = be32_to_cpu(descr->data_status); 820 + data_error = be32_to_cpu(descr->data_error); 913 821 /* unmap skb buffer */ 914 - skb = descr->skb; 915 - dma_unmap_single(ctodev(card), descr->buf_addr, GELIC_NET_MAX_MTU, 822 + dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr), 823 + GELIC_NET_MAX_MTU, 916 824 DMA_FROM_DEVICE); 917 825 918 - skb_put(skb, descr->valid_size? descr->valid_size : descr->result_size); 826 + skb_put(skb, be32_to_cpu(descr->valid_size)? 
827 + be32_to_cpu(descr->valid_size) : 828 + be32_to_cpu(descr->result_size)); 919 829 if (!descr->valid_size) 920 830 dev_info(ctodev(card), "buffer full %x %x %x\n", 921 - descr->result_size, descr->buf_size, 922 - descr->dmac_cmd_status); 831 + be32_to_cpu(descr->result_size), 832 + be32_to_cpu(descr->buf_size), 833 + be32_to_cpu(descr->dmac_cmd_status)); 923 834 924 835 descr->skb = NULL; 925 836 /* ··· 935 838 936 839 /* checksum offload */ 937 840 if (card->rx_csum) { 938 - if ((data_status & GELIC_NET_DATA_STATUS_CHK_MASK) && 939 - (!(data_error & GELIC_NET_DATA_ERROR_CHK_MASK))) 841 + if ((data_status & GELIC_DESCR_DATA_STATUS_CHK_MASK) && 842 + (!(data_error & GELIC_DESCR_DATA_ERROR_CHK_MASK))) 940 843 skb->ip_summed = CHECKSUM_UNNECESSARY; 941 844 else 942 845 skb->ip_summed = CHECKSUM_NONE; ··· 944 847 skb->ip_summed = CHECKSUM_NONE; 945 848 946 849 /* update netdevice statistics */ 947 - card->netdev->stats.rx_packets++; 948 - card->netdev->stats.rx_bytes += skb->len; 850 + netdev->stats.rx_packets++; 851 + netdev->stats.rx_bytes += skb->len; 949 852 950 853 /* pass skb up to stack */ 951 854 netif_receive_skb(skb); 952 855 } 953 856 954 857 /** 955 - * gelic_net_decode_one_descr - processes an rx descriptor 858 + * gelic_card_decode_one_descr - processes an rx descriptor 956 859 * @card: card structure 957 860 * 958 861 * returns 1 if a packet has been sent to the stack, otherwise 0 ··· 960 863 * processes an rx descriptor by iommu-unmapping the data buffer and passing 961 864 * the packet up to the stack 962 865 */ 963 - static int gelic_net_decode_one_descr(struct gelic_net_card *card) 866 + static int gelic_card_decode_one_descr(struct gelic_card *card) 964 867 { 965 - enum gelic_net_descr_status status; 966 - struct gelic_net_descr_chain *chain = &card->rx_chain; 967 - struct gelic_net_descr *descr = chain->tail; 868 + enum gelic_descr_dma_status status; 869 + struct gelic_descr_chain *chain = &card->rx_chain; 870 + struct gelic_descr *descr = 
chain->head; 871 + struct net_device *netdev = NULL; 968 872 int dmac_chain_ended; 969 873 970 - status = gelic_net_get_descr_status(descr); 874 + status = gelic_descr_get_status(descr); 971 875 /* is this descriptor terminated with next_descr == NULL? */ 972 876 dmac_chain_ended = 973 - descr->dmac_cmd_status & GELIC_NET_DMAC_CMDSTAT_RXDCEIS; 877 + be32_to_cpu(descr->dmac_cmd_status) & 878 + GELIC_DESCR_RX_DMA_CHAIN_END; 974 879 975 - if (status == GELIC_NET_DESCR_CARDOWNED) 880 + if (status == GELIC_DESCR_DMA_CARDOWNED) 976 881 return 0; 977 882 978 - if (status == GELIC_NET_DESCR_NOT_IN_USE) { 883 + if (status == GELIC_DESCR_DMA_NOT_IN_USE) { 979 884 dev_dbg(ctodev(card), "dormant descr? %p\n", descr); 980 885 return 0; 981 886 } 982 887 983 - if ((status == GELIC_NET_DESCR_RESPONSE_ERROR) || 984 - (status == GELIC_NET_DESCR_PROTECTION_ERROR) || 985 - (status == GELIC_NET_DESCR_FORCE_END)) { 888 + /* netdevice select */ 889 + if (card->vlan_required) { 890 + unsigned int i; 891 + u16 vid; 892 + vid = *(u16 *)(descr->skb->data) & VLAN_VID_MASK; 893 + for (i = 0; i < GELIC_PORT_MAX; i++) { 894 + if (card->vlan[i].rx == vid) { 895 + netdev = card->netdev[i]; 896 + break; 897 + } 898 + }; 899 + if (GELIC_PORT_MAX <= i) { 900 + pr_info("%s: unknown packet vid=%x\n", __func__, vid); 901 + goto refill; 902 + } 903 + } else 904 + netdev = card->netdev[GELIC_PORT_ETHERNET]; 905 + 906 + if ((status == GELIC_DESCR_DMA_RESPONSE_ERROR) || 907 + (status == GELIC_DESCR_DMA_PROTECTION_ERROR) || 908 + (status == GELIC_DESCR_DMA_FORCE_END)) { 986 909 dev_info(ctodev(card), "dropping RX descriptor with state %x\n", 987 910 status); 988 - card->netdev->stats.rx_dropped++; 911 + netdev->stats.rx_dropped++; 989 912 goto refill; 990 913 } 991 914 992 - if (status == GELIC_NET_DESCR_BUFFER_FULL) { 915 + if (status == GELIC_DESCR_DMA_BUFFER_FULL) { 993 916 /* 994 917 * Buffer full would occur if and only if 995 918 * the frame length was longer than the size of this ··· 1026 909 * 
descriptoers any other than FRAME_END here should 1027 910 * be treated as error. 1028 911 */ 1029 - if (status != GELIC_NET_DESCR_FRAME_END) { 912 + if (status != GELIC_DESCR_DMA_FRAME_END) { 1030 913 dev_dbg(ctodev(card), "RX descriptor with state %x\n", 1031 914 status); 1032 915 goto refill; 1033 916 } 1034 917 1035 918 /* ok, we've got a packet in descr */ 1036 - gelic_net_pass_skb_up(descr, card); 919 + gelic_net_pass_skb_up(descr, card, netdev); 1037 920 refill: 1038 921 /* 1039 922 * So that always DMAC can see the end ··· 1043 926 descr->next_descr_addr = 0; 1044 927 1045 928 /* change the descriptor state: */ 1046 - gelic_net_set_descr_status(descr, GELIC_NET_DESCR_NOT_IN_USE); 929 + gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE); 1047 930 1048 931 /* 1049 932 * this call can fail, but for now, just leave this 1050 933 * decriptor without skb 1051 934 */ 1052 - gelic_net_prepare_rx_descr(card, descr); 935 + gelic_descr_prepare_rx(card, descr); 1053 936 1054 - chain->head = descr; 1055 - chain->tail = descr->next; 937 + chain->tail = descr; 938 + chain->head = descr->next; 1056 939 1057 940 /* 1058 941 * Set this descriptor the end of the chain. 1059 942 */ 1060 - descr->prev->next_descr_addr = descr->bus_addr; 943 + descr->prev->next_descr_addr = cpu_to_be32(descr->bus_addr); 1061 944 1062 945 /* 1063 946 * If dmac chain was met, DMAC stopped. ··· 1073 956 1074 957 /** 1075 958 * gelic_net_poll - NAPI poll function called by the stack to return packets 1076 - * @netdev: interface device structure 959 + * @napi: napi structure 1077 960 * @budget: number of packets we can pass to the stack at most 1078 961 * 1079 - * returns 0 if no more packets available to the driver/stack. Returns 1, 1080 - * if the quota is exceeded, but the driver has still packets. 
962 + * returns the number of the processed packets 1081 963 * 1082 964 */ 1083 965 static int gelic_net_poll(struct napi_struct *napi, int budget) 1084 966 { 1085 - struct gelic_net_card *card = container_of(napi, struct gelic_net_card, napi); 1086 - struct net_device *netdev = card->netdev; 967 + struct gelic_card *card = container_of(napi, struct gelic_card, napi); 1087 968 int packets_done = 0; 1088 969 1089 970 while (packets_done < budget) { 1090 - if (!gelic_net_decode_one_descr(card)) 971 + if (!gelic_card_decode_one_descr(card)) 1091 972 break; 1092 973 1093 974 packets_done++; 1094 975 } 1095 976 1096 977 if (packets_done < budget) { 1097 - netif_rx_complete(netdev, napi); 1098 - gelic_net_rx_irq_on(card); 978 + napi_complete(napi); 979 + gelic_card_rx_irq_on(card); 1099 980 } 1100 981 return packets_done; 1101 982 } ··· 1104 989 * 1105 990 * returns 0 on success, <0 on failure 1106 991 */ 1107 - static int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) 992 + int gelic_net_change_mtu(struct net_device *netdev, int new_mtu) 1108 993 { 1109 994 /* no need to re-alloc skbs or so -- the max mtu is about 2.3k 1110 995 * and mtu is outbound only anyway */ ··· 1117 1002 } 1118 1003 1119 1004 /** 1120 - * gelic_net_interrupt - event handler for gelic_net 1005 + * gelic_card_interrupt - event handler for gelic_net 1121 1006 */ 1122 - static irqreturn_t gelic_net_interrupt(int irq, void *ptr) 1007 + static irqreturn_t gelic_card_interrupt(int irq, void *ptr) 1123 1008 { 1124 1009 unsigned long flags; 1125 - struct net_device *netdev = ptr; 1126 - struct gelic_net_card *card = netdev_priv(netdev); 1010 + struct gelic_card *card = ptr; 1127 1011 u64 status; 1128 1012 1129 1013 status = card->irq_status; ··· 1130 1016 if (!status) 1131 1017 return IRQ_NONE; 1132 1018 1019 + status &= card->irq_mask; 1020 + 1133 1021 if (card->rx_dma_restart_required) { 1134 1022 card->rx_dma_restart_required = 0; 1135 - gelic_net_enable_rxdmac(card); 1023 + 
gelic_card_enable_rxdmac(card); 1136 1024 } 1137 1025 1138 - if (status & GELIC_NET_RXINT) { 1139 - gelic_net_rx_irq_off(card); 1140 - netif_rx_schedule(netdev, &card->napi); 1026 + if (status & GELIC_CARD_RXINT) { 1027 + gelic_card_rx_irq_off(card); 1028 + napi_schedule(&card->napi); 1141 1029 } 1142 1030 1143 - if (status & GELIC_NET_TXINT) { 1144 - spin_lock_irqsave(&card->tx_dma_lock, flags); 1031 + if (status & GELIC_CARD_TXINT) { 1032 + spin_lock_irqsave(&card->tx_lock, flags); 1145 1033 card->tx_dma_progress = 0; 1146 - gelic_net_release_tx_chain(card, 0); 1034 + gelic_card_release_tx_chain(card, 0); 1147 1035 /* kick outstanding tx descriptor if any */ 1148 - gelic_net_kick_txdma(card, card->tx_chain.tail); 1149 - spin_unlock_irqrestore(&card->tx_dma_lock, flags); 1036 + gelic_card_kick_txdma(card, card->tx_chain.tail); 1037 + spin_unlock_irqrestore(&card->tx_lock, flags); 1150 1038 } 1039 + 1040 + /* ether port status changed */ 1041 + if (status & GELIC_CARD_PORT_STATUS_CHANGED) 1042 + gelic_card_get_ether_port_status(card, 1); 1043 + 1044 + #ifdef CONFIG_GELIC_WIRELESS 1045 + if (status & (GELIC_CARD_WLAN_EVENT_RECEIVED | 1046 + GELIC_CARD_WLAN_COMMAND_COMPLETED)) 1047 + gelic_wl_interrupt(card->netdev[GELIC_PORT_WIRELESS], status); 1048 + #endif 1049 + 1151 1050 return IRQ_HANDLED; 1152 1051 } 1153 1052 ··· 1171 1044 * 1172 1045 * see Documentation/networking/netconsole.txt 1173 1046 */ 1174 - static void gelic_net_poll_controller(struct net_device *netdev) 1047 + void gelic_net_poll_controller(struct net_device *netdev) 1175 1048 { 1176 - struct gelic_net_card *card = netdev_priv(netdev); 1049 + struct gelic_card *card = netdev_card(netdev); 1177 1050 1178 - gelic_net_set_irq_mask(card, 0); 1179 - gelic_net_interrupt(netdev->irq, netdev); 1180 - gelic_net_set_irq_mask(card, card->ghiintmask); 1051 + gelic_card_set_irq_mask(card, 0); 1052 + gelic_card_interrupt(netdev->irq, netdev); 1053 + gelic_card_set_irq_mask(card, card->irq_mask); 1181 1054 } 1182 
1055 #endif /* CONFIG_NET_POLL_CONTROLLER */ 1183 - 1184 - /** 1185 - * gelic_net_open_device - open device and map dma region 1186 - * @card: card structure 1187 - */ 1188 - static int gelic_net_open_device(struct gelic_net_card *card) 1189 - { 1190 - int result; 1191 - 1192 - result = ps3_sb_event_receive_port_setup(card->dev, PS3_BINDING_CPU_ANY, 1193 - &card->netdev->irq); 1194 - 1195 - if (result) { 1196 - dev_info(ctodev(card), 1197 - "%s:%d: gelic_net_open_device failed (%d)\n", 1198 - __func__, __LINE__, result); 1199 - result = -EPERM; 1200 - goto fail_alloc_irq; 1201 - } 1202 - 1203 - result = request_irq(card->netdev->irq, gelic_net_interrupt, 1204 - IRQF_DISABLED, card->netdev->name, card->netdev); 1205 - 1206 - if (result) { 1207 - dev_info(ctodev(card), "%s:%d: request_irq failed (%d)\n", 1208 - __func__, __LINE__, result); 1209 - goto fail_request_irq; 1210 - } 1211 - 1212 - return 0; 1213 - 1214 - fail_request_irq: 1215 - ps3_sb_event_receive_port_destroy(card->dev, card->netdev->irq); 1216 - card->netdev->irq = NO_IRQ; 1217 - fail_alloc_irq: 1218 - return result; 1219 - } 1220 - 1221 1056 1222 1057 /** 1223 1058 * gelic_net_open - called upon ifonfig up ··· 1190 1101 * gelic_net_open allocates all the descriptors and memory needed for 1191 1102 * operation, sets up multicast list and enables interrupts 1192 1103 */ 1193 - static int gelic_net_open(struct net_device *netdev) 1104 + int gelic_net_open(struct net_device *netdev) 1194 1105 { 1195 - struct gelic_net_card *card = netdev_priv(netdev); 1106 + struct gelic_card *card = netdev_card(netdev); 1196 1107 1197 - dev_dbg(ctodev(card), " -> %s:%d\n", __func__, __LINE__); 1108 + dev_dbg(ctodev(card), " -> %s %p\n", __func__, netdev); 1198 1109 1199 - gelic_net_open_device(card); 1200 - 1201 - if (gelic_net_init_chain(card, &card->tx_chain, 1202 - card->descr, GELIC_NET_TX_DESCRIPTORS)) 1203 - goto alloc_tx_failed; 1204 - if (gelic_net_init_chain(card, &card->rx_chain, 1205 - card->descr + 
GELIC_NET_TX_DESCRIPTORS, 1206 - GELIC_NET_RX_DESCRIPTORS)) 1207 - goto alloc_rx_failed; 1208 - 1209 - /* head of chain */ 1210 - card->tx_top = card->tx_chain.head; 1211 - card->rx_top = card->rx_chain.head; 1212 - dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n", 1213 - card->rx_top, card->tx_top, sizeof(struct gelic_net_descr), 1214 - GELIC_NET_RX_DESCRIPTORS); 1215 - /* allocate rx skbs */ 1216 - if (gelic_net_alloc_rx_skbs(card)) 1217 - goto alloc_skbs_failed; 1218 - 1219 - napi_enable(&card->napi); 1220 - 1221 - card->tx_dma_progress = 0; 1222 - card->ghiintmask = GELIC_NET_RXINT | GELIC_NET_TXINT; 1223 - 1224 - gelic_net_set_irq_mask(card, card->ghiintmask); 1225 - gelic_net_enable_rxdmac(card); 1110 + gelic_card_up(card); 1226 1111 1227 1112 netif_start_queue(netdev); 1228 - netif_carrier_on(netdev); 1113 + gelic_card_get_ether_port_status(card, 1); 1229 1114 1115 + dev_dbg(ctodev(card), " <- %s\n", __func__); 1230 1116 return 0; 1231 - 1232 - alloc_skbs_failed: 1233 - gelic_net_free_chain(card, card->rx_top); 1234 - alloc_rx_failed: 1235 - gelic_net_free_chain(card, card->tx_top); 1236 - alloc_tx_failed: 1237 - return -ENOMEM; 1238 1117 } 1239 1118 1240 - static void gelic_net_get_drvinfo (struct net_device *netdev, 1241 - struct ethtool_drvinfo *info) 1119 + void gelic_net_get_drvinfo(struct net_device *netdev, 1120 + struct ethtool_drvinfo *info) 1242 1121 { 1243 1122 strncpy(info->driver, DRV_NAME, sizeof(info->driver) - 1); 1244 1123 strncpy(info->version, DRV_VERSION, sizeof(info->version) - 1); 1245 1124 } 1246 1125 1247 - static int gelic_net_get_settings(struct net_device *netdev, 1248 - struct ethtool_cmd *cmd) 1126 + static int gelic_ether_get_settings(struct net_device *netdev, 1127 + struct ethtool_cmd *cmd) 1249 1128 { 1250 - struct gelic_net_card *card = netdev_priv(netdev); 1251 - int status; 1252 - u64 v1, v2; 1253 - int speed, duplex; 1129 + struct gelic_card *card = netdev_card(netdev); 1254 1130 1255 - speed = duplex = 
-1; 1256 - status = lv1_net_control(bus_id(card), dev_id(card), 1257 - GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0, 1258 - &v1, &v2); 1259 - if (status) { 1260 - /* link down */ 1261 - } else { 1262 - if (v1 & GELIC_NET_FULL_DUPLEX) { 1263 - duplex = DUPLEX_FULL; 1264 - } else { 1265 - duplex = DUPLEX_HALF; 1266 - } 1131 + gelic_card_get_ether_port_status(card, 0); 1267 1132 1268 - if (v1 & GELIC_NET_SPEED_10 ) { 1269 - speed = SPEED_10; 1270 - } else if (v1 & GELIC_NET_SPEED_100) { 1271 - speed = SPEED_100; 1272 - } else if (v1 & GELIC_NET_SPEED_1000) { 1273 - speed = SPEED_1000; 1274 - } 1133 + if (card->ether_port_status & GELIC_LV1_ETHER_FULL_DUPLEX) 1134 + cmd->duplex = DUPLEX_FULL; 1135 + else 1136 + cmd->duplex = DUPLEX_HALF; 1137 + 1138 + switch (card->ether_port_status & GELIC_LV1_ETHER_SPEED_MASK) { 1139 + case GELIC_LV1_ETHER_SPEED_10: 1140 + cmd->speed = SPEED_10; 1141 + break; 1142 + case GELIC_LV1_ETHER_SPEED_100: 1143 + cmd->speed = SPEED_100; 1144 + break; 1145 + case GELIC_LV1_ETHER_SPEED_1000: 1146 + cmd->speed = SPEED_1000; 1147 + break; 1148 + default: 1149 + pr_info("%s: speed unknown\n", __func__); 1150 + cmd->speed = SPEED_10; 1151 + break; 1275 1152 } 1153 + 1276 1154 cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | 1277 1155 SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | 1278 1156 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | 1279 1157 SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full; 1280 1158 cmd->advertising = cmd->supported; 1281 - cmd->speed = speed; 1282 - cmd->duplex = duplex; 1283 1159 cmd->autoneg = AUTONEG_ENABLE; /* always enabled */ 1284 1160 cmd->port = PORT_TP; 1285 1161 1286 1162 return 0; 1287 1163 } 1288 1164 1289 - static u32 gelic_net_get_link(struct net_device *netdev) 1165 + u32 gelic_net_get_rx_csum(struct net_device *netdev) 1290 1166 { 1291 - struct gelic_net_card *card = netdev_priv(netdev); 1292 - int status; 1293 - u64 v1, v2; 1294 - int link; 1295 - 1296 - status = 
lv1_net_control(bus_id(card), dev_id(card), 1297 - GELIC_NET_GET_ETH_PORT_STATUS, GELIC_NET_PORT, 0, 0, 1298 - &v1, &v2); 1299 - if (status) 1300 - return 0; /* link down */ 1301 - 1302 - if (v1 & GELIC_NET_LINK_UP) 1303 - link = 1; 1304 - else 1305 - link = 0; 1306 - 1307 - return link; 1308 - } 1309 - 1310 - static int gelic_net_nway_reset(struct net_device *netdev) 1311 - { 1312 - if (netif_running(netdev)) { 1313 - gelic_net_stop(netdev); 1314 - gelic_net_open(netdev); 1315 - } 1316 - return 0; 1317 - } 1318 - 1319 - static u32 gelic_net_get_tx_csum(struct net_device *netdev) 1320 - { 1321 - return (netdev->features & NETIF_F_IP_CSUM) != 0; 1322 - } 1323 - 1324 - static int gelic_net_set_tx_csum(struct net_device *netdev, u32 data) 1325 - { 1326 - if (data) 1327 - netdev->features |= NETIF_F_IP_CSUM; 1328 - else 1329 - netdev->features &= ~NETIF_F_IP_CSUM; 1330 - 1331 - return 0; 1332 - } 1333 - 1334 - static u32 gelic_net_get_rx_csum(struct net_device *netdev) 1335 - { 1336 - struct gelic_net_card *card = netdev_priv(netdev); 1167 + struct gelic_card *card = netdev_card(netdev); 1337 1168 1338 1169 return card->rx_csum; 1339 1170 } 1340 1171 1341 - static int gelic_net_set_rx_csum(struct net_device *netdev, u32 data) 1172 + int gelic_net_set_rx_csum(struct net_device *netdev, u32 data) 1342 1173 { 1343 - struct gelic_net_card *card = netdev_priv(netdev); 1174 + struct gelic_card *card = netdev_card(netdev); 1344 1175 1345 1176 card->rx_csum = data; 1346 1177 return 0; 1347 1178 } 1348 1179 1349 - static struct ethtool_ops gelic_net_ethtool_ops = { 1180 + static struct ethtool_ops gelic_ether_ethtool_ops = { 1350 1181 .get_drvinfo = gelic_net_get_drvinfo, 1351 - .get_settings = gelic_net_get_settings, 1352 - .get_link = gelic_net_get_link, 1353 - .nway_reset = gelic_net_nway_reset, 1354 - .get_tx_csum = gelic_net_get_tx_csum, 1355 - .set_tx_csum = gelic_net_set_tx_csum, 1182 + .get_settings = gelic_ether_get_settings, 1183 + .get_link = ethtool_op_get_link, 
1184 + .get_tx_csum = ethtool_op_get_tx_csum, 1185 + .set_tx_csum = ethtool_op_set_tx_csum, 1356 1186 .get_rx_csum = gelic_net_get_rx_csum, 1357 1187 .set_rx_csum = gelic_net_set_rx_csum, 1358 1188 }; ··· 1285 1277 */ 1286 1278 static void gelic_net_tx_timeout_task(struct work_struct *work) 1287 1279 { 1288 - struct gelic_net_card *card = 1289 - container_of(work, struct gelic_net_card, tx_timeout_task); 1290 - struct net_device *netdev = card->netdev; 1280 + struct gelic_card *card = 1281 + container_of(work, struct gelic_card, tx_timeout_task); 1282 + struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET]; 1291 1283 1292 1284 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__); 1293 1285 ··· 1310 1302 * 1311 1303 * called, if tx hangs. Schedules a task that resets the interface 1312 1304 */ 1313 - static void gelic_net_tx_timeout(struct net_device *netdev) 1305 + void gelic_net_tx_timeout(struct net_device *netdev) 1314 1306 { 1315 - struct gelic_net_card *card; 1307 + struct gelic_card *card; 1316 1308 1317 - card = netdev_priv(netdev); 1309 + card = netdev_card(netdev); 1318 1310 atomic_inc(&card->tx_timeout_task_counter); 1319 1311 if (netdev->flags & IFF_UP) 1320 1312 schedule_work(&card->tx_timeout_task); ··· 1323 1315 } 1324 1316 1325 1317 /** 1326 - * gelic_net_setup_netdev_ops - initialization of net_device operations 1318 + * gelic_ether_setup_netdev_ops - initialization of net_device operations 1327 1319 * @netdev: net_device structure 1328 1320 * 1329 1321 * fills out function pointers in the net_device structure 1330 1322 */ 1331 - static void gelic_net_setup_netdev_ops(struct net_device *netdev) 1323 + static void gelic_ether_setup_netdev_ops(struct net_device *netdev, 1324 + struct napi_struct *napi) 1332 1325 { 1333 1326 netdev->open = &gelic_net_open; 1334 1327 netdev->stop = &gelic_net_stop; ··· 1339 1330 /* tx watchdog */ 1340 1331 netdev->tx_timeout = &gelic_net_tx_timeout; 1341 1332 netdev->watchdog_timeo = 
GELIC_NET_WATCHDOG_TIMEOUT; 1342 - netdev->ethtool_ops = &gelic_net_ethtool_ops; 1333 + /* NAPI */ 1334 + netif_napi_add(netdev, napi, 1335 + gelic_net_poll, GELIC_NET_NAPI_WEIGHT); 1336 + netdev->ethtool_ops = &gelic_ether_ethtool_ops; 1337 + #ifdef CONFIG_NET_POLL_CONTROLLER 1338 + netdev->poll_controller = gelic_net_poll_controller; 1339 + #endif 1343 1340 } 1344 1341 1345 1342 /** 1346 - * gelic_net_setup_netdev - initialization of net_device 1343 + * gelic_ether_setup_netdev - initialization of net_device 1344 + * @netdev: net_device structure 1347 1345 * @card: card structure 1348 1346 * 1349 1347 * Returns 0 on success or <0 on failure 1350 1348 * 1351 - * gelic_net_setup_netdev initializes the net_device structure 1349 + * gelic_ether_setup_netdev initializes the net_device structure 1350 + * and register it. 1352 1351 **/ 1353 - static int gelic_net_setup_netdev(struct gelic_net_card *card) 1352 + int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card) 1354 1353 { 1355 - struct net_device *netdev = card->netdev; 1356 - struct sockaddr addr; 1357 - unsigned int i; 1358 1354 int status; 1359 1355 u64 v1, v2; 1360 1356 DECLARE_MAC_BUF(mac); 1361 1357 1362 - SET_NETDEV_DEV(netdev, &card->dev->core); 1363 - spin_lock_init(&card->tx_dma_lock); 1364 - 1365 - card->rx_csum = GELIC_NET_RX_CSUM_DEFAULT; 1366 - 1367 - gelic_net_setup_netdev_ops(netdev); 1368 - 1369 - netif_napi_add(netdev, &card->napi, 1370 - gelic_net_poll, GELIC_NET_NAPI_WEIGHT); 1371 - 1372 1358 netdev->features = NETIF_F_IP_CSUM; 1373 1359 1374 1360 status = lv1_net_control(bus_id(card), dev_id(card), 1375 - GELIC_NET_GET_MAC_ADDRESS, 1361 + GELIC_LV1_GET_MAC_ADDRESS, 1376 1362 0, 0, 0, &v1, &v2); 1363 + v1 <<= 16; 1377 1364 if (status || !is_valid_ether_addr((u8 *)&v1)) { 1378 1365 dev_info(ctodev(card), 1379 1366 "%s:lv1_net_control GET_MAC_ADDR failed %d\n", 1380 1367 __func__, status); 1381 1368 return -EINVAL; 1382 1369 } 1383 - v1 <<= 16; 1384 - memcpy(addr.sa_data, 
&v1, ETH_ALEN); 1385 - memcpy(netdev->dev_addr, addr.sa_data, ETH_ALEN); 1386 - dev_info(ctodev(card), "MAC addr %s\n", 1387 - print_mac(mac, netdev->dev_addr)); 1370 + memcpy(netdev->dev_addr, &v1, ETH_ALEN); 1388 1371 1389 - card->vlan_index = -1; /* no vlan */ 1390 - for (i = 0; i < GELIC_NET_VLAN_MAX; i++) { 1391 - status = lv1_net_control(bus_id(card), dev_id(card), 1392 - GELIC_NET_GET_VLAN_ID, 1393 - i + 1, /* index; one based */ 1394 - 0, 0, &v1, &v2); 1395 - if (status == GELIC_NET_VLAN_NO_ENTRY) { 1396 - dev_dbg(ctodev(card), 1397 - "GELIC_VLAN_ID no entry:%d, VLAN disabled\n", 1398 - status); 1399 - card->vlan_id[i] = 0; 1400 - } else if (status) { 1401 - dev_dbg(ctodev(card), 1402 - "%s:GELIC_NET_VLAN_ID faild, status=%d\n", 1403 - __func__, status); 1404 - card->vlan_id[i] = 0; 1405 - } else { 1406 - card->vlan_id[i] = (u32)v1; 1407 - dev_dbg(ctodev(card), "vlan_id:%d, %lx\n", i, v1); 1408 - } 1409 - } 1410 - 1411 - if (card->vlan_id[GELIC_NET_VLAN_WIRED - 1]) { 1412 - card->vlan_index = GELIC_NET_VLAN_WIRED - 1; 1372 + if (card->vlan_required) { 1413 1373 netdev->hard_header_len += VLAN_HLEN; 1374 + /* 1375 + * As vlan is internally used, 1376 + * we can not receive vlan packets 1377 + */ 1378 + netdev->features |= NETIF_F_VLAN_CHALLENGED; 1414 1379 } 1415 1380 1416 1381 status = register_netdev(netdev); 1417 1382 if (status) { 1418 - dev_err(ctodev(card), "%s:Couldn't register net_device: %d\n", 1419 - __func__, status); 1383 + dev_err(ctodev(card), "%s:Couldn't register %s %d\n", 1384 + __func__, netdev->name, status); 1420 1385 return status; 1421 1386 } 1387 + dev_info(ctodev(card), "%s: MAC addr %s\n", 1388 + netdev->name, 1389 + print_mac(mac, netdev->dev_addr)); 1422 1390 1423 1391 return 0; 1424 1392 } 1425 1393 1426 1394 /** 1427 - * gelic_net_alloc_card - allocates net_device and card structure 1395 + * gelic_alloc_card_net - allocates net_device and card structure 1428 1396 * 1429 1397 * returns the card structure or NULL in case of errors 
1430 1398 * 1431 1399 * the card and net_device structures are linked to each other 1432 1400 */ 1433 - static struct gelic_net_card *gelic_net_alloc_card(void) 1401 + #define GELIC_ALIGN (32) 1402 + static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev) 1434 1403 { 1435 - struct net_device *netdev; 1436 - struct gelic_net_card *card; 1404 + struct gelic_card *card; 1405 + struct gelic_port *port; 1406 + void *p; 1437 1407 size_t alloc_size; 1438 - 1439 - alloc_size = sizeof (*card) + 1440 - sizeof (struct gelic_net_descr) * GELIC_NET_RX_DESCRIPTORS + 1441 - sizeof (struct gelic_net_descr) * GELIC_NET_TX_DESCRIPTORS; 1442 1408 /* 1443 - * we assume private data is allocated 32 bytes (or more) aligned 1444 - * so that gelic_net_descr should be 32 bytes aligned. 1445 - * Current alloc_etherdev() does do it because NETDEV_ALIGN 1446 - * is 32. 1447 - * check this assumption here. 1409 + * gelic requires dma descriptor is 32 bytes aligned and 1410 + * the hypervisor requires irq_status is 8 bytes aligned. 
1448 1411 */ 1449 - BUILD_BUG_ON(NETDEV_ALIGN < 32); 1450 - BUILD_BUG_ON(offsetof(struct gelic_net_card, irq_status) % 8); 1451 - BUILD_BUG_ON(offsetof(struct gelic_net_card, descr) % 32); 1412 + BUILD_BUG_ON(offsetof(struct gelic_card, irq_status) % 8); 1413 + BUILD_BUG_ON(offsetof(struct gelic_card, descr) % 32); 1414 + alloc_size = 1415 + sizeof(struct gelic_card) + 1416 + sizeof(struct gelic_descr) * GELIC_NET_RX_DESCRIPTORS + 1417 + sizeof(struct gelic_descr) * GELIC_NET_TX_DESCRIPTORS + 1418 + GELIC_ALIGN - 1; 1452 1419 1453 - netdev = alloc_etherdev(alloc_size); 1454 - if (!netdev) 1420 + p = kzalloc(alloc_size, GFP_KERNEL); 1421 + if (!p) 1455 1422 return NULL; 1423 + card = PTR_ALIGN(p, GELIC_ALIGN); 1424 + card->unalign = p; 1456 1425 1457 - card = netdev_priv(netdev); 1458 - card->netdev = netdev; 1426 + /* 1427 + * alloc netdev 1428 + */ 1429 + *netdev = alloc_etherdev(sizeof(struct gelic_port)); 1430 + if (!netdev) { 1431 + kfree(card->unalign); 1432 + return NULL; 1433 + } 1434 + port = netdev_priv(*netdev); 1435 + 1436 + /* gelic_port */ 1437 + port->netdev = *netdev; 1438 + port->card = card; 1439 + port->type = GELIC_PORT_ETHERNET; 1440 + 1441 + /* gelic_card */ 1442 + card->netdev[GELIC_PORT_ETHERNET] = *netdev; 1443 + 1459 1444 INIT_WORK(&card->tx_timeout_task, gelic_net_tx_timeout_task); 1460 1445 init_waitqueue_head(&card->waitq); 1461 1446 atomic_set(&card->tx_timeout_task_counter, 0); 1447 + init_MUTEX(&card->updown_lock); 1448 + atomic_set(&card->users, 0); 1462 1449 1463 1450 return card; 1464 1451 } 1465 1452 1453 + static void gelic_card_get_vlan_info(struct gelic_card *card) 1454 + { 1455 + u64 v1, v2; 1456 + int status; 1457 + unsigned int i; 1458 + struct { 1459 + int tx; 1460 + int rx; 1461 + } vlan_id_ix[2] = { 1462 + [GELIC_PORT_ETHERNET] = { 1463 + .tx = GELIC_LV1_VLAN_TX_ETHERNET, 1464 + .rx = GELIC_LV1_VLAN_RX_ETHERNET 1465 + }, 1466 + [GELIC_PORT_WIRELESS] = { 1467 + .tx = GELIC_LV1_VLAN_TX_WIRELESS, 1468 + .rx = 
GELIC_LV1_VLAN_RX_WIRELESS 1469 + } 1470 + }; 1471 + 1472 + for (i = 0; i < ARRAY_SIZE(vlan_id_ix); i++) { 1473 + /* tx tag */ 1474 + status = lv1_net_control(bus_id(card), dev_id(card), 1475 + GELIC_LV1_GET_VLAN_ID, 1476 + vlan_id_ix[i].tx, 1477 + 0, 0, &v1, &v2); 1478 + if (status || !v1) { 1479 + if (status != LV1_NO_ENTRY) 1480 + dev_dbg(ctodev(card), 1481 + "get vlan id for tx(%d) failed(%d)\n", 1482 + vlan_id_ix[i].tx, status); 1483 + card->vlan[i].tx = 0; 1484 + card->vlan[i].rx = 0; 1485 + continue; 1486 + } 1487 + card->vlan[i].tx = (u16)v1; 1488 + 1489 + /* rx tag */ 1490 + status = lv1_net_control(bus_id(card), dev_id(card), 1491 + GELIC_LV1_GET_VLAN_ID, 1492 + vlan_id_ix[i].rx, 1493 + 0, 0, &v1, &v2); 1494 + if (status || !v1) { 1495 + if (status != LV1_NO_ENTRY) 1496 + dev_info(ctodev(card), 1497 + "get vlan id for rx(%d) failed(%d)\n", 1498 + vlan_id_ix[i].rx, status); 1499 + card->vlan[i].tx = 0; 1500 + card->vlan[i].rx = 0; 1501 + continue; 1502 + } 1503 + card->vlan[i].rx = (u16)v1; 1504 + 1505 + dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n", 1506 + i, card->vlan[i].tx, card->vlan[i].rx); 1507 + } 1508 + 1509 + if (card->vlan[GELIC_PORT_ETHERNET].tx) { 1510 + BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx); 1511 + card->vlan_required = 1; 1512 + } else 1513 + card->vlan_required = 0; 1514 + 1515 + /* check wirelss capable firmware */ 1516 + if (ps3_compare_firmware_version(1, 6, 0) < 0) { 1517 + card->vlan[GELIC_PORT_WIRELESS].tx = 0; 1518 + card->vlan[GELIC_PORT_WIRELESS].rx = 0; 1519 + } 1520 + 1521 + dev_info(ctodev(card), "internal vlan %s\n", 1522 + card->vlan_required? 
"enabled" : "disabled"); 1523 + } 1466 1524 /** 1467 1525 * ps3_gelic_driver_probe - add a device to the control of this driver 1468 1526 */ 1469 - static int ps3_gelic_driver_probe (struct ps3_system_bus_device *dev) 1527 + static int ps3_gelic_driver_probe(struct ps3_system_bus_device *dev) 1470 1528 { 1471 - struct gelic_net_card *card = gelic_net_alloc_card(); 1529 + struct gelic_card *card; 1530 + struct net_device *netdev; 1472 1531 int result; 1473 1532 1474 - if (!card) { 1475 - dev_info(&dev->core, "gelic_net_alloc_card failed\n"); 1476 - result = -ENOMEM; 1477 - goto fail_alloc_card; 1478 - } 1479 - 1480 - ps3_system_bus_set_driver_data(dev, card); 1481 - card->dev = dev; 1482 - 1533 + pr_debug("%s: called\n", __func__); 1483 1534 result = ps3_open_hv_device(dev); 1484 1535 1485 1536 if (result) { 1486 - dev_dbg(&dev->core, "ps3_open_hv_device failed\n"); 1537 + dev_dbg(&dev->core, "%s:ps3_open_hv_device failed\n", 1538 + __func__); 1487 1539 goto fail_open; 1488 1540 } 1489 1541 1490 1542 result = ps3_dma_region_create(dev->d_region); 1491 1543 1492 1544 if (result) { 1493 - dev_dbg(&dev->core, "ps3_dma_region_create failed(%d)\n", 1494 - result); 1545 + dev_dbg(&dev->core, "%s:ps3_dma_region_create failed(%d)\n", 1546 + __func__, result); 1495 1547 BUG_ON("check region type"); 1496 1548 goto fail_dma_region; 1497 1549 } 1498 1550 1551 + /* alloc card/netdevice */ 1552 + card = gelic_alloc_card_net(&netdev); 1553 + if (!card) { 1554 + dev_info(&dev->core, "%s:gelic_net_alloc_card failed\n", 1555 + __func__); 1556 + result = -ENOMEM; 1557 + goto fail_alloc_card; 1558 + } 1559 + ps3_system_bus_set_driver_data(dev, card); 1560 + card->dev = dev; 1561 + 1562 + /* get internal vlan info */ 1563 + gelic_card_get_vlan_info(card); 1564 + 1565 + /* setup interrupt */ 1499 1566 result = lv1_net_set_interrupt_status_indicator(bus_id(card), 1500 1567 dev_id(card), 1501 1568 ps3_mm_phys_to_lpar(__pa(&card->irq_status)), ··· 1579 1494 1580 1495 if (result) { 1581 1496 
dev_dbg(&dev->core, 1582 - "lv1_net_set_interrupt_status_indicator failed: %s\n", 1583 - ps3_result(result)); 1497 + "%s:set_interrupt_status_indicator failed: %s\n", 1498 + __func__, ps3_result(result)); 1584 1499 result = -EIO; 1585 1500 goto fail_status_indicator; 1586 1501 } 1587 1502 1588 - result = gelic_net_setup_netdev(card); 1503 + result = ps3_sb_event_receive_port_setup(dev, PS3_BINDING_CPU_ANY, 1504 + &card->irq); 1589 1505 1590 1506 if (result) { 1591 - dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: " 1592 - "(%d)\n", __func__, __LINE__, result); 1507 + dev_info(ctodev(card), 1508 + "%s:gelic_net_open_device failed (%d)\n", 1509 + __func__, result); 1510 + result = -EPERM; 1511 + goto fail_alloc_irq; 1512 + } 1513 + result = request_irq(card->irq, gelic_card_interrupt, 1514 + IRQF_DISABLED, netdev->name, card); 1515 + 1516 + if (result) { 1517 + dev_info(ctodev(card), "%s:request_irq failed (%d)\n", 1518 + __func__, result); 1519 + goto fail_request_irq; 1520 + } 1521 + 1522 + /* setup card structure */ 1523 + card->irq_mask = GELIC_CARD_RXINT | GELIC_CARD_TXINT | 1524 + GELIC_CARD_PORT_STATUS_CHANGED; 1525 + card->rx_csum = GELIC_CARD_RX_CSUM_DEFAULT; 1526 + 1527 + 1528 + if (gelic_card_init_chain(card, &card->tx_chain, 1529 + card->descr, GELIC_NET_TX_DESCRIPTORS)) 1530 + goto fail_alloc_tx; 1531 + if (gelic_card_init_chain(card, &card->rx_chain, 1532 + card->descr + GELIC_NET_TX_DESCRIPTORS, 1533 + GELIC_NET_RX_DESCRIPTORS)) 1534 + goto fail_alloc_rx; 1535 + 1536 + /* head of chain */ 1537 + card->tx_top = card->tx_chain.head; 1538 + card->rx_top = card->rx_chain.head; 1539 + dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n", 1540 + card->rx_top, card->tx_top, sizeof(struct gelic_descr), 1541 + GELIC_NET_RX_DESCRIPTORS); 1542 + /* allocate rx skbs */ 1543 + if (gelic_card_alloc_rx_skbs(card)) 1544 + goto fail_alloc_skbs; 1545 + 1546 + spin_lock_init(&card->tx_lock); 1547 + card->tx_dma_progress = 0; 1548 + 1549 + /* setup 
net_device structure */ 1550 + netdev->irq = card->irq; 1551 + SET_NETDEV_DEV(netdev, &card->dev->core); 1552 + gelic_ether_setup_netdev_ops(netdev, &card->napi); 1553 + result = gelic_net_setup_netdev(netdev, card); 1554 + if (result) { 1555 + dev_dbg(&dev->core, "%s: setup_netdev failed %d", 1556 + __func__, result); 1593 1557 goto fail_setup_netdev; 1594 1558 } 1595 1559 1560 + #ifdef CONFIG_GELIC_WIRELESS 1561 + if (gelic_wl_driver_probe(card)) { 1562 + dev_dbg(&dev->core, "%s: WL init failed\n", __func__); 1563 + goto fail_setup_netdev; 1564 + } 1565 + #endif 1566 + pr_debug("%s: done\n", __func__); 1596 1567 return 0; 1597 1568 1598 1569 fail_setup_netdev: 1570 + fail_alloc_skbs: 1571 + gelic_card_free_chain(card, card->rx_chain.head); 1572 + fail_alloc_rx: 1573 + gelic_card_free_chain(card, card->tx_chain.head); 1574 + fail_alloc_tx: 1575 + free_irq(card->irq, card); 1576 + netdev->irq = NO_IRQ; 1577 + fail_request_irq: 1578 + ps3_sb_event_receive_port_destroy(dev, card->irq); 1579 + fail_alloc_irq: 1599 1580 lv1_net_set_interrupt_status_indicator(bus_id(card), 1600 1581 bus_id(card), 1601 - 0 , 0); 1582 + 0, 0); 1602 1583 fail_status_indicator: 1584 + ps3_system_bus_set_driver_data(dev, NULL); 1585 + kfree(netdev_card(netdev)->unalign); 1586 + free_netdev(netdev); 1587 + fail_alloc_card: 1603 1588 ps3_dma_region_free(dev->d_region); 1604 1589 fail_dma_region: 1605 1590 ps3_close_hv_device(dev); 1606 1591 fail_open: 1607 - ps3_system_bus_set_driver_data(dev, NULL); 1608 - free_netdev(card->netdev); 1609 - fail_alloc_card: 1610 1592 return result; 1611 1593 } 1612 1594 ··· 1681 1529 * ps3_gelic_driver_remove - remove a device from the control of this driver 1682 1530 */ 1683 1531 1684 - static int ps3_gelic_driver_remove (struct ps3_system_bus_device *dev) 1532 + static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev) 1685 1533 { 1686 - struct gelic_net_card *card = ps3_system_bus_get_driver_data(dev); 1534 + struct gelic_card *card = 
ps3_system_bus_get_driver_data(dev); 1535 + struct net_device *netdev0; 1536 + pr_debug("%s: called\n", __func__); 1537 + 1538 + #ifdef CONFIG_GELIC_WIRELESS 1539 + gelic_wl_driver_remove(card); 1540 + #endif 1541 + /* stop interrupt */ 1542 + gelic_card_set_irq_mask(card, 0); 1543 + 1544 + /* turn off DMA, force end */ 1545 + gelic_card_disable_rxdmac(card); 1546 + gelic_card_disable_txdmac(card); 1547 + 1548 + /* release chains */ 1549 + gelic_card_release_tx_chain(card, 1); 1550 + gelic_card_release_rx_chain(card); 1551 + 1552 + gelic_card_free_chain(card, card->tx_top); 1553 + gelic_card_free_chain(card, card->rx_top); 1554 + 1555 + netdev0 = card->netdev[GELIC_PORT_ETHERNET]; 1556 + /* disconnect event port */ 1557 + free_irq(card->irq, card); 1558 + netdev0->irq = NO_IRQ; 1559 + ps3_sb_event_receive_port_destroy(card->dev, card->irq); 1687 1560 1688 1561 wait_event(card->waitq, 1689 1562 atomic_read(&card->tx_timeout_task_counter) == 0); ··· 1716 1539 lv1_net_set_interrupt_status_indicator(bus_id(card), dev_id(card), 1717 1540 0 , 0); 1718 1541 1719 - unregister_netdev(card->netdev); 1720 - free_netdev(card->netdev); 1542 + unregister_netdev(netdev0); 1543 + kfree(netdev_card(netdev0)->unalign); 1544 + free_netdev(netdev0); 1721 1545 1722 1546 ps3_system_bus_set_driver_data(dev, NULL); 1723 1547 ··· 1726 1548 1727 1549 ps3_close_hv_device(dev); 1728 1550 1551 + pr_debug("%s: done\n", __func__); 1729 1552 return 0; 1730 1553 } 1731 1554 ··· 1751 1572 ps3_system_bus_driver_unregister(&ps3_gelic_driver); 1752 1573 } 1753 1574 1754 - module_init (ps3_gelic_driver_init); 1755 - module_exit (ps3_gelic_driver_exit); 1575 + module_init(ps3_gelic_driver_init); 1576 + module_exit(ps3_gelic_driver_exit); 1756 1577 1757 1578 MODULE_ALIAS(PS3_MODULE_ALIAS_GELIC); 1758 1579
+264 -139
drivers/net/ps3_gelic_net.h
··· 35 35 #define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN 36 36 #define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN 37 37 #define GELIC_NET_RXBUF_ALIGN 128 38 - #define GELIC_NET_RX_CSUM_DEFAULT 1 /* hw chksum */ 38 + #define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */ 39 39 #define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ 40 40 #define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS) 41 41 #define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL 42 - #define GELIC_NET_VLAN_POS (VLAN_ETH_ALEN * 2) 43 - #define GELIC_NET_VLAN_MAX 4 42 + 44 43 #define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */ 45 44 46 - enum gelic_net_int0_status { 47 - GELIC_NET_GDTDCEINT = 24, 48 - GELIC_NET_GRFANMINT = 28, 49 - }; 45 + /* virtual interrupt status register bits */ 46 + /* INT1 */ 47 + #define GELIC_CARD_TX_RAM_FULL_ERR 0x0000000000000001L 48 + #define GELIC_CARD_RX_RAM_FULL_ERR 0x0000000000000002L 49 + #define GELIC_CARD_TX_SHORT_FRAME_ERR 0x0000000000000004L 50 + #define GELIC_CARD_TX_INVALID_DESCR_ERR 0x0000000000000008L 51 + #define GELIC_CARD_RX_FIFO_FULL_ERR 0x0000000000002000L 52 + #define GELIC_CARD_RX_DESCR_CHAIN_END 0x0000000000004000L 53 + #define GELIC_CARD_RX_INVALID_DESCR_ERR 0x0000000000008000L 54 + #define GELIC_CARD_TX_RESPONCE_ERR 0x0000000000010000L 55 + #define GELIC_CARD_RX_RESPONCE_ERR 0x0000000000100000L 56 + #define GELIC_CARD_TX_PROTECTION_ERR 0x0000000000400000L 57 + #define GELIC_CARD_RX_PROTECTION_ERR 0x0000000004000000L 58 + #define GELIC_CARD_TX_TCP_UDP_CHECKSUM_ERR 0x0000000008000000L 59 + #define GELIC_CARD_PORT_STATUS_CHANGED 0x0000000020000000L 60 + #define GELIC_CARD_WLAN_EVENT_RECEIVED 0x0000000040000000L 61 + #define GELIC_CARD_WLAN_COMMAND_COMPLETED 0x0000000080000000L 62 + /* INT 0 */ 63 + #define GELIC_CARD_TX_FLAGGED_DESCR 0x0004000000000000L 64 + #define GELIC_CARD_RX_FLAGGED_DESCR 0x0040000000000000L 65 + #define GELIC_CARD_TX_TRANSFER_END 0x0080000000000000L 66 + #define GELIC_CARD_TX_DESCR_CHAIN_END 0x0100000000000000L 67 + #define 
GELIC_CARD_NUMBER_OF_RX_FRAME 0x1000000000000000L 68 + #define GELIC_CARD_ONE_TIME_COUNT_TIMER 0x4000000000000000L 69 + #define GELIC_CARD_FREE_RUN_COUNT_TIMER 0x8000000000000000L 50 70 51 - /* GHIINT1STS bits */ 52 - enum gelic_net_int1_status { 53 - GELIC_NET_GDADCEINT = 14, 54 - }; 71 + /* initial interrupt mask */ 72 + #define GELIC_CARD_TXINT GELIC_CARD_TX_DESCR_CHAIN_END 55 73 56 - /* interrupt mask */ 57 - #define GELIC_NET_TXINT (1L << (GELIC_NET_GDTDCEINT + 32)) 58 - 59 - #define GELIC_NET_RXINT0 (1L << (GELIC_NET_GRFANMINT + 32)) 60 - #define GELIC_NET_RXINT1 (1L << GELIC_NET_GDADCEINT) 61 - #define GELIC_NET_RXINT (GELIC_NET_RXINT0 | GELIC_NET_RXINT1) 74 + #define GELIC_CARD_RXINT (GELIC_CARD_RX_DESCR_CHAIN_END | \ 75 + GELIC_CARD_NUMBER_OF_RX_FRAME) 62 76 63 77 /* RX descriptor data_status bits */ 64 - #define GELIC_NET_RXDMADU 0x80000000 /* destination MAC addr unknown */ 65 - #define GELIC_NET_RXLSTFBF 0x40000000 /* last frame buffer */ 66 - #define GELIC_NET_RXIPCHK 0x20000000 /* IP checksum performed */ 67 - #define GELIC_NET_RXTCPCHK 0x10000000 /* TCP/UDP checksup performed */ 68 - #define GELIC_NET_RXIPSPKT 0x08000000 /* IPsec packet */ 69 - #define GELIC_NET_RXIPSAHPRT 0x04000000 /* IPsec AH protocol performed */ 70 - #define GELIC_NET_RXIPSESPPRT 0x02000000 /* IPsec ESP protocol performed */ 71 - #define GELIC_NET_RXSESPAH 0x01000000 /* 72 - * IPsec ESP protocol auth 73 - * performed 74 - */ 78 + enum gelic_descr_rx_status { 79 + GELIC_DESCR_RXDMADU = 0x80000000, /* destination MAC addr unknown */ 80 + GELIC_DESCR_RXLSTFBF = 0x40000000, /* last frame buffer */ 81 + GELIC_DESCR_RXIPCHK = 0x20000000, /* IP checksum performed */ 82 + GELIC_DESCR_RXTCPCHK = 0x10000000, /* TCP/UDP checksup performed */ 83 + GELIC_DESCR_RXWTPKT = 0x00C00000, /* 84 + * wakeup trigger packet 85 + * 01: Magic Packet (TM) 86 + * 10: ARP packet 87 + * 11: Multicast MAC addr 88 + */ 89 + GELIC_DESCR_RXVLNPKT = 0x00200000, /* VLAN packet */ 90 + /* bit 20..16 reserved */ 91 
+ GELIC_DESCR_RXRRECNUM = 0x0000ff00, /* reception receipt number */ 92 + /* bit 7..0 reserved */ 93 + }; 75 94 76 - #define GELIC_NET_RXWTPKT 0x00C00000 /* 77 - * wakeup trigger packet 78 - * 01: Magic Packet (TM) 79 - * 10: ARP packet 80 - * 11: Multicast MAC addr 81 - */ 82 - #define GELIC_NET_RXVLNPKT 0x00200000 /* VLAN packet */ 83 - /* bit 20..16 reserved */ 84 - #define GELIC_NET_RXRRECNUM 0x0000ff00 /* reception receipt number */ 85 - #define GELIC_NET_RXRRECNUM_SHIFT 8 86 - /* bit 7..0 reserved */ 95 + #define GELIC_DESCR_DATA_STATUS_CHK_MASK \ 96 + (GELIC_DESCR_RXIPCHK | GELIC_DESCR_RXTCPCHK) 87 97 88 - #define GELIC_NET_TXDESC_TAIL 0 89 - #define GELIC_NET_DATA_STATUS_CHK_MASK (GELIC_NET_RXIPCHK | GELIC_NET_RXTCPCHK) 98 + /* TX descriptor data_status bits */ 99 + enum gelic_descr_tx_status { 100 + GELIC_DESCR_TX_TAIL = 0x00000001, /* gelic treated this 101 + * descriptor was end of 102 + * a tx frame 103 + */ 104 + }; 90 105 91 - /* RX descriptor data_error bits */ 92 - /* bit 31 reserved */ 93 - #define GELIC_NET_RXALNERR 0x40000000 /* alignement error 10/100M */ 94 - #define GELIC_NET_RXOVERERR 0x20000000 /* oversize error */ 95 - #define GELIC_NET_RXRNTERR 0x10000000 /* Runt error */ 96 - #define GELIC_NET_RXIPCHKERR 0x08000000 /* IP checksum error */ 97 - #define GELIC_NET_RXTCPCHKERR 0x04000000 /* TCP/UDP checksum error */ 98 - #define GELIC_NET_RXUMCHSP 0x02000000 /* unmatched sp on sp */ 99 - #define GELIC_NET_RXUMCHSPI 0x01000000 /* unmatched SPI on SAD */ 100 - #define GELIC_NET_RXUMCHSAD 0x00800000 /* unmatched SAD */ 101 - #define GELIC_NET_RXIPSAHERR 0x00400000 /* auth error on AH protocol 102 - * processing */ 103 - #define GELIC_NET_RXIPSESPAHERR 0x00200000 /* auth error on ESP protocol 104 - * processing */ 105 - #define GELIC_NET_RXDRPPKT 0x00100000 /* drop packet */ 106 - #define GELIC_NET_RXIPFMTERR 0x00080000 /* IP packet format error */ 107 - /* bit 18 reserved */ 108 - #define GELIC_NET_RXDATAERR 0x00020000 /* IP packet format error 
*/ 109 - #define GELIC_NET_RXCALERR 0x00010000 /* cariier extension length 110 - * error */ 111 - #define GELIC_NET_RXCREXERR 0x00008000 /* carrier extention error */ 112 - #define GELIC_NET_RXMLTCST 0x00004000 /* multicast address frame */ 113 - /* bit 13..0 reserved */ 114 - #define GELIC_NET_DATA_ERROR_CHK_MASK \ 115 - (GELIC_NET_RXIPCHKERR | GELIC_NET_RXTCPCHKERR) 106 + /* RX descriptor data error bits */ 107 + enum gelic_descr_rx_error { 108 + /* bit 31 reserved */ 109 + GELIC_DESCR_RXALNERR = 0x40000000, /* alignement error 10/100M */ 110 + GELIC_DESCR_RXOVERERR = 0x20000000, /* oversize error */ 111 + GELIC_DESCR_RXRNTERR = 0x10000000, /* Runt error */ 112 + GELIC_DESCR_RXIPCHKERR = 0x08000000, /* IP checksum error */ 113 + GELIC_DESCR_RXTCPCHKERR = 0x04000000, /* TCP/UDP checksum error */ 114 + GELIC_DESCR_RXDRPPKT = 0x00100000, /* drop packet */ 115 + GELIC_DESCR_RXIPFMTERR = 0x00080000, /* IP packet format error */ 116 + /* bit 18 reserved */ 117 + GELIC_DESCR_RXDATAERR = 0x00020000, /* IP packet format error */ 118 + GELIC_DESCR_RXCALERR = 0x00010000, /* cariier extension length 119 + * error */ 120 + GELIC_DESCR_RXCREXERR = 0x00008000, /* carrier extention error */ 121 + GELIC_DESCR_RXMLTCST = 0x00004000, /* multicast address frame */ 122 + /* bit 13..0 reserved */ 123 + }; 124 + #define GELIC_DESCR_DATA_ERROR_CHK_MASK \ 125 + (GELIC_DESCR_RXIPCHKERR | GELIC_DESCR_RXTCPCHKERR) 116 126 127 + /* DMA command and status (RX and TX)*/ 128 + enum gelic_descr_dma_status { 129 + GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */ 130 + GELIC_DESCR_DMA_BUFFER_FULL = 0x00000000, /* used in rx */ 131 + GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */ 132 + GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */ 133 + GELIC_DESCR_DMA_FRAME_END = 0x40000000, /* used in rx */ 134 + GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */ 135 + GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */ 136 + 
GELIC_DESCR_DMA_NOT_IN_USE = 0xb0000000, /* any other value */ 137 + }; 138 + 139 + #define GELIC_DESCR_DMA_STAT_MASK (0xf0000000) 117 140 118 141 /* tx descriptor command and status */ 119 - #define GELIC_NET_DMAC_CMDSTAT_NOCS 0xa0080000 /* middle of frame */ 120 - #define GELIC_NET_DMAC_CMDSTAT_TCPCS 0xa00a0000 121 - #define GELIC_NET_DMAC_CMDSTAT_UDPCS 0xa00b0000 122 - #define GELIC_NET_DMAC_CMDSTAT_END_FRAME 0x00040000 /* end of frame */ 142 + enum gelic_descr_tx_dma_status { 143 + /* [19] */ 144 + GELIC_DESCR_TX_DMA_IKE = 0x00080000, /* IPSEC off */ 145 + /* [18] */ 146 + GELIC_DESCR_TX_DMA_FRAME_TAIL = 0x00040000, /* last descriptor of 147 + * the packet 148 + */ 149 + /* [17..16] */ 150 + GELIC_DESCR_TX_DMA_TCP_CHKSUM = 0x00020000, /* TCP packet */ 151 + GELIC_DESCR_TX_DMA_UDP_CHKSUM = 0x00030000, /* UDP packet */ 152 + GELIC_DESCR_TX_DMA_NO_CHKSUM = 0x00000000, /* no checksum */ 123 153 124 - #define GELIC_NET_DMAC_CMDSTAT_RXDCEIS 0x00000002 /* descriptor chain end 125 - * interrupt status */ 126 - 127 - #define GELIC_NET_DMAC_CMDSTAT_CHAIN_END 0x00000002 /* RXDCEIS:DMA stopped */ 128 - #define GELIC_NET_DESCR_IND_PROC_SHIFT 28 129 - #define GELIC_NET_DESCR_IND_PROC_MASKO 0x0fffffff 130 - 131 - 132 - enum gelic_net_descr_status { 133 - GELIC_NET_DESCR_COMPLETE = 0x00, /* used in tx */ 134 - GELIC_NET_DESCR_BUFFER_FULL = 0x00, /* used in rx */ 135 - GELIC_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ 136 - GELIC_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */ 137 - GELIC_NET_DESCR_FRAME_END = 0x04, /* used in rx */ 138 - GELIC_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */ 139 - GELIC_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */ 140 - GELIC_NET_DESCR_NOT_IN_USE = 0x0b /* any other value */ 154 + /* [1] */ 155 + GELIC_DESCR_TX_DMA_CHAIN_END = 0x00000002, /* DMA terminated 156 + * due to chain end 157 + */ 141 158 }; 159 + 160 + #define GELIC_DESCR_DMA_CMD_NO_CHKSUM \ 161 + (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | 
\ 162 + GELIC_DESCR_TX_DMA_NO_CHKSUM) 163 + 164 + #define GELIC_DESCR_DMA_CMD_TCP_CHKSUM \ 165 + (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \ 166 + GELIC_DESCR_TX_DMA_TCP_CHKSUM) 167 + 168 + #define GELIC_DESCR_DMA_CMD_UDP_CHKSUM \ 169 + (GELIC_DESCR_DMA_CARDOWNED | GELIC_DESCR_TX_DMA_IKE | \ 170 + GELIC_DESCR_TX_DMA_UDP_CHKSUM) 171 + 172 + enum gelic_descr_rx_dma_status { 173 + /* [ 1 ] */ 174 + GELIC_DESCR_RX_DMA_CHAIN_END = 0x00000002, /* DMA terminated 175 + * due to chain end 176 + */ 177 + }; 178 + 142 179 /* for lv1_net_control */ 143 - #define GELIC_NET_GET_MAC_ADDRESS 0x0000000000000001 144 - #define GELIC_NET_GET_ETH_PORT_STATUS 0x0000000000000002 145 - #define GELIC_NET_SET_NEGOTIATION_MODE 0x0000000000000003 146 - #define GELIC_NET_GET_VLAN_ID 0x0000000000000004 180 + enum gelic_lv1_net_control_code { 181 + GELIC_LV1_GET_MAC_ADDRESS = 1, 182 + GELIC_LV1_GET_ETH_PORT_STATUS = 2, 183 + GELIC_LV1_SET_NEGOTIATION_MODE = 3, 184 + GELIC_LV1_GET_VLAN_ID = 4, 185 + GELIC_LV1_GET_CHANNEL = 6, 186 + GELIC_LV1_POST_WLAN_CMD = 9, 187 + GELIC_LV1_GET_WLAN_CMD_RESULT = 10, 188 + GELIC_LV1_GET_WLAN_EVENT = 11 189 + }; 147 190 148 - #define GELIC_NET_LINK_UP 0x0000000000000001 149 - #define GELIC_NET_FULL_DUPLEX 0x0000000000000002 150 - #define GELIC_NET_AUTO_NEG 0x0000000000000004 151 - #define GELIC_NET_SPEED_10 0x0000000000000010 152 - #define GELIC_NET_SPEED_100 0x0000000000000020 153 - #define GELIC_NET_SPEED_1000 0x0000000000000040 191 + /* status returened from GET_ETH_PORT_STATUS */ 192 + enum gelic_lv1_ether_port_status { 193 + GELIC_LV1_ETHER_LINK_UP = 0x0000000000000001L, 194 + GELIC_LV1_ETHER_FULL_DUPLEX = 0x0000000000000002L, 195 + GELIC_LV1_ETHER_AUTO_NEG = 0x0000000000000004L, 154 196 155 - #define GELIC_NET_VLAN_ALL 0x0000000000000001 156 - #define GELIC_NET_VLAN_WIRED 0x0000000000000002 157 - #define GELIC_NET_VLAN_WIRELESS 0x0000000000000003 158 - #define GELIC_NET_VLAN_PSP 0x0000000000000004 159 - #define GELIC_NET_VLAN_PORT0 
0x0000000000000010 160 - #define GELIC_NET_VLAN_PORT1 0x0000000000000011 161 - #define GELIC_NET_VLAN_PORT2 0x0000000000000012 162 - #define GELIC_NET_VLAN_DAEMON_CLIENT_BSS 0x0000000000000013 163 - #define GELIC_NET_VLAN_LIBERO_CLIENT_BSS 0x0000000000000014 164 - #define GELIC_NET_VLAN_NO_ENTRY -6 197 + GELIC_LV1_ETHER_SPEED_10 = 0x0000000000000010L, 198 + GELIC_LV1_ETHER_SPEED_100 = 0x0000000000000020L, 199 + GELIC_LV1_ETHER_SPEED_1000 = 0x0000000000000040L, 200 + GELIC_LV1_ETHER_SPEED_MASK = 0x0000000000000070L 201 + }; 165 202 166 - #define GELIC_NET_PORT 2 /* for port status */ 203 + enum gelic_lv1_vlan_index { 204 + /* for outgoing packets */ 205 + GELIC_LV1_VLAN_TX_ETHERNET = 0x0000000000000002L, 206 + GELIC_LV1_VLAN_TX_WIRELESS = 0x0000000000000003L, 207 + /* for incoming packets */ 208 + GELIC_LV1_VLAN_RX_ETHERNET = 0x0000000000000012L, 209 + GELIC_LV1_VLAN_RX_WIRELESS = 0x0000000000000013L 210 + }; 167 211 168 212 /* size of hardware part of gelic descriptor */ 169 - #define GELIC_NET_DESCR_SIZE (32) 170 - struct gelic_net_descr { 213 + #define GELIC_DESCR_SIZE (32) 214 + 215 + enum gelic_port_type { 216 + GELIC_PORT_ETHERNET = 0, 217 + GELIC_PORT_WIRELESS = 1, 218 + GELIC_PORT_MAX 219 + }; 220 + 221 + struct gelic_descr { 171 222 /* as defined by the hardware */ 172 - u32 buf_addr; 173 - u32 buf_size; 174 - u32 next_descr_addr; 175 - u32 dmac_cmd_status; 176 - u32 result_size; 177 - u32 valid_size; /* all zeroes for tx */ 178 - u32 data_status; 179 - u32 data_error; /* all zeroes for tx */ 223 + __be32 buf_addr; 224 + __be32 buf_size; 225 + __be32 next_descr_addr; 226 + __be32 dmac_cmd_status; 227 + __be32 result_size; 228 + __be32 valid_size; /* all zeroes for tx */ 229 + __be32 data_status; 230 + __be32 data_error; /* all zeroes for tx */ 180 231 181 232 /* used in the driver */ 182 233 struct sk_buff *skb; 183 234 dma_addr_t bus_addr; 184 - struct gelic_net_descr *next; 185 - struct gelic_net_descr *prev; 186 - struct vlan_ethhdr vlan; 235 + struct 
gelic_descr *next; 236 + struct gelic_descr *prev; 187 237 } __attribute__((aligned(32))); 188 238 189 - struct gelic_net_descr_chain { 239 + struct gelic_descr_chain { 190 240 /* we walk from tail to head */ 191 - struct gelic_net_descr *head; 192 - struct gelic_net_descr *tail; 241 + struct gelic_descr *head; 242 + struct gelic_descr *tail; 193 243 }; 194 244 195 - struct gelic_net_card { 196 - struct net_device *netdev; 245 + struct gelic_vlan_id { 246 + u16 tx; 247 + u16 rx; 248 + }; 249 + 250 + struct gelic_card { 197 251 struct napi_struct napi; 252 + struct net_device *netdev[GELIC_PORT_MAX]; 198 253 /* 199 254 * hypervisor requires irq_status should be 200 255 * 8 bytes aligned, but u64 member is 201 256 * always disposed in that manner 202 257 */ 203 258 u64 irq_status; 204 - u64 ghiintmask; 259 + u64 irq_mask; 205 260 206 261 struct ps3_system_bus_device *dev; 207 - u32 vlan_id[GELIC_NET_VLAN_MAX]; 208 - int vlan_index; 262 + struct gelic_vlan_id vlan[GELIC_PORT_MAX]; 263 + int vlan_required; 209 264 210 - struct gelic_net_descr_chain tx_chain; 211 - struct gelic_net_descr_chain rx_chain; 265 + struct gelic_descr_chain tx_chain; 266 + struct gelic_descr_chain rx_chain; 212 267 int rx_dma_restart_required; 213 - /* gurad dmac descriptor chain*/ 214 - spinlock_t chain_lock; 215 - 216 268 int rx_csum; 217 - /* guard tx_dma_progress */ 218 - spinlock_t tx_dma_lock; 269 + /* 270 + * tx_lock guards tx descriptor list and 271 + * tx_dma_progress. 
272 + */ 273 + spinlock_t tx_lock; 219 274 int tx_dma_progress; 220 275 221 276 struct work_struct tx_timeout_task; 222 277 atomic_t tx_timeout_task_counter; 223 278 wait_queue_head_t waitq; 224 279 225 - struct gelic_net_descr *tx_top, *rx_top; 226 - struct gelic_net_descr descr[0]; 280 + /* only first user should up the card */ 281 + struct semaphore updown_lock; 282 + atomic_t users; 283 + 284 + u64 ether_port_status; 285 + /* original address returned by kzalloc */ 286 + void *unalign; 287 + 288 + /* 289 + * each netdevice has copy of irq 290 + */ 291 + unsigned int irq; 292 + struct gelic_descr *tx_top, *rx_top; 293 + struct gelic_descr descr[0]; /* must be the last */ 227 294 }; 228 295 296 + struct gelic_port { 297 + struct gelic_card *card; 298 + struct net_device *netdev; 299 + enum gelic_port_type type; 300 + long priv[0]; /* long for alignment */ 301 + }; 229 302 230 - extern unsigned long p_to_lp(long pa); 303 + static inline struct gelic_card *port_to_card(struct gelic_port *p) 304 + { 305 + return p->card; 306 + } 307 + static inline struct net_device *port_to_netdev(struct gelic_port *p) 308 + { 309 + return p->netdev; 310 + } 311 + static inline struct gelic_card *netdev_card(struct net_device *d) 312 + { 313 + return ((struct gelic_port *)netdev_priv(d))->card; 314 + } 315 + static inline struct gelic_port *netdev_port(struct net_device *d) 316 + { 317 + return (struct gelic_port *)netdev_priv(d); 318 + } 319 + static inline struct device *ctodev(struct gelic_card *card) 320 + { 321 + return &card->dev->core; 322 + } 323 + static inline u64 bus_id(struct gelic_card *card) 324 + { 325 + return card->dev->bus_id; 326 + } 327 + static inline u64 dev_id(struct gelic_card *card) 328 + { 329 + return card->dev->dev_id; 330 + } 331 + 332 + static inline void *port_priv(struct gelic_port *port) 333 + { 334 + return port->priv; 335 + } 336 + 337 + extern int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask); 338 + /* shared netdev ops */ 339 + 
extern void gelic_card_up(struct gelic_card *card); 340 + extern void gelic_card_down(struct gelic_card *card); 341 + extern int gelic_net_open(struct net_device *netdev); 342 + extern int gelic_net_stop(struct net_device *netdev); 343 + extern int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev); 344 + extern void gelic_net_set_multi(struct net_device *netdev); 345 + extern void gelic_net_tx_timeout(struct net_device *netdev); 346 + extern int gelic_net_change_mtu(struct net_device *netdev, int new_mtu); 347 + extern int gelic_net_setup_netdev(struct net_device *netdev, 348 + struct gelic_card *card); 349 + 350 + /* shared ethtool ops */ 351 + extern void gelic_net_get_drvinfo(struct net_device *netdev, 352 + struct ethtool_drvinfo *info); 353 + extern u32 gelic_net_get_rx_csum(struct net_device *netdev); 354 + extern int gelic_net_set_rx_csum(struct net_device *netdev, u32 data); 355 + extern void gelic_net_poll_controller(struct net_device *netdev); 231 356 232 357 #endif /* _GELIC_NET_H */
+2753
drivers/net/ps3_gelic_wireless.c
··· 1 + /* 2 + * PS3 gelic network driver. 3 + * 4 + * Copyright (C) 2007 Sony Computer Entertainment Inc. 5 + * Copyright 2007 Sony Corporation 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 9 + * as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 + */ 20 + #undef DEBUG 21 + 22 + #include <linux/kernel.h> 23 + #include <linux/module.h> 24 + 25 + #include <linux/etherdevice.h> 26 + #include <linux/ethtool.h> 27 + #include <linux/if_vlan.h> 28 + 29 + #include <linux/in.h> 30 + #include <linux/ip.h> 31 + #include <linux/tcp.h> 32 + #include <linux/wireless.h> 33 + #include <linux/ctype.h> 34 + #include <linux/string.h> 35 + #include <net/iw_handler.h> 36 + #include <net/ieee80211.h> 37 + 38 + #include <linux/dma-mapping.h> 39 + #include <net/checksum.h> 40 + #include <asm/firmware.h> 41 + #include <asm/ps3.h> 42 + #include <asm/lv1call.h> 43 + 44 + #include "ps3_gelic_net.h" 45 + #include "ps3_gelic_wireless.h" 46 + 47 + 48 + static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan); 49 + static int gelic_wl_try_associate(struct net_device *netdev); 50 + 51 + /* 52 + * tables 53 + */ 54 + 55 + /* 802.11b/g channel to freq in MHz */ 56 + static const int channel_freq[] = { 57 + 2412, 2417, 2422, 2427, 2432, 58 + 2437, 2442, 2447, 2452, 2457, 59 + 2462, 2467, 2472, 2484 60 + }; 61 + #define NUM_CHANNELS ARRAY_SIZE(channel_freq) 62 + 63 + /* in bps */ 64 + static const int 
bitrate_list[] = { 65 + 1000000, 66 + 2000000, 67 + 5500000, 68 + 11000000, 69 + 6000000, 70 + 9000000, 71 + 12000000, 72 + 18000000, 73 + 24000000, 74 + 36000000, 75 + 48000000, 76 + 54000000 77 + }; 78 + #define NUM_BITRATES ARRAY_SIZE(bitrate_list) 79 + 80 + /* 81 + * wpa2 support requires the hypervisor version 2.0 or later 82 + */ 83 + static inline int wpa2_capable(void) 84 + { 85 + return (0 <= ps3_compare_firmware_version(2, 0, 0)); 86 + } 87 + 88 + static inline int precise_ie(void) 89 + { 90 + return 0; /* FIXME */ 91 + } 92 + /* 93 + * post_eurus_cmd helpers 94 + */ 95 + struct eurus_cmd_arg_info { 96 + int pre_arg; /* command requres arg1, arg2 at POST COMMAND */ 97 + int post_arg; /* command requires arg1, arg2 at GET_RESULT */ 98 + }; 99 + 100 + static const struct eurus_cmd_arg_info cmd_info[GELIC_EURUS_CMD_MAX_INDEX] = { 101 + [GELIC_EURUS_CMD_SET_COMMON_CFG] = { .pre_arg = 1}, 102 + [GELIC_EURUS_CMD_SET_WEP_CFG] = { .pre_arg = 1}, 103 + [GELIC_EURUS_CMD_SET_WPA_CFG] = { .pre_arg = 1}, 104 + [GELIC_EURUS_CMD_GET_COMMON_CFG] = { .post_arg = 1}, 105 + [GELIC_EURUS_CMD_GET_WEP_CFG] = { .post_arg = 1}, 106 + [GELIC_EURUS_CMD_GET_WPA_CFG] = { .post_arg = 1}, 107 + [GELIC_EURUS_CMD_GET_RSSI_CFG] = { .post_arg = 1}, 108 + [GELIC_EURUS_CMD_GET_SCAN] = { .post_arg = 1}, 109 + }; 110 + 111 + #ifdef DEBUG 112 + static const char *cmdstr(enum gelic_eurus_command ix) 113 + { 114 + switch (ix) { 115 + case GELIC_EURUS_CMD_ASSOC: 116 + return "ASSOC"; 117 + case GELIC_EURUS_CMD_DISASSOC: 118 + return "DISASSOC"; 119 + case GELIC_EURUS_CMD_START_SCAN: 120 + return "SCAN"; 121 + case GELIC_EURUS_CMD_GET_SCAN: 122 + return "GET SCAN"; 123 + case GELIC_EURUS_CMD_SET_COMMON_CFG: 124 + return "SET_COMMON_CFG"; 125 + case GELIC_EURUS_CMD_GET_COMMON_CFG: 126 + return "GET_COMMON_CFG"; 127 + case GELIC_EURUS_CMD_SET_WEP_CFG: 128 + return "SET_WEP_CFG"; 129 + case GELIC_EURUS_CMD_GET_WEP_CFG: 130 + return "GET_WEP_CFG"; 131 + case GELIC_EURUS_CMD_SET_WPA_CFG: 132 + return 
"SET_WPA_CFG"; 133 + case GELIC_EURUS_CMD_GET_WPA_CFG: 134 + return "GET_WPA_CFG"; 135 + case GELIC_EURUS_CMD_GET_RSSI_CFG: 136 + return "GET_RSSI"; 137 + default: 138 + break; 139 + } 140 + return ""; 141 + }; 142 + #else 143 + static inline const char *cmdstr(enum gelic_eurus_command ix) 144 + { 145 + return ""; 146 + } 147 + #endif 148 + 149 + /* synchronously do eurus commands */ 150 + static void gelic_eurus_sync_cmd_worker(struct work_struct *work) 151 + { 152 + struct gelic_eurus_cmd *cmd; 153 + struct gelic_card *card; 154 + struct gelic_wl_info *wl; 155 + 156 + u64 arg1, arg2; 157 + 158 + pr_debug("%s: <-\n", __func__); 159 + cmd = container_of(work, struct gelic_eurus_cmd, work); 160 + BUG_ON(cmd_info[cmd->cmd].pre_arg && 161 + cmd_info[cmd->cmd].post_arg); 162 + wl = cmd->wl; 163 + card = port_to_card(wl_port(wl)); 164 + 165 + if (cmd_info[cmd->cmd].pre_arg) { 166 + arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer)); 167 + arg2 = cmd->buf_size; 168 + } else { 169 + arg1 = 0; 170 + arg2 = 0; 171 + } 172 + init_completion(&wl->cmd_done_intr); 173 + pr_debug("%s: cmd='%s' start\n", __func__, cmdstr(cmd->cmd)); 174 + cmd->status = lv1_net_control(bus_id(card), dev_id(card), 175 + GELIC_LV1_POST_WLAN_CMD, 176 + cmd->cmd, arg1, arg2, 177 + &cmd->tag, &cmd->size); 178 + if (cmd->status) { 179 + complete(&cmd->done); 180 + pr_info("%s: cmd issue failed\n", __func__); 181 + return; 182 + } 183 + 184 + wait_for_completion(&wl->cmd_done_intr); 185 + 186 + if (cmd_info[cmd->cmd].post_arg) { 187 + arg1 = ps3_mm_phys_to_lpar(__pa(cmd->buffer)); 188 + arg2 = cmd->buf_size; 189 + } else { 190 + arg1 = 0; 191 + arg2 = 0; 192 + } 193 + 194 + cmd->status = lv1_net_control(bus_id(card), dev_id(card), 195 + GELIC_LV1_GET_WLAN_CMD_RESULT, 196 + cmd->tag, arg1, arg2, 197 + &cmd->cmd_status, &cmd->size); 198 + #ifdef DEBUG 199 + if (cmd->status || cmd->cmd_status) { 200 + pr_debug("%s: cmd done tag=%#lx arg1=%#lx, arg2=%#lx\n", __func__, 201 + cmd->tag, arg1, arg2); 202 + 
		pr_debug("%s: cmd done status=%#x cmd_status=%#lx size=%#lx\n",
			 __func__, cmd->status, cmd->cmd_status, cmd->size);
	}
#endif
	complete(&cmd->done);
	pr_debug("%s: cmd='%s' done\n", __func__, cmdstr(cmd->cmd));
}

/*
 * Issue a eurus command and block until it completes.
 * Returns a kzalloc'ed cmd (caller frees), or NULL on allocation failure.
 */
static struct gelic_eurus_cmd *gelic_eurus_sync_cmd(struct gelic_wl_info *wl,
						    unsigned int eurus_cmd,
						    void *buffer,
						    unsigned int buf_size)
{
	struct gelic_eurus_cmd *cmd;

	/* allocate cmd */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return NULL;

	/* initialize members */
	cmd->cmd = eurus_cmd;
	cmd->buffer = buffer;
	cmd->buf_size = buf_size;
	cmd->wl = wl;
	INIT_WORK(&cmd->work, gelic_eurus_sync_cmd_worker);
	init_completion(&cmd->done);
	queue_work(wl->eurus_cmd_queue, &cmd->work);

	/* wait for command completion */
	wait_for_completion(&cmd->done);

	return cmd;
}

/* ethtool get_link: 1 when associated, 0 otherwise */
static u32 gelic_wl_get_link(struct net_device *netdev)
{
	struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
	u32 ret;

	pr_debug("%s: <-\n", __func__);
	down(&wl->assoc_stat_lock);
	if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED)
		ret = 1;
	else
		ret = 0;
	up(&wl->assoc_stat_lock);
	pr_debug("%s: ->\n", __func__);
	return ret;
}

/* send a SIOCGIWAP wireless event; bssid == NULL reports "no AP" */
static void gelic_wl_send_iwap_event(struct gelic_wl_info *wl, u8 *bssid)
{
	union iwreq_data data;

	memset(&data, 0, sizeof(data));
	if (bssid)
		memcpy(data.ap_addr.sa_data, bssid, ETH_ALEN);
	data.ap_addr.sa_family = ARPHRD_ETHER;
	wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWAP,
			    &data, NULL);
}

/*
 * wireless extension handlers and helpers
 */

/* SIOGIWNAME */
static int gelic_wl_get_name(struct net_device *dev,
			     struct iw_request_info *info,
			     union
			     iwreq_data *iwreq, char *extra)
{
	strcpy(iwreq->name, "IEEE 802.11bg");
	return 0;
}

/*
 * Query the hypervisor once for the available channel bitmap and
 * cache it in wl->ch_info (bit n set => channel n+1 usable).
 */
static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
{
	struct gelic_card *card = port_to_card(wl_port(wl));
	u64 ch_info_raw, tmp;
	int status;

	if (!test_and_set_bit(GELIC_WL_STAT_CH_INFO, &wl->stat)) {
		status = lv1_net_control(bus_id(card), dev_id(card),
					 GELIC_LV1_GET_CHANNEL, 0, 0, 0,
					 &ch_info_raw,
					 &tmp);
		/* some fw versions may return error */
		if (status) {
			if (status != LV1_NO_ENTRY)
				pr_info("%s: available ch unknown\n", __func__);
			wl->ch_info = 0x07ff;/* 11 ch */
		} else
			/* 16 bits of MSB has available channels */
			wl->ch_info = ch_info_raw >> 48;
	}
	return;
}

/* SIOGIWRANGE */
static int gelic_wl_get_range(struct net_device *netdev,
			      struct iw_request_info *info,
			      union iwreq_data *iwreq, char *extra)
{
	struct iw_point *point = &iwreq->data;
	struct iw_range *range = (struct iw_range *)extra;
	struct gelic_wl_info *wl = port_wl(netdev_port(netdev));
	unsigned int i, chs;

	pr_debug("%s: <-\n", __func__);
	point->length = sizeof(struct iw_range);
	memset(range, 0, sizeof(struct iw_range));

	range->we_version_compiled = WIRELESS_EXT;
	range->we_version_source = 22;

	/* available channels and frequencies */
	gelic_wl_get_ch_info(wl);

	for (i = 0, chs = 0;
	     i < NUM_CHANNELS && chs < IW_MAX_FREQUENCIES; i++)
		if (wl->ch_info & (1 << i)) {
			range->freq[chs].i = i + 1;
			range->freq[chs].m = channel_freq[i];
			range->freq[chs].e = 6;
			chs++;
		}
	range->num_frequency = chs;
	range->old_num_frequency = chs;
	range->num_channels = chs;
	range->old_num_channels = chs;

	/* bitrates */
	for (i = 0; i < NUM_BITRATES; i++)
		range->bitrate[i] =
bitrate_list[i];
	range->num_bitrates = i;

	/* signal levels */
	range->max_qual.qual = 100; /* relative value */
	range->max_qual.level = 100;
	range->avg_qual.qual = 50;
	range->avg_qual.level = 50;
	range->sensitivity = 0;

	/* Event capability */
	IW_EVENT_CAPA_SET_KERNEL(range->event_capa);
	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWAP);
	IW_EVENT_CAPA_SET(range->event_capa, SIOCGIWSCAN);

	/* encryption capability */
	range->enc_capa = IW_ENC_CAPA_WPA |
		IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
	if (wpa2_capable())
		range->enc_capa |= IW_ENC_CAPA_WPA2;
	range->encoding_size[0] = 5;	/* 40bit WEP */
	range->encoding_size[1] = 13;	/* 104bit WEP */
	range->encoding_size[2] = 32;	/* WPA-PSK */
	range->num_encoding_sizes = 3;
	range->max_encoding_tokens = GELIC_WEP_KEYS;

	pr_debug("%s: ->\n", __func__);
	return 0;

}

/* SIOC{G,S}IWSCAN */
static int gelic_wl_set_scan(struct net_device *netdev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));

	return gelic_wl_start_scan(wl, 1);
}

#define OUI_LEN 3
static const u8 rsn_oui[OUI_LEN] = { 0x00, 0x0f, 0xac };
static const u8 wpa_oui[OUI_LEN] = { 0x00, 0x50, 0xf2 };

/*
 * synthesize WPA/RSN IE data
 * See WiFi WPA specification and IEEE 802.11-2007 7.3.2.25
 * for the format
 */
static size_t gelic_wl_synthesize_ie(u8 *buf,
				     struct gelic_eurus_scan_info *scan)
{

	const u8 *oui_header;
	u8 *start = buf;
	int rsn;
	int ccmp;

	pr_debug("%s: <- sec=%16x\n", __func__, scan->security);
	switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_MASK) {
	case GELIC_EURUS_SCAN_SEC_WPA:
		rsn = 0;
		break;
	case GELIC_EURUS_SCAN_SEC_WPA2:
		rsn = 1;
		break;
	default:
		/* WEP or none. No IE returned */
		return 0;
	}

	switch (be16_to_cpu(scan->security) & GELIC_EURUS_SCAN_SEC_WPA_MASK) {
	case GELIC_EURUS_SCAN_SEC_WPA_TKIP:
		ccmp = 0;
		break;
	case GELIC_EURUS_SCAN_SEC_WPA_AES:
		ccmp = 1;
		break;
	default:
		if (rsn) {
			ccmp = 1;
			pr_info("%s: no cipher info. defaulted to CCMP\n",
				__func__);
		} else {
			ccmp = 0;
			pr_info("%s: no cipher info. defaulted to TKIP\n",
				__func__);
		}
	}

	if (rsn)
		oui_header = rsn_oui;
	else
		oui_header = wpa_oui;

	/* element id */
	if (rsn)
		*buf++ = MFIE_TYPE_RSN;
	else
		*buf++ = MFIE_TYPE_GENERIC;

	/* length filed; set later */
	buf++;

	/* wpa special header */
	if (!rsn) {
		memcpy(buf, wpa_oui, OUI_LEN);
		buf += OUI_LEN;
		*buf++ = 0x01;
	}

	/* version */
	*buf++ = 0x01; /* version 1.0 */
	*buf++ = 0x00;

	/* group cipher */
	memcpy(buf, oui_header, OUI_LEN);
	buf += OUI_LEN;

	if (ccmp)
		*buf++ = 0x04; /* CCMP */
	else
		*buf++ = 0x02; /* TKIP */

	/* pairwise key count always 1 */
	*buf++ = 0x01;
	*buf++ = 0x00;

	/* pairwise key suit */
	memcpy(buf, oui_header, OUI_LEN);
	buf += OUI_LEN;
	if (ccmp)
		*buf++ = 0x04; /* CCMP */
	else
		*buf++ = 0x02; /* TKIP */

	/* AKM count is 1 */
	*buf++ = 0x01;
	*buf++ = 0x00;

	/* AKM suite is assumed as PSK*/
	memcpy(buf, oui_header, OUI_LEN);
	buf += OUI_LEN;
	*buf++ = 0x02; /* PSK */

	/* RSN capabilities is 0 */
	*buf++ = 0x00;
	*buf++ = 0x00;

	/* set length field */
	start[1] = (buf - start - 2);

	pr_debug("%s: ->\n", __func__);
	return (buf -
start); 491 + } 492 + 493 + struct ie_item { 494 + u8 *data; 495 + u8 len; 496 + }; 497 + 498 + struct ie_info { 499 + struct ie_item wpa; 500 + struct ie_item rsn; 501 + }; 502 + 503 + static void gelic_wl_parse_ie(u8 *data, size_t len, 504 + struct ie_info *ie_info) 505 + { 506 + size_t data_left = len; 507 + u8 *pos = data; 508 + u8 item_len; 509 + u8 item_id; 510 + 511 + pr_debug("%s: data=%p len=%ld \n", __func__, 512 + data, len); 513 + memset(ie_info, 0, sizeof(struct ie_info)); 514 + 515 + while (0 < data_left) { 516 + item_id = *pos++; 517 + item_len = *pos++; 518 + 519 + switch (item_id) { 520 + case MFIE_TYPE_GENERIC: 521 + if (!memcmp(pos, wpa_oui, OUI_LEN) && 522 + pos[OUI_LEN] == 0x01) { 523 + ie_info->wpa.data = pos - 2; 524 + ie_info->wpa.len = item_len + 2; 525 + } 526 + break; 527 + case MFIE_TYPE_RSN: 528 + ie_info->rsn.data = pos - 2; 529 + /* length includes the header */ 530 + ie_info->rsn.len = item_len + 2; 531 + break; 532 + default: 533 + pr_debug("%s: ignore %#x,%d\n", __func__, 534 + item_id, item_len); 535 + break; 536 + } 537 + pos += item_len; 538 + data_left -= item_len + 2; 539 + } 540 + pr_debug("%s: wpa=%p,%d wpa2=%p,%d\n", __func__, 541 + ie_info->wpa.data, ie_info->wpa.len, 542 + ie_info->rsn.data, ie_info->rsn.len); 543 + } 544 + 545 + 546 + /* 547 + * translate the scan informations from hypervisor to a 548 + * independent format 549 + */ 550 + static char *gelic_wl_translate_scan(struct net_device *netdev, 551 + char *ev, 552 + char *stop, 553 + struct gelic_wl_scan_info *network) 554 + { 555 + struct iw_event iwe; 556 + struct gelic_eurus_scan_info *scan = network->hwinfo; 557 + char *tmp; 558 + u8 rate; 559 + unsigned int i, j, len; 560 + u8 buf[MAX_WPA_IE_LEN]; 561 + 562 + pr_debug("%s: <-\n", __func__); 563 + 564 + /* first entry should be AP's mac address */ 565 + iwe.cmd = SIOCGIWAP; 566 + iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 567 + memcpy(iwe.u.ap_addr.sa_data, &scan->bssid[2], ETH_ALEN); 568 + ev = 
iwe_stream_add_event(ev, stop, &iwe, IW_EV_ADDR_LEN); 569 + 570 + /* ESSID */ 571 + iwe.cmd = SIOCGIWESSID; 572 + iwe.u.data.flags = 1; 573 + iwe.u.data.length = strnlen(scan->essid, 32); 574 + ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid); 575 + 576 + /* FREQUENCY */ 577 + iwe.cmd = SIOCGIWFREQ; 578 + iwe.u.freq.m = be16_to_cpu(scan->channel); 579 + iwe.u.freq.e = 0; /* table value in MHz */ 580 + iwe.u.freq.i = 0; 581 + ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_FREQ_LEN); 582 + 583 + /* RATES */ 584 + iwe.cmd = SIOCGIWRATE; 585 + iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0; 586 + /* to stuff multiple values in one event */ 587 + tmp = ev + IW_EV_LCP_LEN; 588 + /* put them in ascendant order (older is first) */ 589 + i = 0; 590 + j = 0; 591 + pr_debug("%s: rates=%d rate=%d\n", __func__, 592 + network->rate_len, network->rate_ext_len); 593 + while (i < network->rate_len) { 594 + if (j < network->rate_ext_len && 595 + ((scan->ext_rate[j] & 0x7f) < (scan->rate[i] & 0x7f))) 596 + rate = scan->ext_rate[j++] & 0x7f; 597 + else 598 + rate = scan->rate[i++] & 0x7f; 599 + iwe.u.bitrate.value = rate * 500000; /* 500kbps unit */ 600 + tmp = iwe_stream_add_value(ev, tmp, stop, &iwe, 601 + IW_EV_PARAM_LEN); 602 + } 603 + while (j < network->rate_ext_len) { 604 + iwe.u.bitrate.value = (scan->ext_rate[j++] & 0x7f) * 500000; 605 + tmp = iwe_stream_add_value(ev, tmp, stop, &iwe, 606 + IW_EV_PARAM_LEN); 607 + } 608 + /* Check if we added any rate */ 609 + if (IW_EV_LCP_LEN < (tmp - ev)) 610 + ev = tmp; 611 + 612 + /* ENCODE */ 613 + iwe.cmd = SIOCGIWENCODE; 614 + if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_PRIVACY) 615 + iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 616 + else 617 + iwe.u.data.flags = IW_ENCODE_DISABLED; 618 + iwe.u.data.length = 0; 619 + ev = iwe_stream_add_point(ev, stop, &iwe, scan->essid); 620 + 621 + /* MODE */ 622 + iwe.cmd = SIOCGIWMODE; 623 + if (be16_to_cpu(scan->capability) & 624 + (WLAN_CAPABILITY_ESS | 
WLAN_CAPABILITY_IBSS)) { 625 + if (be16_to_cpu(scan->capability) & WLAN_CAPABILITY_ESS) 626 + iwe.u.mode = IW_MODE_MASTER; 627 + else 628 + iwe.u.mode = IW_MODE_ADHOC; 629 + ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_UINT_LEN); 630 + } 631 + 632 + /* QUAL */ 633 + iwe.cmd = IWEVQUAL; 634 + iwe.u.qual.updated = IW_QUAL_ALL_UPDATED | 635 + IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; 636 + iwe.u.qual.level = be16_to_cpu(scan->rssi); 637 + iwe.u.qual.qual = be16_to_cpu(scan->rssi); 638 + iwe.u.qual.noise = 0; 639 + ev = iwe_stream_add_event(ev, stop, &iwe, IW_EV_QUAL_LEN); 640 + 641 + /* RSN */ 642 + memset(&iwe, 0, sizeof(iwe)); 643 + if (be16_to_cpu(scan->size) <= sizeof(*scan)) { 644 + /* If wpa[2] capable station, synthesize IE and put it */ 645 + len = gelic_wl_synthesize_ie(buf, scan); 646 + if (len) { 647 + iwe.cmd = IWEVGENIE; 648 + iwe.u.data.length = len; 649 + ev = iwe_stream_add_point(ev, stop, &iwe, buf); 650 + } 651 + } else { 652 + /* this scan info has IE data */ 653 + struct ie_info ie_info; 654 + size_t data_len; 655 + 656 + data_len = be16_to_cpu(scan->size) - sizeof(*scan); 657 + 658 + gelic_wl_parse_ie(scan->elements, data_len, &ie_info); 659 + 660 + if (ie_info.wpa.len && (ie_info.wpa.len <= sizeof(buf))) { 661 + memcpy(buf, ie_info.wpa.data, ie_info.wpa.len); 662 + iwe.cmd = IWEVGENIE; 663 + iwe.u.data.length = ie_info.wpa.len; 664 + ev = iwe_stream_add_point(ev, stop, &iwe, buf); 665 + } 666 + 667 + if (ie_info.rsn.len && (ie_info.rsn.len <= sizeof(buf))) { 668 + memset(&iwe, 0, sizeof(iwe)); 669 + memcpy(buf, ie_info.rsn.data, ie_info.rsn.len); 670 + iwe.cmd = IWEVGENIE; 671 + iwe.u.data.length = ie_info.rsn.len; 672 + ev = iwe_stream_add_point(ev, stop, &iwe, buf); 673 + } 674 + } 675 + 676 + pr_debug("%s: ->\n", __func__); 677 + return ev; 678 + } 679 + 680 + 681 + static int gelic_wl_get_scan(struct net_device *netdev, 682 + struct iw_request_info *info, 683 + union iwreq_data *wrqu, char *extra) 684 + { 685 + struct gelic_wl_info *wl 
= port_wl(netdev_priv(netdev)); 686 + struct gelic_wl_scan_info *scan_info; 687 + char *ev = extra; 688 + char *stop = ev + wrqu->data.length; 689 + int ret = 0; 690 + unsigned long this_time = jiffies; 691 + 692 + pr_debug("%s: <-\n", __func__); 693 + if (down_interruptible(&wl->scan_lock)) 694 + return -EAGAIN; 695 + 696 + switch (wl->scan_stat) { 697 + case GELIC_WL_SCAN_STAT_SCANNING: 698 + /* If a scan in progress, caller should call me again */ 699 + ret = -EAGAIN; 700 + goto out; 701 + break; 702 + 703 + case GELIC_WL_SCAN_STAT_INIT: 704 + /* last scan request failed or never issued */ 705 + ret = -ENODEV; 706 + goto out; 707 + break; 708 + case GELIC_WL_SCAN_STAT_GOT_LIST: 709 + /* ok, use current list */ 710 + break; 711 + } 712 + 713 + list_for_each_entry(scan_info, &wl->network_list, list) { 714 + if (wl->scan_age == 0 || 715 + time_after(scan_info->last_scanned + wl->scan_age, 716 + this_time)) 717 + ev = gelic_wl_translate_scan(netdev, ev, stop, 718 + scan_info); 719 + else 720 + pr_debug("%s:entry too old\n", __func__); 721 + 722 + if (stop - ev <= IW_EV_ADDR_LEN) { 723 + ret = -E2BIG; 724 + goto out; 725 + } 726 + } 727 + 728 + wrqu->data.length = ev - extra; 729 + wrqu->data.flags = 0; 730 + out: 731 + up(&wl->scan_lock); 732 + pr_debug("%s: -> %d %d\n", __func__, ret, wrqu->data.length); 733 + return ret; 734 + } 735 + 736 + #ifdef DEBUG 737 + static void scan_list_dump(struct gelic_wl_info *wl) 738 + { 739 + struct gelic_wl_scan_info *scan_info; 740 + int i; 741 + DECLARE_MAC_BUF(mac); 742 + 743 + i = 0; 744 + list_for_each_entry(scan_info, &wl->network_list, list) { 745 + pr_debug("%s: item %d\n", __func__, i++); 746 + pr_debug("valid=%d eurusindex=%d last=%lx\n", 747 + scan_info->valid, scan_info->eurus_index, 748 + scan_info->last_scanned); 749 + pr_debug("r_len=%d r_ext_len=%d essid_len=%d\n", 750 + scan_info->rate_len, scan_info->rate_ext_len, 751 + scan_info->essid_len); 752 + /* -- */ 753 + pr_debug("bssid=%s\n", 754 + print_mac(mac, 
&scan_info->hwinfo->bssid[2])); 755 + pr_debug("essid=%s\n", scan_info->hwinfo->essid); 756 + } 757 + } 758 + #endif 759 + 760 + static int gelic_wl_set_auth(struct net_device *netdev, 761 + struct iw_request_info *info, 762 + union iwreq_data *data, char *extra) 763 + { 764 + struct iw_param *param = &data->param; 765 + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); 766 + unsigned long irqflag; 767 + int ret = 0; 768 + 769 + pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX); 770 + spin_lock_irqsave(&wl->lock, irqflag); 771 + switch (param->flags & IW_AUTH_INDEX) { 772 + case IW_AUTH_WPA_VERSION: 773 + if (param->value & IW_AUTH_WPA_VERSION_DISABLED) { 774 + pr_debug("%s: NO WPA selected\n", __func__); 775 + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; 776 + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; 777 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; 778 + } 779 + if (param->value & IW_AUTH_WPA_VERSION_WPA) { 780 + pr_debug("%s: WPA version 1 selected\n", __func__); 781 + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; 782 + wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; 783 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; 784 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 785 + } 786 + if (param->value & IW_AUTH_WPA_VERSION_WPA2) { 787 + /* 788 + * As the hypervisor may not tell the cipher 789 + * information of the AP if it is WPA2, 790 + * you will not decide suitable cipher from 791 + * its beacon. 792 + * You should have knowledge about the AP's 793 + * cipher infomation in other method prior to 794 + * the association. 
795 + */ 796 + if (!precise_ie()) 797 + pr_info("%s: WPA2 may not work\n", __func__); 798 + if (wpa2_capable()) { 799 + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2; 800 + wl->group_cipher_method = GELIC_WL_CIPHER_AES; 801 + wl->pairwise_cipher_method = 802 + GELIC_WL_CIPHER_AES; 803 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 804 + } else 805 + ret = -EINVAL; 806 + } 807 + break; 808 + 809 + case IW_AUTH_CIPHER_PAIRWISE: 810 + if (param->value & 811 + (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { 812 + pr_debug("%s: WEP selected\n", __func__); 813 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; 814 + } 815 + if (param->value & IW_AUTH_CIPHER_TKIP) { 816 + pr_debug("%s: TKIP selected\n", __func__); 817 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; 818 + } 819 + if (param->value & IW_AUTH_CIPHER_CCMP) { 820 + pr_debug("%s: CCMP selected\n", __func__); 821 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; 822 + } 823 + if (param->value & IW_AUTH_CIPHER_NONE) { 824 + pr_debug("%s: no auth selected\n", __func__); 825 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; 826 + } 827 + break; 828 + case IW_AUTH_CIPHER_GROUP: 829 + if (param->value & 830 + (IW_AUTH_CIPHER_WEP104 | IW_AUTH_CIPHER_WEP40)) { 831 + pr_debug("%s: WEP selected\n", __func__); 832 + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; 833 + } 834 + if (param->value & IW_AUTH_CIPHER_TKIP) { 835 + pr_debug("%s: TKIP selected\n", __func__); 836 + wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; 837 + } 838 + if (param->value & IW_AUTH_CIPHER_CCMP) { 839 + pr_debug("%s: CCMP selected\n", __func__); 840 + wl->group_cipher_method = GELIC_WL_CIPHER_AES; 841 + } 842 + if (param->value & IW_AUTH_CIPHER_NONE) { 843 + pr_debug("%s: no auth selected\n", __func__); 844 + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; 845 + } 846 + break; 847 + case IW_AUTH_80211_AUTH_ALG: 848 + if (param->value & IW_AUTH_ALG_SHARED_KEY) { 849 + pr_debug("%s: shared key specified\n", __func__); 850 + wl->auth_method = 
GELIC_EURUS_AUTH_SHARED; 851 + } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) { 852 + pr_debug("%s: open system specified\n", __func__); 853 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 854 + } else 855 + ret = -EINVAL; 856 + break; 857 + 858 + case IW_AUTH_WPA_ENABLED: 859 + if (param->value) { 860 + pr_debug("%s: WPA enabled\n", __func__); 861 + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; 862 + } else { 863 + pr_debug("%s: WPA disabled\n", __func__); 864 + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; 865 + } 866 + break; 867 + 868 + case IW_AUTH_KEY_MGMT: 869 + if (param->value & IW_AUTH_KEY_MGMT_PSK) 870 + break; 871 + /* intentionally fall through */ 872 + default: 873 + ret = -EOPNOTSUPP; 874 + break; 875 + }; 876 + 877 + if (!ret) 878 + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 879 + 880 + spin_unlock_irqrestore(&wl->lock, irqflag); 881 + pr_debug("%s: -> %d\n", __func__, ret); 882 + return ret; 883 + } 884 + 885 + static int gelic_wl_get_auth(struct net_device *netdev, 886 + struct iw_request_info *info, 887 + union iwreq_data *iwreq, char *extra) 888 + { 889 + struct iw_param *param = &iwreq->param; 890 + struct gelic_wl_info *wl = port_wl(netdev_port(netdev)); 891 + unsigned long irqflag; 892 + int ret = 0; 893 + 894 + pr_debug("%s: <- %d\n", __func__, param->flags & IW_AUTH_INDEX); 895 + spin_lock_irqsave(&wl->lock, irqflag); 896 + switch (param->flags & IW_AUTH_INDEX) { 897 + case IW_AUTH_WPA_VERSION: 898 + switch (wl->wpa_level) { 899 + case GELIC_WL_WPA_LEVEL_WPA: 900 + param->value |= IW_AUTH_WPA_VERSION_WPA; 901 + break; 902 + case GELIC_WL_WPA_LEVEL_WPA2: 903 + param->value |= IW_AUTH_WPA_VERSION_WPA2; 904 + break; 905 + default: 906 + param->value |= IW_AUTH_WPA_VERSION_DISABLED; 907 + } 908 + break; 909 + 910 + case IW_AUTH_80211_AUTH_ALG: 911 + if (wl->auth_method == GELIC_EURUS_AUTH_SHARED) 912 + param->value = IW_AUTH_ALG_SHARED_KEY; 913 + else if (wl->auth_method == GELIC_EURUS_AUTH_OPEN) 914 + param->value = IW_AUTH_ALG_OPEN_SYSTEM; 915 + 
		break;

	case IW_AUTH_WPA_ENABLED:
		switch (wl->wpa_level) {
		case GELIC_WL_WPA_LEVEL_WPA:
		case GELIC_WL_WPA_LEVEL_WPA2:
			param->value = 1;
			break;
		default:
			param->value = 0;
			break;
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	spin_unlock_irqrestore(&wl->lock, irqflag);
	pr_debug("%s: -> %d\n", __func__, ret);
	return ret;
}

/* SIOC{S,G}IWESSID */
static int gelic_wl_set_essid(struct net_device *netdev,
			      struct iw_request_info *info,
			      union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <- l=%d f=%d\n", __func__,
		 data->essid.length, data->essid.flags);
	if (IW_ESSID_MAX_SIZE < data->essid.length)
		return -EINVAL;

	spin_lock_irqsave(&wl->lock, irqflag);
	if (data->essid.flags) {
		/* flags != 0 means a specific ESSID was requested */
		wl->essid_len = data->essid.length;
		memcpy(wl->essid, extra, wl->essid_len);
		pr_debug("%s: essid = '%s'\n", __func__, extra);
		set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
	} else {
		pr_debug("%s: ESSID any \n", __func__);
		clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
	}
	set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
	spin_unlock_irqrestore(&wl->lock, irqflag);


	gelic_wl_try_associate(netdev); /* FIXME */
	pr_debug("%s: -> \n", __func__);
	return 0;
}

static int gelic_wl_get_essid(struct net_device *netdev,
			      struct iw_request_info *info,
			      union iwreq_data *data, char *extra)
{
	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
	unsigned long irqflag;

	pr_debug("%s: <- \n", __func__);
	down(&wl->assoc_stat_lock);
	spin_lock_irqsave(&wl->lock, irqflag);
	if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
	    wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) {
+ memcpy(extra, wl->essid, wl->essid_len); 982 + data->essid.length = wl->essid_len; 983 + data->essid.flags = 1; 984 + } else 985 + data->essid.flags = 0; 986 + 987 + up(&wl->assoc_stat_lock); 988 + spin_unlock_irqrestore(&wl->lock, irqflag); 989 + pr_debug("%s: -> len=%d \n", __func__, data->essid.length); 990 + 991 + return 0; 992 + } 993 + 994 + /* SIO{S,G}IWENCODE */ 995 + static int gelic_wl_set_encode(struct net_device *netdev, 996 + struct iw_request_info *info, 997 + union iwreq_data *data, char *extra) 998 + { 999 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1000 + struct iw_point *enc = &data->encoding; 1001 + __u16 flags; 1002 + unsigned int irqflag; 1003 + int key_index, index_specified; 1004 + int ret = 0; 1005 + 1006 + pr_debug("%s: <- \n", __func__); 1007 + flags = enc->flags & IW_ENCODE_FLAGS; 1008 + key_index = enc->flags & IW_ENCODE_INDEX; 1009 + 1010 + pr_debug("%s: key_index = %d\n", __func__, key_index); 1011 + pr_debug("%s: key_len = %d\n", __func__, enc->length); 1012 + pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); 1013 + 1014 + if (GELIC_WEP_KEYS < key_index) 1015 + return -EINVAL; 1016 + 1017 + spin_lock_irqsave(&wl->lock, irqflag); 1018 + if (key_index) { 1019 + index_specified = 1; 1020 + key_index--; 1021 + } else { 1022 + index_specified = 0; 1023 + key_index = wl->current_key; 1024 + } 1025 + 1026 + if (flags & IW_ENCODE_NOKEY) { 1027 + /* if just IW_ENCODE_NOKEY, change current key index */ 1028 + if (!flags && index_specified) { 1029 + wl->current_key = key_index; 1030 + goto done; 1031 + } 1032 + 1033 + if (flags & IW_ENCODE_DISABLED) { 1034 + if (!index_specified) { 1035 + /* disable encryption */ 1036 + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; 1037 + wl->pairwise_cipher_method = 1038 + GELIC_WL_CIPHER_NONE; 1039 + /* invalidate all key */ 1040 + wl->key_enabled = 0; 1041 + } else 1042 + clear_bit(key_index, &wl->key_enabled); 1043 + } 1044 + 1045 + if (flags & IW_ENCODE_OPEN) 1046 + 
wl->auth_method = GELIC_EURUS_AUTH_OPEN; 1047 + if (flags & IW_ENCODE_RESTRICTED) { 1048 + pr_info("%s: shared key mode enabled\n", __func__); 1049 + wl->auth_method = GELIC_EURUS_AUTH_SHARED; 1050 + } 1051 + } else { 1052 + if (IW_ENCODING_TOKEN_MAX < enc->length) { 1053 + ret = -EINVAL; 1054 + goto done; 1055 + } 1056 + wl->key_len[key_index] = enc->length; 1057 + memcpy(wl->key[key_index], extra, enc->length); 1058 + set_bit(key_index, &wl->key_enabled); 1059 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_WEP; 1060 + wl->group_cipher_method = GELIC_WL_CIPHER_WEP; 1061 + } 1062 + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1063 + done: 1064 + spin_unlock_irqrestore(&wl->lock, irqflag); 1065 + pr_debug("%s: -> \n", __func__); 1066 + return ret; 1067 + } 1068 + 1069 + static int gelic_wl_get_encode(struct net_device *netdev, 1070 + struct iw_request_info *info, 1071 + union iwreq_data *data, char *extra) 1072 + { 1073 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1074 + struct iw_point *enc = &data->encoding; 1075 + unsigned int irqflag; 1076 + unsigned int key_index, index_specified; 1077 + int ret = 0; 1078 + 1079 + pr_debug("%s: <- \n", __func__); 1080 + key_index = enc->flags & IW_ENCODE_INDEX; 1081 + pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__, 1082 + enc->flags, enc->pointer, enc->length, extra); 1083 + if (GELIC_WEP_KEYS < key_index) 1084 + return -EINVAL; 1085 + 1086 + spin_lock_irqsave(&wl->lock, irqflag); 1087 + if (key_index) { 1088 + index_specified = 1; 1089 + key_index--; 1090 + } else { 1091 + index_specified = 0; 1092 + key_index = wl->current_key; 1093 + } 1094 + 1095 + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { 1096 + switch (wl->auth_method) { 1097 + case GELIC_EURUS_AUTH_OPEN: 1098 + enc->flags = IW_ENCODE_OPEN; 1099 + break; 1100 + case GELIC_EURUS_AUTH_SHARED: 1101 + enc->flags = IW_ENCODE_RESTRICTED; 1102 + break; 1103 + } 1104 + } else 1105 + enc->flags = IW_ENCODE_DISABLED; 1106 + 1107 + if 
(test_bit(key_index, &wl->key_enabled)) { 1108 + if (enc->length < wl->key_len[key_index]) { 1109 + ret = -EINVAL; 1110 + goto done; 1111 + } 1112 + enc->length = wl->key_len[key_index]; 1113 + memcpy(extra, wl->key[key_index], wl->key_len[key_index]); 1114 + } else { 1115 + enc->length = 0; 1116 + enc->flags |= IW_ENCODE_NOKEY; 1117 + } 1118 + enc->flags |= key_index + 1; 1119 + pr_debug("%s: -> flag=%x len=%d\n", __func__, 1120 + enc->flags, enc->length); 1121 + 1122 + done: 1123 + spin_unlock_irqrestore(&wl->lock, irqflag); 1124 + return ret; 1125 + } 1126 + 1127 + /* SIOC{S,G}IWAP */ 1128 + static int gelic_wl_set_ap(struct net_device *netdev, 1129 + struct iw_request_info *info, 1130 + union iwreq_data *data, char *extra) 1131 + { 1132 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1133 + unsigned long irqflag; 1134 + 1135 + pr_debug("%s: <-\n", __func__); 1136 + if (data->ap_addr.sa_family != ARPHRD_ETHER) 1137 + return -EINVAL; 1138 + 1139 + spin_lock_irqsave(&wl->lock, irqflag); 1140 + if (is_valid_ether_addr(data->ap_addr.sa_data)) { 1141 + memcpy(wl->bssid, data->ap_addr.sa_data, 1142 + ETH_ALEN); 1143 + set_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); 1144 + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1145 + pr_debug("%s: bss=%02x:%02x:%02x:%02x:%02x:%02x\n", 1146 + __func__, 1147 + wl->bssid[0], wl->bssid[1], 1148 + wl->bssid[2], wl->bssid[3], 1149 + wl->bssid[4], wl->bssid[5]); 1150 + } else { 1151 + pr_debug("%s: clear bssid\n", __func__); 1152 + clear_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat); 1153 + memset(wl->bssid, 0, ETH_ALEN); 1154 + } 1155 + spin_unlock_irqrestore(&wl->lock, irqflag); 1156 + pr_debug("%s: ->\n", __func__); 1157 + return 0; 1158 + } 1159 + 1160 + static int gelic_wl_get_ap(struct net_device *netdev, 1161 + struct iw_request_info *info, 1162 + union iwreq_data *data, char *extra) 1163 + { 1164 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1165 + unsigned long irqflag; 1166 + 1167 + pr_debug("%s: <-\n", 
__func__); 1168 + down(&wl->assoc_stat_lock); 1169 + spin_lock_irqsave(&wl->lock, irqflag); 1170 + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) { 1171 + data->ap_addr.sa_family = ARPHRD_ETHER; 1172 + memcpy(data->ap_addr.sa_data, wl->active_bssid, 1173 + ETH_ALEN); 1174 + } else 1175 + memset(data->ap_addr.sa_data, 0, ETH_ALEN); 1176 + 1177 + spin_unlock_irqrestore(&wl->lock, irqflag); 1178 + up(&wl->assoc_stat_lock); 1179 + pr_debug("%s: ->\n", __func__); 1180 + return 0; 1181 + } 1182 + 1183 + /* SIOC{S,G}IWENCODEEXT */ 1184 + static int gelic_wl_set_encodeext(struct net_device *netdev, 1185 + struct iw_request_info *info, 1186 + union iwreq_data *data, char *extra) 1187 + { 1188 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1189 + struct iw_point *enc = &data->encoding; 1190 + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1191 + __u16 alg; 1192 + __u16 flags; 1193 + unsigned int irqflag; 1194 + int key_index; 1195 + int ret = 0; 1196 + 1197 + pr_debug("%s: <- \n", __func__); 1198 + flags = enc->flags & IW_ENCODE_FLAGS; 1199 + alg = ext->alg; 1200 + key_index = enc->flags & IW_ENCODE_INDEX; 1201 + 1202 + pr_debug("%s: key_index = %d\n", __func__, key_index); 1203 + pr_debug("%s: key_len = %d\n", __func__, enc->length); 1204 + pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); 1205 + pr_debug("%s: ext_flag=%x\n", __func__, ext->ext_flags); 1206 + pr_debug("%s: ext_key_len=%x\n", __func__, ext->key_len); 1207 + 1208 + if (GELIC_WEP_KEYS < key_index) 1209 + return -EINVAL; 1210 + 1211 + spin_lock_irqsave(&wl->lock, irqflag); 1212 + if (key_index) 1213 + key_index--; 1214 + else 1215 + key_index = wl->current_key; 1216 + 1217 + if (!enc->length && (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)) { 1218 + /* reques to change default key index */ 1219 + pr_debug("%s: request to change default key to %d\n", 1220 + __func__, key_index); 1221 + wl->current_key = key_index; 1222 + goto done; 1223 + } 1224 + 1225 + if (alg == 
IW_ENCODE_ALG_NONE || (flags & IW_ENCODE_DISABLED)) { 1226 + pr_debug("%s: alg disabled\n", __func__); 1227 + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; 1228 + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; 1229 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; 1230 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; /* should be open */ 1231 + } else if (alg == IW_ENCODE_ALG_WEP) { 1232 + pr_debug("%s: WEP requested\n", __func__); 1233 + if (flags & IW_ENCODE_OPEN) { 1234 + pr_debug("%s: open key mode\n", __func__); 1235 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 1236 + } 1237 + if (flags & IW_ENCODE_RESTRICTED) { 1238 + pr_debug("%s: shared key mode\n", __func__); 1239 + wl->auth_method = GELIC_EURUS_AUTH_SHARED; 1240 + } 1241 + if (IW_ENCODING_TOKEN_MAX < ext->key_len) { 1242 + pr_info("%s: key is too long %d\n", __func__, 1243 + ext->key_len); 1244 + ret = -EINVAL; 1245 + goto done; 1246 + } 1247 + /* OK, update the key */ 1248 + wl->key_len[key_index] = ext->key_len; 1249 + memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX); 1250 + memcpy(wl->key[key_index], ext->key, ext->key_len); 1251 + set_bit(key_index, &wl->key_enabled); 1252 + /* remember wep info changed */ 1253 + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1254 + } else if ((alg == IW_ENCODE_ALG_TKIP) || (alg == IW_ENCODE_ALG_CCMP)) { 1255 + pr_debug("%s: TKIP/CCMP requested alg=%d\n", __func__, alg); 1256 + /* check key length */ 1257 + if (IW_ENCODING_TOKEN_MAX < ext->key_len) { 1258 + pr_info("%s: key is too long %d\n", __func__, 1259 + ext->key_len); 1260 + ret = -EINVAL; 1261 + goto done; 1262 + } 1263 + if (alg == IW_ENCODE_ALG_CCMP) { 1264 + pr_debug("%s: AES selected\n", __func__); 1265 + wl->group_cipher_method = GELIC_WL_CIPHER_AES; 1266 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_AES; 1267 + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA2; 1268 + } else { 1269 + pr_debug("%s: TKIP selected, WPA forced\n", __func__); 1270 + wl->group_cipher_method = GELIC_WL_CIPHER_TKIP; 1271 + 
wl->pairwise_cipher_method = GELIC_WL_CIPHER_TKIP; 1272 + /* FIXME: how do we do if WPA2 + TKIP? */ 1273 + wl->wpa_level = GELIC_WL_WPA_LEVEL_WPA; 1274 + } 1275 + if (flags & IW_ENCODE_RESTRICTED) 1276 + BUG(); 1277 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 1278 + /* We should use same key for both and unicast */ 1279 + if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) 1280 + pr_debug("%s: group key \n", __func__); 1281 + else 1282 + pr_debug("%s: unicast key \n", __func__); 1283 + /* OK, update the key */ 1284 + wl->key_len[key_index] = ext->key_len; 1285 + memset(wl->key[key_index], 0, IW_ENCODING_TOKEN_MAX); 1286 + memcpy(wl->key[key_index], ext->key, ext->key_len); 1287 + set_bit(key_index, &wl->key_enabled); 1288 + /* remember info changed */ 1289 + set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1290 + } 1291 + done: 1292 + spin_unlock_irqrestore(&wl->lock, irqflag); 1293 + pr_debug("%s: -> \n", __func__); 1294 + return ret; 1295 + } 1296 + 1297 + static int gelic_wl_get_encodeext(struct net_device *netdev, 1298 + struct iw_request_info *info, 1299 + union iwreq_data *data, char *extra) 1300 + { 1301 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1302 + struct iw_point *enc = &data->encoding; 1303 + struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1304 + unsigned int irqflag; 1305 + int key_index; 1306 + int ret = 0; 1307 + int max_key_len; 1308 + 1309 + pr_debug("%s: <- \n", __func__); 1310 + 1311 + max_key_len = enc->length - sizeof(struct iw_encode_ext); 1312 + if (max_key_len < 0) 1313 + return -EINVAL; 1314 + key_index = enc->flags & IW_ENCODE_INDEX; 1315 + 1316 + pr_debug("%s: key_index = %d\n", __func__, key_index); 1317 + pr_debug("%s: key_len = %d\n", __func__, enc->length); 1318 + pr_debug("%s: flag=%x\n", __func__, enc->flags & IW_ENCODE_FLAGS); 1319 + 1320 + if (GELIC_WEP_KEYS < key_index) 1321 + return -EINVAL; 1322 + 1323 + spin_lock_irqsave(&wl->lock, irqflag); 1324 + if (key_index) 1325 + key_index--; 1326 + else 1327 + 
key_index = wl->current_key; 1328 + 1329 + memset(ext, 0, sizeof(struct iw_encode_ext)); 1330 + switch (wl->group_cipher_method) { 1331 + case GELIC_WL_CIPHER_WEP: 1332 + ext->alg = IW_ENCODE_ALG_WEP; 1333 + enc->flags |= IW_ENCODE_ENABLED; 1334 + break; 1335 + case GELIC_WL_CIPHER_TKIP: 1336 + ext->alg = IW_ENCODE_ALG_TKIP; 1337 + enc->flags |= IW_ENCODE_ENABLED; 1338 + break; 1339 + case GELIC_WL_CIPHER_AES: 1340 + ext->alg = IW_ENCODE_ALG_CCMP; 1341 + enc->flags |= IW_ENCODE_ENABLED; 1342 + break; 1343 + case GELIC_WL_CIPHER_NONE: 1344 + default: 1345 + ext->alg = IW_ENCODE_ALG_NONE; 1346 + enc->flags |= IW_ENCODE_NOKEY; 1347 + break; 1348 + } 1349 + 1350 + if (!(enc->flags & IW_ENCODE_NOKEY)) { 1351 + if (max_key_len < wl->key_len[key_index]) { 1352 + ret = -E2BIG; 1353 + goto out; 1354 + } 1355 + if (test_bit(key_index, &wl->key_enabled)) 1356 + memcpy(ext->key, wl->key[key_index], 1357 + wl->key_len[key_index]); 1358 + else 1359 + pr_debug("%s: disabled key requested ix=%d\n", 1360 + __func__, key_index); 1361 + } 1362 + out: 1363 + spin_unlock_irqrestore(&wl->lock, irqflag); 1364 + pr_debug("%s: -> \n", __func__); 1365 + return ret; 1366 + } 1367 + /* SIOC{S,G}IWMODE */ 1368 + static int gelic_wl_set_mode(struct net_device *netdev, 1369 + struct iw_request_info *info, 1370 + union iwreq_data *data, char *extra) 1371 + { 1372 + __u32 mode = data->mode; 1373 + int ret; 1374 + 1375 + pr_debug("%s: <- \n", __func__); 1376 + if (mode == IW_MODE_INFRA) 1377 + ret = 0; 1378 + else 1379 + ret = -EOPNOTSUPP; 1380 + pr_debug("%s: -> %d\n", __func__, ret); 1381 + return ret; 1382 + } 1383 + 1384 + static int gelic_wl_get_mode(struct net_device *netdev, 1385 + struct iw_request_info *info, 1386 + union iwreq_data *data, char *extra) 1387 + { 1388 + __u32 *mode = &data->mode; 1389 + pr_debug("%s: <- \n", __func__); 1390 + *mode = IW_MODE_INFRA; 1391 + pr_debug("%s: ->\n", __func__); 1392 + return 0; 1393 + } 1394 + 1395 + /* SIOCIWFIRSTPRIV */ 1396 + static int 
hex2bin(u8 *str, u8 *bin, unsigned int len) 1397 + { 1398 + unsigned int i; 1399 + static unsigned char *hex = "0123456789ABCDEF"; 1400 + unsigned char *p, *q; 1401 + u8 tmp; 1402 + 1403 + if (len != WPA_PSK_LEN * 2) 1404 + return -EINVAL; 1405 + 1406 + for (i = 0; i < WPA_PSK_LEN * 2; i += 2) { 1407 + p = strchr(hex, toupper(str[i])); 1408 + q = strchr(hex, toupper(str[i + 1])); 1409 + if (!p || !q) { 1410 + pr_info("%s: unconvertible PSK digit=%d\n", 1411 + __func__, i); 1412 + return -EINVAL; 1413 + } 1414 + tmp = ((p - hex) << 4) + (q - hex); 1415 + *bin++ = tmp; 1416 + } 1417 + return 0; 1418 + }; 1419 + 1420 + static int gelic_wl_priv_set_psk(struct net_device *net_dev, 1421 + struct iw_request_info *info, 1422 + union iwreq_data *data, char *extra) 1423 + { 1424 + struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1425 + unsigned int len; 1426 + unsigned int irqflag; 1427 + int ret = 0; 1428 + 1429 + pr_debug("%s:<- len=%d\n", __func__, data->data.length); 1430 + len = data->data.length - 1; 1431 + if (len <= 2) 1432 + return -EINVAL; 1433 + 1434 + spin_lock_irqsave(&wl->lock, irqflag); 1435 + if (extra[0] == '"' && extra[len - 1] == '"') { 1436 + pr_debug("%s: passphrase mode\n", __func__); 1437 + /* pass phrase */ 1438 + if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) { 1439 + pr_info("%s: passphrase too long\n", __func__); 1440 + ret = -E2BIG; 1441 + goto out; 1442 + } 1443 + memset(wl->psk, 0, sizeof(wl->psk)); 1444 + wl->psk_len = len - 2; 1445 + memcpy(wl->psk, &(extra[1]), wl->psk_len); 1446 + wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE; 1447 + } else { 1448 + ret = hex2bin(extra, wl->psk, len); 1449 + if (ret) 1450 + goto out; 1451 + wl->psk_len = WPA_PSK_LEN; 1452 + wl->psk_type = GELIC_EURUS_WPA_PSK_BIN; 1453 + } 1454 + set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat); 1455 + out: 1456 + spin_unlock_irqrestore(&wl->lock, irqflag); 1457 + pr_debug("%s:->\n", __func__); 1458 + return ret; 1459 + } 1460 + 1461 + static int 
gelic_wl_priv_get_psk(struct net_device *net_dev, 1462 + struct iw_request_info *info, 1463 + union iwreq_data *data, char *extra) 1464 + { 1465 + struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev)); 1466 + char *p; 1467 + unsigned int irqflag; 1468 + unsigned int i; 1469 + 1470 + pr_debug("%s:<-\n", __func__); 1471 + if (!capable(CAP_NET_ADMIN)) 1472 + return -EPERM; 1473 + 1474 + spin_lock_irqsave(&wl->lock, irqflag); 1475 + p = extra; 1476 + if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) { 1477 + if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) { 1478 + for (i = 0; i < wl->psk_len; i++) { 1479 + sprintf(p, "%02xu", wl->psk[i]); 1480 + p += 2; 1481 + } 1482 + *p = '\0'; 1483 + data->data.length = wl->psk_len * 2; 1484 + } else { 1485 + *p++ = '"'; 1486 + memcpy(p, wl->psk, wl->psk_len); 1487 + p += wl->psk_len; 1488 + *p++ = '"'; 1489 + *p = '\0'; 1490 + data->data.length = wl->psk_len + 2; 1491 + } 1492 + } else 1493 + /* no psk set */ 1494 + data->data.length = 0; 1495 + spin_unlock_irqrestore(&wl->lock, irqflag); 1496 + pr_debug("%s:-> %d\n", __func__, data->data.length); 1497 + return 0; 1498 + } 1499 + 1500 + /* SIOCGIWNICKN */ 1501 + static int gelic_wl_get_nick(struct net_device *net_dev, 1502 + struct iw_request_info *info, 1503 + union iwreq_data *data, char *extra) 1504 + { 1505 + strcpy(extra, "gelic_wl"); 1506 + data->data.length = strlen(extra); 1507 + data->data.flags = 1; 1508 + return 0; 1509 + } 1510 + 1511 + 1512 + /* --- */ 1513 + 1514 + static struct iw_statistics *gelic_wl_get_wireless_stats( 1515 + struct net_device *netdev) 1516 + { 1517 + 1518 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 1519 + struct gelic_eurus_cmd *cmd; 1520 + struct iw_statistics *is; 1521 + struct gelic_eurus_rssi_info *rssi; 1522 + 1523 + pr_debug("%s: <-\n", __func__); 1524 + 1525 + is = &wl->iwstat; 1526 + memset(is, 0, sizeof(*is)); 1527 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_RSSI_CFG, 1528 + wl->buf, sizeof(*rssi)); 1529 + if 
(cmd && !cmd->status && !cmd->cmd_status) { 1530 + rssi = wl->buf; 1531 + is->qual.level = be16_to_cpu(rssi->rssi); 1532 + is->qual.updated = IW_QUAL_LEVEL_UPDATED | 1533 + IW_QUAL_QUAL_INVALID | IW_QUAL_NOISE_INVALID; 1534 + } else 1535 + /* not associated */ 1536 + is->qual.updated = IW_QUAL_ALL_INVALID; 1537 + 1538 + kfree(cmd); 1539 + pr_debug("%s: ->\n", __func__); 1540 + return is; 1541 + } 1542 + 1543 + /* 1544 + * scanning helpers 1545 + */ 1546 + static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan) 1547 + { 1548 + struct gelic_eurus_cmd *cmd; 1549 + int ret = 0; 1550 + 1551 + pr_debug("%s: <- always=%d\n", __func__, always_scan); 1552 + if (down_interruptible(&wl->scan_lock)) 1553 + return -ERESTARTSYS; 1554 + 1555 + /* 1556 + * If already a scan in progress, do not trigger more 1557 + */ 1558 + if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) { 1559 + pr_debug("%s: scanning now\n", __func__); 1560 + goto out; 1561 + } 1562 + 1563 + init_completion(&wl->scan_done); 1564 + /* 1565 + * If we have already a bss list, don't try to get new 1566 + */ 1567 + if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) { 1568 + pr_debug("%s: already has the list\n", __func__); 1569 + complete(&wl->scan_done); 1570 + goto out; 1571 + } 1572 + /* 1573 + * issue start scan request 1574 + */ 1575 + wl->scan_stat = GELIC_WL_SCAN_STAT_SCANNING; 1576 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_START_SCAN, 1577 + NULL, 0); 1578 + if (!cmd || cmd->status || cmd->cmd_status) { 1579 + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; 1580 + complete(&wl->scan_done); 1581 + ret = -ENOMEM; 1582 + goto out; 1583 + } 1584 + kfree(cmd); 1585 + out: 1586 + up(&wl->scan_lock); 1587 + pr_debug("%s: ->\n", __func__); 1588 + return ret; 1589 + } 1590 + 1591 + /* 1592 + * retrieve scan result from the chip (hypervisor) 1593 + * this function is invoked by schedule work. 
1594 + */ 1595 + static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl) 1596 + { 1597 + struct gelic_eurus_cmd *cmd = NULL; 1598 + struct gelic_wl_scan_info *target, *tmp; 1599 + struct gelic_wl_scan_info *oldest = NULL; 1600 + struct gelic_eurus_scan_info *scan_info; 1601 + unsigned int scan_info_size; 1602 + union iwreq_data data; 1603 + unsigned long this_time = jiffies; 1604 + unsigned int data_len, i, found, r; 1605 + DECLARE_MAC_BUF(mac); 1606 + 1607 + pr_debug("%s:start\n", __func__); 1608 + down(&wl->scan_lock); 1609 + 1610 + if (wl->scan_stat != GELIC_WL_SCAN_STAT_SCANNING) { 1611 + /* 1612 + * stop() may be called while scanning, ignore result 1613 + */ 1614 + pr_debug("%s: scan complete when stat != scanning(%d)\n", 1615 + __func__, wl->scan_stat); 1616 + goto out; 1617 + } 1618 + 1619 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_GET_SCAN, 1620 + wl->buf, PAGE_SIZE); 1621 + if (!cmd || cmd->status || cmd->cmd_status) { 1622 + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; 1623 + pr_info("%s:cmd failed\n", __func__); 1624 + kfree(cmd); 1625 + goto out; 1626 + } 1627 + data_len = cmd->size; 1628 + pr_debug("%s: data_len = %d\n", __func__, data_len); 1629 + kfree(cmd); 1630 + 1631 + /* OK, bss list retrieved */ 1632 + wl->scan_stat = GELIC_WL_SCAN_STAT_GOT_LIST; 1633 + 1634 + /* mark all entries are old */ 1635 + list_for_each_entry_safe(target, tmp, &wl->network_list, list) { 1636 + target->valid = 0; 1637 + /* expire too old entries */ 1638 + if (time_before(target->last_scanned + wl->scan_age, 1639 + this_time)) { 1640 + kfree(target->hwinfo); 1641 + target->hwinfo = NULL; 1642 + list_move_tail(&target->list, &wl->network_free_list); 1643 + } 1644 + } 1645 + 1646 + /* put them in the newtork_list */ 1647 + scan_info = wl->buf; 1648 + scan_info_size = 0; 1649 + i = 0; 1650 + while (scan_info_size < data_len) { 1651 + pr_debug("%s:size=%d bssid=%s scan_info=%p\n", __func__, 1652 + be16_to_cpu(scan_info->size), 1653 + print_mac(mac, 
&scan_info->bssid[2]), scan_info); 1654 + found = 0; 1655 + oldest = NULL; 1656 + list_for_each_entry(target, &wl->network_list, list) { 1657 + if (!compare_ether_addr(&target->hwinfo->bssid[2], 1658 + &scan_info->bssid[2])) { 1659 + found = 1; 1660 + pr_debug("%s: same BBS found scanned list\n", 1661 + __func__); 1662 + break; 1663 + } 1664 + if (!oldest || 1665 + (target->last_scanned < oldest->last_scanned)) 1666 + oldest = target; 1667 + } 1668 + 1669 + if (!found) { 1670 + /* not found in the list */ 1671 + if (list_empty(&wl->network_free_list)) { 1672 + /* expire oldest */ 1673 + target = oldest; 1674 + } else { 1675 + target = list_entry(wl->network_free_list.next, 1676 + struct gelic_wl_scan_info, 1677 + list); 1678 + } 1679 + } 1680 + 1681 + /* update the item */ 1682 + target->last_scanned = this_time; 1683 + target->valid = 1; 1684 + target->eurus_index = i; 1685 + kfree(target->hwinfo); 1686 + target->hwinfo = kzalloc(be16_to_cpu(scan_info->size), 1687 + GFP_KERNEL); 1688 + if (!target->hwinfo) { 1689 + pr_info("%s: kzalloc failed\n", __func__); 1690 + i++; 1691 + scan_info_size += be16_to_cpu(scan_info->size); 1692 + scan_info = (void *)scan_info + 1693 + be16_to_cpu(scan_info->size); 1694 + continue; 1695 + } 1696 + /* copy hw scan info */ 1697 + memcpy(target->hwinfo, scan_info, scan_info->size); 1698 + target->essid_len = strnlen(scan_info->essid, 1699 + sizeof(scan_info->essid)); 1700 + target->rate_len = 0; 1701 + for (r = 0; r < MAX_RATES_LENGTH; r++) 1702 + if (scan_info->rate[r]) 1703 + target->rate_len++; 1704 + if (8 < target->rate_len) 1705 + pr_info("%s: AP returns %d rates\n", __func__, 1706 + target->rate_len); 1707 + target->rate_ext_len = 0; 1708 + for (r = 0; r < MAX_RATES_EX_LENGTH; r++) 1709 + if (scan_info->ext_rate[r]) 1710 + target->rate_ext_len++; 1711 + list_move_tail(&target->list, &wl->network_list); 1712 + /* bump pointer */ 1713 + i++; 1714 + scan_info_size += be16_to_cpu(scan_info->size); 1715 + scan_info = (void 
*)scan_info + be16_to_cpu(scan_info->size); 1716 + } 1717 + memset(&data, 0, sizeof(data)); 1718 + wireless_send_event(port_to_netdev(wl_port(wl)), SIOCGIWSCAN, &data, 1719 + NULL); 1720 + out: 1721 + complete(&wl->scan_done); 1722 + up(&wl->scan_lock); 1723 + pr_debug("%s:end\n", __func__); 1724 + } 1725 + 1726 + /* 1727 + * Select an appropriate bss from current scan list regarding 1728 + * current settings from userspace. 1729 + * The caller must hold wl->scan_lock, 1730 + * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST 1731 + */ 1732 + static void update_best(struct gelic_wl_scan_info **best, 1733 + struct gelic_wl_scan_info *candid, 1734 + int *best_weight, 1735 + int *weight) 1736 + { 1737 + if (*best_weight < ++(*weight)) { 1738 + *best_weight = *weight; 1739 + *best = candid; 1740 + } 1741 + } 1742 + 1743 + static 1744 + struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) 1745 + { 1746 + struct gelic_wl_scan_info *scan_info; 1747 + struct gelic_wl_scan_info *best_bss; 1748 + int weight, best_weight; 1749 + u16 security; 1750 + DECLARE_MAC_BUF(mac); 1751 + 1752 + pr_debug("%s: <-\n", __func__); 1753 + 1754 + best_bss = NULL; 1755 + best_weight = 0; 1756 + 1757 + list_for_each_entry(scan_info, &wl->network_list, list) { 1758 + pr_debug("%s: station %p\n", __func__, scan_info); 1759 + 1760 + if (!scan_info->valid) { 1761 + pr_debug("%s: station invalid\n", __func__); 1762 + continue; 1763 + } 1764 + 1765 + /* If bss specified, check it only */ 1766 + if (test_bit(GELIC_WL_STAT_BSSID_SET, &wl->stat)) { 1767 + if (!compare_ether_addr(&scan_info->hwinfo->bssid[2], 1768 + wl->bssid)) { 1769 + best_bss = scan_info; 1770 + pr_debug("%s: bssid matched\n", __func__); 1771 + break; 1772 + } else { 1773 + pr_debug("%s: bssid unmached\n", __func__); 1774 + continue; 1775 + } 1776 + } 1777 + 1778 + weight = 0; 1779 + 1780 + /* security */ 1781 + security = be16_to_cpu(scan_info->hwinfo->security) & 1782 + GELIC_EURUS_SCAN_SEC_MASK; 
1783 + if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) { 1784 + if (security == GELIC_EURUS_SCAN_SEC_WPA2) 1785 + update_best(&best_bss, scan_info, 1786 + &best_weight, &weight); 1787 + else 1788 + continue; 1789 + } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA) { 1790 + if (security == GELIC_EURUS_SCAN_SEC_WPA) 1791 + update_best(&best_bss, scan_info, 1792 + &best_weight, &weight); 1793 + else 1794 + continue; 1795 + } else if (wl->wpa_level == GELIC_WL_WPA_LEVEL_NONE && 1796 + wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { 1797 + if (security == GELIC_EURUS_SCAN_SEC_WEP) 1798 + update_best(&best_bss, scan_info, 1799 + &best_weight, &weight); 1800 + else 1801 + continue; 1802 + } 1803 + 1804 + /* If ESSID is set, check it */ 1805 + if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat)) { 1806 + if ((scan_info->essid_len == wl->essid_len) && 1807 + !strncmp(wl->essid, 1808 + scan_info->hwinfo->essid, 1809 + scan_info->essid_len)) 1810 + update_best(&best_bss, scan_info, 1811 + &best_weight, &weight); 1812 + else 1813 + continue; 1814 + } 1815 + } 1816 + 1817 + #ifdef DEBUG 1818 + pr_debug("%s: -> bss=%p\n", __func__, best_bss); 1819 + if (best_bss) { 1820 + pr_debug("%s:addr=%s\n", __func__, 1821 + print_mac(mac, &best_bss->hwinfo->bssid[2])); 1822 + } 1823 + #endif 1824 + return best_bss; 1825 + } 1826 + 1827 + /* 1828 + * Setup WEP configuration to the chip 1829 + * The caller must hold wl->scan_lock, 1830 + * and on the state of wl->scan_state == GELIC_WL_SCAN_GOT_LIST 1831 + */ 1832 + static int gelic_wl_do_wep_setup(struct gelic_wl_info *wl) 1833 + { 1834 + unsigned int i; 1835 + struct gelic_eurus_wep_cfg *wep; 1836 + struct gelic_eurus_cmd *cmd; 1837 + int wep104 = 0; 1838 + int have_key = 0; 1839 + int ret = 0; 1840 + 1841 + pr_debug("%s: <-\n", __func__); 1842 + /* we can assume no one should uses the buffer */ 1843 + wep = wl->buf; 1844 + memset(wep, 0, sizeof(*wep)); 1845 + 1846 + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { 1847 + pr_debug("%s: 
WEP mode\n", __func__); 1848 + for (i = 0; i < GELIC_WEP_KEYS; i++) { 1849 + if (!test_bit(i, &wl->key_enabled)) 1850 + continue; 1851 + 1852 + pr_debug("%s: key#%d enabled\n", __func__, i); 1853 + have_key = 1; 1854 + if (wl->key_len[i] == 13) 1855 + wep104 = 1; 1856 + else if (wl->key_len[i] != 5) { 1857 + pr_info("%s: wrong wep key[%d]=%d\n", 1858 + __func__, i, wl->key_len[i]); 1859 + ret = -EINVAL; 1860 + goto out; 1861 + } 1862 + memcpy(wep->key[i], wl->key[i], wl->key_len[i]); 1863 + } 1864 + 1865 + if (!have_key) { 1866 + pr_info("%s: all wep key disabled\n", __func__); 1867 + ret = -EINVAL; 1868 + goto out; 1869 + } 1870 + 1871 + if (wep104) { 1872 + pr_debug("%s: 104bit key\n", __func__); 1873 + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_104BIT); 1874 + } else { 1875 + pr_debug("%s: 40bit key\n", __func__); 1876 + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_40BIT); 1877 + } 1878 + } else { 1879 + pr_debug("%s: NO encryption\n", __func__); 1880 + wep->security = cpu_to_be16(GELIC_EURUS_WEP_SEC_NONE); 1881 + } 1882 + 1883 + /* issue wep setup */ 1884 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WEP_CFG, 1885 + wep, sizeof(*wep)); 1886 + if (!cmd) 1887 + ret = -ENOMEM; 1888 + else if (cmd->status || cmd->cmd_status) 1889 + ret = -ENXIO; 1890 + 1891 + kfree(cmd); 1892 + out: 1893 + pr_debug("%s: ->\n", __func__); 1894 + return ret; 1895 + } 1896 + 1897 + #ifdef DEBUG 1898 + static const char *wpasecstr(enum gelic_eurus_wpa_security sec) 1899 + { 1900 + switch (sec) { 1901 + case GELIC_EURUS_WPA_SEC_NONE: 1902 + return "NONE"; 1903 + break; 1904 + case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP: 1905 + return "WPA_TKIP_TKIP"; 1906 + break; 1907 + case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES: 1908 + return "WPA_TKIP_AES"; 1909 + break; 1910 + case GELIC_EURUS_WPA_SEC_WPA_AES_AES: 1911 + return "WPA_AES_AES"; 1912 + break; 1913 + case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP: 1914 + return "WPA2_TKIP_TKIP"; 1915 + break; 1916 + case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES: 
1917 + return "WPA2_TKIP_AES"; 1918 + break; 1919 + case GELIC_EURUS_WPA_SEC_WPA2_AES_AES: 1920 + return "WPA2_AES_AES"; 1921 + break; 1922 + } 1923 + return ""; 1924 + }; 1925 + #endif 1926 + 1927 + static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl) 1928 + { 1929 + struct gelic_eurus_wpa_cfg *wpa; 1930 + struct gelic_eurus_cmd *cmd; 1931 + u16 security; 1932 + int ret = 0; 1933 + 1934 + pr_debug("%s: <-\n", __func__); 1935 + /* we can assume no one should uses the buffer */ 1936 + wpa = wl->buf; 1937 + memset(wpa, 0, sizeof(*wpa)); 1938 + 1939 + if (!test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) 1940 + pr_info("%s: PSK not configured yet\n", __func__); 1941 + 1942 + /* copy key */ 1943 + memcpy(wpa->psk, wl->psk, wl->psk_len); 1944 + 1945 + /* set security level */ 1946 + if (wl->wpa_level == GELIC_WL_WPA_LEVEL_WPA2) { 1947 + if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) { 1948 + security = GELIC_EURUS_WPA_SEC_WPA2_AES_AES; 1949 + } else { 1950 + if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES && 1951 + precise_ie()) 1952 + security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES; 1953 + else 1954 + security = GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP; 1955 + } 1956 + } else { 1957 + if (wl->group_cipher_method == GELIC_WL_CIPHER_AES) { 1958 + security = GELIC_EURUS_WPA_SEC_WPA_AES_AES; 1959 + } else { 1960 + if (wl->pairwise_cipher_method == GELIC_WL_CIPHER_AES && 1961 + precise_ie()) 1962 + security = GELIC_EURUS_WPA_SEC_WPA_TKIP_AES; 1963 + else 1964 + security = GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP; 1965 + } 1966 + } 1967 + wpa->security = cpu_to_be16(security); 1968 + 1969 + /* PSK type */ 1970 + wpa->psk_type = cpu_to_be16(wl->psk_type); 1971 + #ifdef DEBUG 1972 + pr_debug("%s: sec=%s psktype=%s\nn", __func__, 1973 + wpasecstr(wpa->security), 1974 + (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? 
1975 + "BIN" : "passphrase"); 1976 + #if 0 1977 + /* 1978 + * don't enable here if you plan to submit 1979 + * the debug log because this dumps your precious 1980 + * passphrase/key. 1981 + */ 1982 + pr_debug("%s: psk=%s\n", 1983 + (wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ? 1984 + (char *)"N/A" : (char *)wpa->psk); 1985 + #endif 1986 + #endif 1987 + /* issue wpa setup */ 1988 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_WPA_CFG, 1989 + wpa, sizeof(*wpa)); 1990 + if (!cmd) 1991 + ret = -ENOMEM; 1992 + else if (cmd->status || cmd->cmd_status) 1993 + ret = -ENXIO; 1994 + kfree(cmd); 1995 + pr_debug("%s: --> %d\n", __func__, ret); 1996 + return ret; 1997 + } 1998 + 1999 + /* 2000 + * Start association. caller must hold assoc_stat_lock 2001 + */ 2002 + static int gelic_wl_associate_bss(struct gelic_wl_info *wl, 2003 + struct gelic_wl_scan_info *bss) 2004 + { 2005 + struct gelic_eurus_cmd *cmd; 2006 + struct gelic_eurus_common_cfg *common; 2007 + int ret = 0; 2008 + unsigned long rc; 2009 + 2010 + pr_debug("%s: <-\n", __func__); 2011 + 2012 + /* do common config */ 2013 + common = wl->buf; 2014 + memset(common, 0, sizeof(*common)); 2015 + common->bss_type = cpu_to_be16(GELIC_EURUS_BSS_INFRA); 2016 + common->op_mode = cpu_to_be16(GELIC_EURUS_OPMODE_11BG); 2017 + 2018 + common->scan_index = cpu_to_be16(bss->eurus_index); 2019 + switch (wl->auth_method) { 2020 + case GELIC_EURUS_AUTH_OPEN: 2021 + common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_OPEN); 2022 + break; 2023 + case GELIC_EURUS_AUTH_SHARED: 2024 + common->auth_method = cpu_to_be16(GELIC_EURUS_AUTH_SHARED); 2025 + break; 2026 + } 2027 + 2028 + #ifdef DEBUG 2029 + scan_list_dump(wl); 2030 + #endif 2031 + pr_debug("%s: common cfg index=%d bsstype=%d auth=%d\n", __func__, 2032 + be16_to_cpu(common->scan_index), 2033 + be16_to_cpu(common->bss_type), 2034 + be16_to_cpu(common->auth_method)); 2035 + 2036 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_SET_COMMON_CFG, 2037 + common, sizeof(*common)); 2038 + 
if (!cmd || cmd->status || cmd->cmd_status) { 2039 + ret = -ENOMEM; 2040 + kfree(cmd); 2041 + goto out; 2042 + } 2043 + kfree(cmd); 2044 + 2045 + /* WEP/WPA */ 2046 + switch (wl->wpa_level) { 2047 + case GELIC_WL_WPA_LEVEL_NONE: 2048 + /* If WEP or no security, setup WEP config */ 2049 + ret = gelic_wl_do_wep_setup(wl); 2050 + break; 2051 + case GELIC_WL_WPA_LEVEL_WPA: 2052 + case GELIC_WL_WPA_LEVEL_WPA2: 2053 + ret = gelic_wl_do_wpa_setup(wl); 2054 + break; 2055 + }; 2056 + 2057 + if (ret) { 2058 + pr_debug("%s: WEP/WPA setup failed %d\n", __func__, 2059 + ret); 2060 + } 2061 + 2062 + /* start association */ 2063 + init_completion(&wl->assoc_done); 2064 + wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATING; 2065 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_ASSOC, 2066 + NULL, 0); 2067 + if (!cmd || cmd->status || cmd->cmd_status) { 2068 + pr_debug("%s: assoc request failed\n", __func__); 2069 + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; 2070 + kfree(cmd); 2071 + ret = -ENOMEM; 2072 + gelic_wl_send_iwap_event(wl, NULL); 2073 + goto out; 2074 + } 2075 + kfree(cmd); 2076 + 2077 + /* wait for connected event */ 2078 + rc = wait_for_completion_timeout(&wl->assoc_done, HZ * 4);/*FIXME*/ 2079 + 2080 + if (!rc) { 2081 + /* timeouted. 
Maybe key or cyrpt mode is wrong */ 2082 + pr_info("%s: connect timeout \n", __func__); 2083 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, 2084 + NULL, 0); 2085 + kfree(cmd); 2086 + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; 2087 + gelic_wl_send_iwap_event(wl, NULL); 2088 + ret = -ENXIO; 2089 + } else { 2090 + wl->assoc_stat = GELIC_WL_ASSOC_STAT_ASSOCIATED; 2091 + /* copy bssid */ 2092 + memcpy(wl->active_bssid, &bss->hwinfo->bssid[2], ETH_ALEN); 2093 + 2094 + /* send connect event */ 2095 + gelic_wl_send_iwap_event(wl, wl->active_bssid); 2096 + pr_info("%s: connected\n", __func__); 2097 + } 2098 + out: 2099 + pr_debug("%s: ->\n", __func__); 2100 + return ret; 2101 + } 2102 + 2103 + /* 2104 + * connected event 2105 + */ 2106 + static void gelic_wl_connected_event(struct gelic_wl_info *wl, 2107 + u64 event) 2108 + { 2109 + u64 desired_event = 0; 2110 + 2111 + switch (wl->wpa_level) { 2112 + case GELIC_WL_WPA_LEVEL_NONE: 2113 + desired_event = GELIC_LV1_WL_EVENT_CONNECTED; 2114 + break; 2115 + case GELIC_WL_WPA_LEVEL_WPA: 2116 + case GELIC_WL_WPA_LEVEL_WPA2: 2117 + desired_event = GELIC_LV1_WL_EVENT_WPA_CONNECTED; 2118 + break; 2119 + } 2120 + 2121 + if (desired_event == event) { 2122 + pr_debug("%s: completed \n", __func__); 2123 + complete(&wl->assoc_done); 2124 + netif_carrier_on(port_to_netdev(wl_port(wl))); 2125 + } else 2126 + pr_debug("%s: event %#lx under wpa\n", 2127 + __func__, event); 2128 + } 2129 + 2130 + /* 2131 + * disconnect event 2132 + */ 2133 + static void gelic_wl_disconnect_event(struct gelic_wl_info *wl, 2134 + u64 event) 2135 + { 2136 + struct gelic_eurus_cmd *cmd; 2137 + int lock; 2138 + 2139 + /* 2140 + * If we fall here in the middle of association, 2141 + * associate_bss() should be waiting for complation of 2142 + * wl->assoc_done. 
2143 + * As it waits with timeout, just leave assoc_done 2144 + * uncompleted, then it terminates with timeout 2145 + */ 2146 + if (down_trylock(&wl->assoc_stat_lock)) { 2147 + pr_debug("%s: already locked\n", __func__); 2148 + lock = 0; 2149 + } else { 2150 + pr_debug("%s: obtain lock\n", __func__); 2151 + lock = 1; 2152 + } 2153 + 2154 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0); 2155 + kfree(cmd); 2156 + 2157 + /* send disconnected event to the supplicant */ 2158 + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) 2159 + gelic_wl_send_iwap_event(wl, NULL); 2160 + 2161 + wl->assoc_stat = GELIC_WL_ASSOC_STAT_DISCONN; 2162 + netif_carrier_off(port_to_netdev(wl_port(wl))); 2163 + 2164 + if (lock) 2165 + up(&wl->assoc_stat_lock); 2166 + } 2167 + /* 2168 + * event worker 2169 + */ 2170 + #ifdef DEBUG 2171 + static const char *eventstr(enum gelic_lv1_wl_event event) 2172 + { 2173 + static char buf[32]; 2174 + char *ret; 2175 + if (event & GELIC_LV1_WL_EVENT_DEVICE_READY) 2176 + ret = "EURUS_READY"; 2177 + else if (event & GELIC_LV1_WL_EVENT_SCAN_COMPLETED) 2178 + ret = "SCAN_COMPLETED"; 2179 + else if (event & GELIC_LV1_WL_EVENT_DEAUTH) 2180 + ret = "DEAUTH"; 2181 + else if (event & GELIC_LV1_WL_EVENT_BEACON_LOST) 2182 + ret = "BEACON_LOST"; 2183 + else if (event & GELIC_LV1_WL_EVENT_CONNECTED) 2184 + ret = "CONNECTED"; 2185 + else if (event & GELIC_LV1_WL_EVENT_WPA_CONNECTED) 2186 + ret = "WPA_CONNECTED"; 2187 + else if (event & GELIC_LV1_WL_EVENT_WPA_ERROR) 2188 + ret = "WPA_ERROR"; 2189 + else { 2190 + sprintf(buf, "Unknown(%#x)", event); 2191 + ret = buf; 2192 + } 2193 + return ret; 2194 + } 2195 + #else 2196 + static const char *eventstr(enum gelic_lv1_wl_event event) 2197 + { 2198 + return NULL; 2199 + } 2200 + #endif 2201 + static void gelic_wl_event_worker(struct work_struct *work) 2202 + { 2203 + struct gelic_wl_info *wl; 2204 + struct gelic_port *port; 2205 + u64 event, tmp; 2206 + int status; 2207 + 2208 + pr_debug("%s:start\n", 
__func__); 2209 + wl = container_of(work, struct gelic_wl_info, event_work.work); 2210 + port = wl_port(wl); 2211 + while (1) { 2212 + status = lv1_net_control(bus_id(port->card), dev_id(port->card), 2213 + GELIC_LV1_GET_WLAN_EVENT, 0, 0, 0, 2214 + &event, &tmp); 2215 + if (status) { 2216 + if (status != LV1_NO_ENTRY) 2217 + pr_debug("%s:wlan event failed %d\n", 2218 + __func__, status); 2219 + /* got all events */ 2220 + pr_debug("%s:end\n", __func__); 2221 + return; 2222 + } 2223 + pr_debug("%s: event=%s\n", __func__, eventstr(event)); 2224 + switch (event) { 2225 + case GELIC_LV1_WL_EVENT_SCAN_COMPLETED: 2226 + gelic_wl_scan_complete_event(wl); 2227 + break; 2228 + case GELIC_LV1_WL_EVENT_BEACON_LOST: 2229 + case GELIC_LV1_WL_EVENT_DEAUTH: 2230 + gelic_wl_disconnect_event(wl, event); 2231 + break; 2232 + case GELIC_LV1_WL_EVENT_CONNECTED: 2233 + case GELIC_LV1_WL_EVENT_WPA_CONNECTED: 2234 + gelic_wl_connected_event(wl, event); 2235 + break; 2236 + default: 2237 + break; 2238 + } 2239 + } /* while */ 2240 + } 2241 + /* 2242 + * association worker 2243 + */ 2244 + static void gelic_wl_assoc_worker(struct work_struct *work) 2245 + { 2246 + struct gelic_wl_info *wl; 2247 + 2248 + struct gelic_wl_scan_info *best_bss; 2249 + int ret; 2250 + 2251 + wl = container_of(work, struct gelic_wl_info, assoc_work.work); 2252 + 2253 + down(&wl->assoc_stat_lock); 2254 + 2255 + if (wl->assoc_stat != GELIC_WL_ASSOC_STAT_DISCONN) 2256 + goto out; 2257 + 2258 + ret = gelic_wl_start_scan(wl, 0); 2259 + if (ret == -ERESTARTSYS) { 2260 + pr_debug("%s: scan start failed association\n", __func__); 2261 + schedule_delayed_work(&wl->assoc_work, HZ/10); /*FIXME*/ 2262 + goto out; 2263 + } else if (ret) { 2264 + pr_info("%s: scan prerequisite failed\n", __func__); 2265 + goto out; 2266 + } 2267 + 2268 + /* 2269 + * Wait for bss scan completion 2270 + * If we have scan list already, gelic_wl_start_scan() 2271 + * returns OK and raises the complete. 
Thus, 2272 + * it's ok to wait unconditionally here 2273 + */ 2274 + wait_for_completion(&wl->scan_done); 2275 + 2276 + pr_debug("%s: scan done\n", __func__); 2277 + down(&wl->scan_lock); 2278 + if (wl->scan_stat != GELIC_WL_SCAN_STAT_GOT_LIST) { 2279 + gelic_wl_send_iwap_event(wl, NULL); 2280 + pr_info("%s: no scan list. association failed\n", __func__); 2281 + goto scan_lock_out; 2282 + } 2283 + 2284 + /* find best matching bss */ 2285 + best_bss = gelic_wl_find_best_bss(wl); 2286 + if (!best_bss) { 2287 + gelic_wl_send_iwap_event(wl, NULL); 2288 + pr_info("%s: no bss matched. association failed\n", __func__); 2289 + goto scan_lock_out; 2290 + } 2291 + 2292 + /* ok, do association */ 2293 + ret = gelic_wl_associate_bss(wl, best_bss); 2294 + if (ret) 2295 + pr_info("%s: association failed %d\n", __func__, ret); 2296 + scan_lock_out: 2297 + up(&wl->scan_lock); 2298 + out: 2299 + up(&wl->assoc_stat_lock); 2300 + } 2301 + /* 2302 + * Interrupt handler 2303 + * Called from the ethernet interrupt handler 2304 + * Processes wireless specific virtual interrupts only 2305 + */ 2306 + void gelic_wl_interrupt(struct net_device *netdev, u64 status) 2307 + { 2308 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 2309 + 2310 + if (status & GELIC_CARD_WLAN_COMMAND_COMPLETED) { 2311 + pr_debug("%s:cmd complete\n", __func__); 2312 + complete(&wl->cmd_done_intr); 2313 + } 2314 + 2315 + if (status & GELIC_CARD_WLAN_EVENT_RECEIVED) { 2316 + pr_debug("%s:event received\n", __func__); 2317 + queue_delayed_work(wl->event_queue, &wl->event_work, 0); 2318 + } 2319 + } 2320 + 2321 + /* 2322 + * driver helpers 2323 + */ 2324 + #define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT] 2325 + static const iw_handler gelic_wl_wext_handler[] = 2326 + { 2327 + IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name, 2328 + IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range, 2329 + IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan, 2330 + IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan, 2331 + IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth, 
2332 + IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth, 2333 + IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid, 2334 + IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid, 2335 + IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode, 2336 + IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode, 2337 + IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap, 2338 + IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap, 2339 + IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext, 2340 + IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext, 2341 + IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode, 2342 + IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode, 2343 + IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick, 2344 + }; 2345 + 2346 + static struct iw_priv_args gelic_wl_private_args[] = 2347 + { 2348 + { 2349 + .cmd = GELIC_WL_PRIV_SET_PSK, 2350 + .set_args = IW_PRIV_TYPE_CHAR | 2351 + (GELIC_WL_EURUS_PSK_MAX_LEN + 2), 2352 + .name = "set_psk" 2353 + }, 2354 + { 2355 + .cmd = GELIC_WL_PRIV_GET_PSK, 2356 + .get_args = IW_PRIV_TYPE_CHAR | 2357 + (GELIC_WL_EURUS_PSK_MAX_LEN + 2), 2358 + .name = "get_psk" 2359 + } 2360 + }; 2361 + 2362 + static const iw_handler gelic_wl_private_handler[] = 2363 + { 2364 + gelic_wl_priv_set_psk, 2365 + gelic_wl_priv_get_psk, 2366 + }; 2367 + 2368 + static const struct iw_handler_def gelic_wl_wext_handler_def = { 2369 + .num_standard = ARRAY_SIZE(gelic_wl_wext_handler), 2370 + .standard = gelic_wl_wext_handler, 2371 + .get_wireless_stats = gelic_wl_get_wireless_stats, 2372 + .num_private = ARRAY_SIZE(gelic_wl_private_handler), 2373 + .num_private_args = ARRAY_SIZE(gelic_wl_private_args), 2374 + .private = gelic_wl_private_handler, 2375 + .private_args = gelic_wl_private_args, 2376 + }; 2377 + 2378 + static struct net_device *gelic_wl_alloc(struct gelic_card *card) 2379 + { 2380 + struct net_device *netdev; 2381 + struct gelic_port *port; 2382 + struct gelic_wl_info *wl; 2383 + unsigned int i; 2384 + 2385 + pr_debug("%s:start\n", __func__); 2386 + netdev = alloc_etherdev(sizeof(struct gelic_port) + 2387 + sizeof(struct 
gelic_wl_info)); 2388 + pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card); 2389 + if (!netdev) 2390 + return NULL; 2391 + 2392 + port = netdev_priv(netdev); 2393 + port->netdev = netdev; 2394 + port->card = card; 2395 + port->type = GELIC_PORT_WIRELESS; 2396 + 2397 + wl = port_wl(port); 2398 + pr_debug("%s: wl=%p port=%p\n", __func__, wl, port); 2399 + 2400 + /* allocate scan list */ 2401 + wl->networks = kzalloc(sizeof(struct gelic_wl_scan_info) * 2402 + GELIC_WL_BSS_MAX_ENT, GFP_KERNEL); 2403 + 2404 + if (!wl->networks) 2405 + goto fail_bss; 2406 + 2407 + wl->eurus_cmd_queue = create_singlethread_workqueue("gelic_cmd"); 2408 + if (!wl->eurus_cmd_queue) 2409 + goto fail_cmd_workqueue; 2410 + 2411 + wl->event_queue = create_singlethread_workqueue("gelic_event"); 2412 + if (!wl->event_queue) 2413 + goto fail_event_workqueue; 2414 + 2415 + INIT_LIST_HEAD(&wl->network_free_list); 2416 + INIT_LIST_HEAD(&wl->network_list); 2417 + for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++) 2418 + list_add_tail(&wl->networks[i].list, 2419 + &wl->network_free_list); 2420 + init_completion(&wl->cmd_done_intr); 2421 + 2422 + INIT_DELAYED_WORK(&wl->event_work, gelic_wl_event_worker); 2423 + INIT_DELAYED_WORK(&wl->assoc_work, gelic_wl_assoc_worker); 2424 + init_MUTEX(&wl->scan_lock); 2425 + init_MUTEX(&wl->assoc_stat_lock); 2426 + 2427 + init_completion(&wl->scan_done); 2428 + /* for the case that no scan request is issued and stop() is called */ 2429 + complete(&wl->scan_done); 2430 + 2431 + spin_lock_init(&wl->lock); 2432 + 2433 + wl->scan_age = 5*HZ; /* FIXME */ 2434 + 2435 + /* buffer for receiving scanned list etc */ 2436 + BUILD_BUG_ON(PAGE_SIZE < 2437 + sizeof(struct gelic_eurus_scan_info) * 2438 + GELIC_EURUS_MAX_SCAN); 2439 + wl->buf = (void *)get_zeroed_page(GFP_KERNEL); 2440 + if (!wl->buf) { 2441 + pr_info("%s:buffer allocation failed\n", __func__); 2442 + goto fail_getpage; 2443 + } 2444 + pr_debug("%s:end\n", __func__); 2445 + return netdev; 2446 + 2447 + 
fail_getpage: 2448 + destroy_workqueue(wl->event_queue); 2449 + fail_event_workqueue: 2450 + destroy_workqueue(wl->eurus_cmd_queue); 2451 + fail_cmd_workqueue: 2452 + kfree(wl->networks); 2453 + fail_bss: 2454 + free_netdev(netdev); 2455 + pr_debug("%s:end error\n", __func__); 2456 + return NULL; 2457 + 2458 + } 2459 + 2460 + static void gelic_wl_free(struct gelic_wl_info *wl) 2461 + { 2462 + struct gelic_wl_scan_info *scan_info; 2463 + unsigned int i; 2464 + 2465 + pr_debug("%s: <-\n", __func__); 2466 + 2467 + pr_debug("%s: destroy queues\n", __func__); 2468 + destroy_workqueue(wl->eurus_cmd_queue); 2469 + destroy_workqueue(wl->event_queue); 2470 + 2471 + scan_info = wl->networks; 2472 + for (i = 0; i < GELIC_WL_BSS_MAX_ENT; i++, scan_info++) 2473 + kfree(scan_info->hwinfo); 2474 + kfree(wl->networks); 2475 + 2476 + free_netdev(port_to_netdev(wl_port(wl))); 2477 + 2478 + pr_debug("%s: ->\n", __func__); 2479 + } 2480 + 2481 + static int gelic_wl_try_associate(struct net_device *netdev) 2482 + { 2483 + struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 2484 + int ret = -1; 2485 + unsigned int i; 2486 + 2487 + pr_debug("%s: <-\n", __func__); 2488 + 2489 + /* check constraits for start association */ 2490 + /* for no access restriction AP */ 2491 + if (wl->group_cipher_method == GELIC_WL_CIPHER_NONE) { 2492 + if (test_bit(GELIC_WL_STAT_CONFIGURED, 2493 + &wl->stat)) 2494 + goto do_associate; 2495 + else { 2496 + pr_debug("%s: no wep, not configured\n", __func__); 2497 + return ret; 2498 + } 2499 + } 2500 + 2501 + /* for WEP, one of four keys should be set */ 2502 + if (wl->group_cipher_method == GELIC_WL_CIPHER_WEP) { 2503 + /* one of keys set */ 2504 + for (i = 0; i < GELIC_WEP_KEYS; i++) { 2505 + if (test_bit(i, &wl->key_enabled)) 2506 + goto do_associate; 2507 + } 2508 + pr_debug("%s: WEP, but no key specified\n", __func__); 2509 + return ret; 2510 + } 2511 + 2512 + /* for WPA[2], psk should be set */ 2513 + if ((wl->group_cipher_method == 
GELIC_WL_CIPHER_TKIP) || 2514 + (wl->group_cipher_method == GELIC_WL_CIPHER_AES)) { 2515 + if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, 2516 + &wl->stat)) 2517 + goto do_associate; 2518 + else { 2519 + pr_debug("%s: AES/TKIP, but PSK not configured\n", 2520 + __func__); 2521 + return ret; 2522 + } 2523 + } 2524 + 2525 + do_associate: 2526 + ret = schedule_delayed_work(&wl->assoc_work, 0); 2527 + pr_debug("%s: start association work %d\n", __func__, ret); 2528 + return ret; 2529 + } 2530 + 2531 + /* 2532 + * netdev handlers 2533 + */ 2534 + static int gelic_wl_open(struct net_device *netdev) 2535 + { 2536 + struct gelic_card *card = netdev_card(netdev); 2537 + 2538 + pr_debug("%s:->%p\n", __func__, netdev); 2539 + 2540 + gelic_card_up(card); 2541 + 2542 + /* try to associate */ 2543 + gelic_wl_try_associate(netdev); 2544 + 2545 + netif_start_queue(netdev); 2546 + 2547 + pr_debug("%s:<-\n", __func__); 2548 + return 0; 2549 + } 2550 + 2551 + /* 2552 + * reset state machine 2553 + */ 2554 + static int gelic_wl_reset_state(struct gelic_wl_info *wl) 2555 + { 2556 + struct gelic_wl_scan_info *target; 2557 + struct gelic_wl_scan_info *tmp; 2558 + 2559 + /* empty scan list */ 2560 + list_for_each_entry_safe(target, tmp, &wl->network_list, list) { 2561 + list_move_tail(&target->list, &wl->network_free_list); 2562 + } 2563 + wl->scan_stat = GELIC_WL_SCAN_STAT_INIT; 2564 + 2565 + /* clear configuration */ 2566 + wl->auth_method = GELIC_EURUS_AUTH_OPEN; 2567 + wl->group_cipher_method = GELIC_WL_CIPHER_NONE; 2568 + wl->pairwise_cipher_method = GELIC_WL_CIPHER_NONE; 2569 + wl->wpa_level = GELIC_WL_WPA_LEVEL_NONE; 2570 + 2571 + wl->key_enabled = 0; 2572 + wl->current_key = 0; 2573 + 2574 + wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE; 2575 + wl->psk_len = 0; 2576 + 2577 + wl->essid_len = 0; 2578 + memset(wl->essid, 0, sizeof(wl->essid)); 2579 + memset(wl->bssid, 0, sizeof(wl->bssid)); 2580 + memset(wl->active_bssid, 0, sizeof(wl->active_bssid)); 2581 + 2582 + wl->assoc_stat = 
GELIC_WL_ASSOC_STAT_DISCONN; 2583 + 2584 + memset(&wl->iwstat, 0, sizeof(wl->iwstat)); 2585 + /* all status bit clear */ 2586 + wl->stat = 0; 2587 + return 0; 2588 + } 2589 + 2590 + /* 2591 + * Tell eurus to terminate association 2592 + */ 2593 + static void gelic_wl_disconnect(struct net_device *netdev) 2594 + { 2595 + struct gelic_port *port = netdev_priv(netdev); 2596 + struct gelic_wl_info *wl = port_wl(port); 2597 + struct gelic_eurus_cmd *cmd; 2598 + 2599 + /* 2600 + * If scann process is running on chip, 2601 + * further requests will be rejected 2602 + */ 2603 + if (wl->scan_stat == GELIC_WL_SCAN_STAT_SCANNING) 2604 + wait_for_completion_timeout(&wl->scan_done, HZ); 2605 + 2606 + cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, NULL, 0); 2607 + kfree(cmd); 2608 + gelic_wl_send_iwap_event(wl, NULL); 2609 + }; 2610 + 2611 + static int gelic_wl_stop(struct net_device *netdev) 2612 + { 2613 + struct gelic_port *port = netdev_priv(netdev); 2614 + struct gelic_wl_info *wl = port_wl(port); 2615 + struct gelic_card *card = netdev_card(netdev); 2616 + 2617 + pr_debug("%s:<-\n", __func__); 2618 + 2619 + /* 2620 + * Cancel pending association work. 
2621 + * event work can run after netdev down 2622 + */ 2623 + cancel_delayed_work(&wl->assoc_work); 2624 + 2625 + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) 2626 + gelic_wl_disconnect(netdev); 2627 + 2628 + /* reset our state machine */ 2629 + gelic_wl_reset_state(wl); 2630 + 2631 + netif_stop_queue(netdev); 2632 + 2633 + gelic_card_down(card); 2634 + 2635 + pr_debug("%s:->\n", __func__); 2636 + return 0; 2637 + } 2638 + 2639 + /* -- */ 2640 + 2641 + static struct ethtool_ops gelic_wl_ethtool_ops = { 2642 + .get_drvinfo = gelic_net_get_drvinfo, 2643 + .get_link = gelic_wl_get_link, 2644 + .get_tx_csum = ethtool_op_get_tx_csum, 2645 + .set_tx_csum = ethtool_op_set_tx_csum, 2646 + .get_rx_csum = gelic_net_get_rx_csum, 2647 + .set_rx_csum = gelic_net_set_rx_csum, 2648 + }; 2649 + 2650 + static void gelic_wl_setup_netdev_ops(struct net_device *netdev) 2651 + { 2652 + struct gelic_wl_info *wl; 2653 + wl = port_wl(netdev_priv(netdev)); 2654 + BUG_ON(!wl); 2655 + netdev->open = &gelic_wl_open; 2656 + netdev->stop = &gelic_wl_stop; 2657 + netdev->hard_start_xmit = &gelic_net_xmit; 2658 + netdev->set_multicast_list = &gelic_net_set_multi; 2659 + netdev->change_mtu = &gelic_net_change_mtu; 2660 + netdev->wireless_data = &wl->wireless_data; 2661 + netdev->wireless_handlers = &gelic_wl_wext_handler_def; 2662 + /* tx watchdog */ 2663 + netdev->tx_timeout = &gelic_net_tx_timeout; 2664 + netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT; 2665 + 2666 + netdev->ethtool_ops = &gelic_wl_ethtool_ops; 2667 + #ifdef CONFIG_NET_POLL_CONTROLLER 2668 + netdev->poll_controller = gelic_net_poll_controller; 2669 + #endif 2670 + } 2671 + 2672 + /* 2673 + * driver probe/remove 2674 + */ 2675 + int gelic_wl_driver_probe(struct gelic_card *card) 2676 + { 2677 + int ret; 2678 + struct net_device *netdev; 2679 + 2680 + pr_debug("%s:start\n", __func__); 2681 + 2682 + if (ps3_compare_firmware_version(1, 6, 0) < 0) 2683 + return 0; 2684 + if (!card->vlan[GELIC_PORT_WIRELESS].tx) 2685 + 
return 0; 2686 + 2687 + /* alloc netdevice for wireless */ 2688 + netdev = gelic_wl_alloc(card); 2689 + if (!netdev) 2690 + return -ENOMEM; 2691 + 2692 + /* setup net_device structure */ 2693 + gelic_wl_setup_netdev_ops(netdev); 2694 + 2695 + /* setup some of net_device and register it */ 2696 + ret = gelic_net_setup_netdev(netdev, card); 2697 + if (ret) 2698 + goto fail_setup; 2699 + card->netdev[GELIC_PORT_WIRELESS] = netdev; 2700 + 2701 + /* add enable wireless interrupt */ 2702 + card->irq_mask |= GELIC_CARD_WLAN_EVENT_RECEIVED | 2703 + GELIC_CARD_WLAN_COMMAND_COMPLETED; 2704 + /* to allow wireless commands while both interfaces are down */ 2705 + gelic_card_set_irq_mask(card, GELIC_CARD_WLAN_EVENT_RECEIVED | 2706 + GELIC_CARD_WLAN_COMMAND_COMPLETED); 2707 + pr_debug("%s:end\n", __func__); 2708 + return 0; 2709 + 2710 + fail_setup: 2711 + gelic_wl_free(port_wl(netdev_port(netdev))); 2712 + 2713 + return ret; 2714 + } 2715 + 2716 + int gelic_wl_driver_remove(struct gelic_card *card) 2717 + { 2718 + struct gelic_wl_info *wl; 2719 + struct net_device *netdev; 2720 + 2721 + pr_debug("%s:start\n", __func__); 2722 + 2723 + if (ps3_compare_firmware_version(1, 6, 0) < 0) 2724 + return 0; 2725 + if (!card->vlan[GELIC_PORT_WIRELESS].tx) 2726 + return 0; 2727 + 2728 + netdev = card->netdev[GELIC_PORT_WIRELESS]; 2729 + wl = port_wl(netdev_priv(netdev)); 2730 + 2731 + /* if the interface was not up, but associated */ 2732 + if (wl->assoc_stat == GELIC_WL_ASSOC_STAT_ASSOCIATED) 2733 + gelic_wl_disconnect(netdev); 2734 + 2735 + complete(&wl->cmd_done_intr); 2736 + 2737 + /* cancel all work queue */ 2738 + cancel_delayed_work(&wl->assoc_work); 2739 + cancel_delayed_work(&wl->event_work); 2740 + flush_workqueue(wl->eurus_cmd_queue); 2741 + flush_workqueue(wl->event_queue); 2742 + 2743 + unregister_netdev(netdev); 2744 + 2745 + /* disable wireless interrupt */ 2746 + pr_debug("%s: disable intr\n", __func__); 2747 + card->irq_mask &= ~(GELIC_CARD_WLAN_EVENT_RECEIVED | 2748 + 
GELIC_CARD_WLAN_COMMAND_COMPLETED); 2749 + /* free bss list, netdev*/ 2750 + gelic_wl_free(wl); 2751 + pr_debug("%s:end\n", __func__); 2752 + return 0; 2753 + }
+329
drivers/net/ps3_gelic_wireless.h
··· 1 + /* 2 + * PS3 gelic network driver. 3 + * 4 + * Copyright (C) 2007 Sony Computer Entertainment Inc. 5 + * Copyright 2007 Sony Corporation 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License as published by 9 + * the Free Software Foundation version 2. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program; if not, write to the Free Software 18 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 + */ 20 + #ifndef _GELIC_WIRELESS_H 21 + #define _GELIC_WIRELESS_H 22 + 23 + #include <linux/wireless.h> 24 + #include <net/iw_handler.h> 25 + 26 + 27 + /* return value from GELIC_LV1_GET_WLAN_EVENT netcontrol */ 28 + enum gelic_lv1_wl_event { 29 + GELIC_LV1_WL_EVENT_DEVICE_READY = 0x01, /* Eurus ready */ 30 + GELIC_LV1_WL_EVENT_SCAN_COMPLETED = 0x02, /* Scan has completed */ 31 + GELIC_LV1_WL_EVENT_DEAUTH = 0x04, /* Deauthed by the AP */ 32 + GELIC_LV1_WL_EVENT_BEACON_LOST = 0x08, /* Beacon lost detected */ 33 + GELIC_LV1_WL_EVENT_CONNECTED = 0x10, /* Connected to AP */ 34 + GELIC_LV1_WL_EVENT_WPA_CONNECTED = 0x20, /* WPA connection */ 35 + GELIC_LV1_WL_EVENT_WPA_ERROR = 0x40, /* MIC error */ 36 + }; 37 + 38 + /* arguments for GELIC_LV1_POST_WLAN_COMMAND netcontrol */ 39 + enum gelic_eurus_command { 40 + GELIC_EURUS_CMD_ASSOC = 1, /* association start */ 41 + GELIC_EURUS_CMD_DISASSOC = 2, /* disassociate */ 42 + GELIC_EURUS_CMD_START_SCAN = 3, /* scan start */ 43 + GELIC_EURUS_CMD_GET_SCAN = 4, /* get scan result */ 44 + GELIC_EURUS_CMD_SET_COMMON_CFG = 5, /* set common config */ 45 + GELIC_EURUS_CMD_GET_COMMON_CFG = 6, /* set common config */ 46 + 
GELIC_EURUS_CMD_SET_WEP_CFG = 7, /* set WEP config */ 47 + GELIC_EURUS_CMD_GET_WEP_CFG = 8, /* get WEP config */ 48 + GELIC_EURUS_CMD_SET_WPA_CFG = 9, /* set WPA config */ 49 + GELIC_EURUS_CMD_GET_WPA_CFG = 10, /* get WPA config */ 50 + GELIC_EURUS_CMD_GET_RSSI_CFG = 11, /* get RSSI info. */ 51 + GELIC_EURUS_CMD_MAX_INDEX 52 + }; 53 + 54 + /* for GELIC_EURUS_CMD_COMMON_CFG */ 55 + enum gelic_eurus_bss_type { 56 + GELIC_EURUS_BSS_INFRA = 0, 57 + GELIC_EURUS_BSS_ADHOC = 1, /* not supported */ 58 + }; 59 + 60 + enum gelic_eurus_auth_method { 61 + GELIC_EURUS_AUTH_OPEN = 0, /* FIXME: WLAN_AUTH_OPEN */ 62 + GELIC_EURUS_AUTH_SHARED = 1, /* not supported */ 63 + }; 64 + 65 + enum gelic_eurus_opmode { 66 + GELIC_EURUS_OPMODE_11BG = 0, /* 802.11b/g */ 67 + GELIC_EURUS_OPMODE_11B = 1, /* 802.11b only */ 68 + GELIC_EURUS_OPMODE_11G = 2, /* 802.11g only */ 69 + }; 70 + 71 + struct gelic_eurus_common_cfg { 72 + /* all fields are big endian */ 73 + u16 scan_index; 74 + u16 bss_type; /* infra or adhoc */ 75 + u16 auth_method; /* shared key or open */ 76 + u16 op_mode; /* B/G */ 77 + } __attribute__((packed)); 78 + 79 + 80 + /* for GELIC_EURUS_CMD_WEP_CFG */ 81 + enum gelic_eurus_wep_security { 82 + GELIC_EURUS_WEP_SEC_NONE = 0, 83 + GELIC_EURUS_WEP_SEC_40BIT = 1, 84 + GELIC_EURUS_WEP_SEC_104BIT = 2, 85 + }; 86 + 87 + struct gelic_eurus_wep_cfg { 88 + /* all fields are big endian */ 89 + u16 security; 90 + u8 key[4][16]; 91 + } __attribute__((packed)); 92 + 93 + /* for GELIC_EURUS_CMD_WPA_CFG */ 94 + enum gelic_eurus_wpa_security { 95 + GELIC_EURUS_WPA_SEC_NONE = 0x0000, 96 + /* group=TKIP, pairwise=TKIP */ 97 + GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP = 0x0001, 98 + /* group=AES, pairwise=AES */ 99 + GELIC_EURUS_WPA_SEC_WPA_AES_AES = 0x0002, 100 + /* group=TKIP, pairwise=TKIP */ 101 + GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP = 0x0004, 102 + /* group=AES, pairwise=AES */ 103 + GELIC_EURUS_WPA_SEC_WPA2_AES_AES = 0x0008, 104 + /* group=TKIP, pairwise=AES */ 105 + GELIC_EURUS_WPA_SEC_WPA_TKIP_AES 
= 0x0010, 106 + /* group=TKIP, pairwise=AES */ 107 + GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES = 0x0020, 108 + }; 109 + 110 + enum gelic_eurus_wpa_psk_type { 111 + GELIC_EURUS_WPA_PSK_PASSPHRASE = 0, /* passphrase string */ 112 + GELIC_EURUS_WPA_PSK_BIN = 1, /* 32 bytes binary key */ 113 + }; 114 + 115 + #define GELIC_WL_EURUS_PSK_MAX_LEN 64 116 + #define WPA_PSK_LEN 32 /* WPA spec says 256bit */ 117 + 118 + struct gelic_eurus_wpa_cfg { 119 + /* all fields are big endian */ 120 + u16 security; 121 + u16 psk_type; /* psk key encoding type */ 122 + u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; /* psk key; hex or passphrase */ 123 + } __attribute__((packed)); 124 + 125 + /* for GELIC_EURUS_CMD_{START,GET}_SCAN */ 126 + enum gelic_eurus_scan_capability { 127 + GELIC_EURUS_SCAN_CAP_ADHOC = 0x0000, 128 + GELIC_EURUS_SCAN_CAP_INFRA = 0x0001, 129 + GELIC_EURUS_SCAN_CAP_MASK = 0x0001, 130 + }; 131 + 132 + enum gelic_eurus_scan_sec_type { 133 + GELIC_EURUS_SCAN_SEC_NONE = 0x0000, 134 + GELIC_EURUS_SCAN_SEC_WEP = 0x0100, 135 + GELIC_EURUS_SCAN_SEC_WPA = 0x0200, 136 + GELIC_EURUS_SCAN_SEC_WPA2 = 0x0400, 137 + GELIC_EURUS_SCAN_SEC_MASK = 0x0f00, 138 + }; 139 + 140 + enum gelic_eurus_scan_sec_wep_type { 141 + GELIC_EURUS_SCAN_SEC_WEP_UNKNOWN = 0x0000, 142 + GELIC_EURUS_SCAN_SEC_WEP_40 = 0x0001, 143 + GELIC_EURUS_SCAN_SEC_WEP_104 = 0x0002, 144 + GELIC_EURUS_SCAN_SEC_WEP_MASK = 0x0003, 145 + }; 146 + 147 + enum gelic_eurus_scan_sec_wpa_type { 148 + GELIC_EURUS_SCAN_SEC_WPA_UNKNOWN = 0x0000, 149 + GELIC_EURUS_SCAN_SEC_WPA_TKIP = 0x0001, 150 + GELIC_EURUS_SCAN_SEC_WPA_AES = 0x0002, 151 + GELIC_EURUS_SCAN_SEC_WPA_MASK = 0x0003, 152 + }; 153 + 154 + /* 155 + * hw BSS information structure returned from GELIC_EURUS_CMD_GET_SCAN 156 + */ 157 + struct gelic_eurus_scan_info { 158 + /* all fields are big endian */ 159 + __be16 size; 160 + __be16 rssi; /* percentage */ 161 + __be16 channel; /* channel number */ 162 + __be16 beacon_period; /* FIXME: in msec unit */ 163 + __be16 capability; 164 + __be16 
security; 165 + u8 bssid[8]; /* last ETH_ALEN are valid. bssid[0],[1] are unused */ 166 + u8 essid[32]; /* IW_ESSID_MAX_SIZE */ 167 + u8 rate[16]; /* first MAX_RATES_LENGTH(12) are valid */ 168 + u8 ext_rate[16]; /* first MAX_RATES_EX_LENGTH(16) are valid */ 169 + __be32 reserved1; 170 + __be32 reserved2; 171 + __be32 reserved3; 172 + __be32 reserved4; 173 + u8 elements[0]; /* ie */ 174 + } __attribute__ ((packed)); 175 + 176 + /* the hypervisor returns bbs up to 16 */ 177 + #define GELIC_EURUS_MAX_SCAN (16) 178 + struct gelic_wl_scan_info { 179 + struct list_head list; 180 + struct gelic_eurus_scan_info *hwinfo; 181 + 182 + int valid; /* set 1 if this entry was in latest scanned list 183 + * from Eurus */ 184 + unsigned int eurus_index; /* index in the Eurus list */ 185 + unsigned long last_scanned; /* acquired time */ 186 + 187 + unsigned int rate_len; 188 + unsigned int rate_ext_len; 189 + unsigned int essid_len; 190 + }; 191 + 192 + /* for GELIC_EURUS_CMD_GET_RSSI */ 193 + struct gelic_eurus_rssi_info { 194 + /* big endian */ 195 + __be16 rssi; 196 + } __attribute__ ((packed)); 197 + 198 + 199 + /* for 'stat' member of gelic_wl_info */ 200 + enum gelic_wl_info_status_bit { 201 + GELIC_WL_STAT_CONFIGURED, 202 + GELIC_WL_STAT_CH_INFO, /* ch info aquired */ 203 + GELIC_WL_STAT_ESSID_SET, /* ESSID specified by userspace */ 204 + GELIC_WL_STAT_BSSID_SET, /* BSSID specified by userspace */ 205 + GELIC_WL_STAT_WPA_PSK_SET, /* PMK specified by userspace */ 206 + GELIC_WL_STAT_WPA_LEVEL_SET, /* WEP or WPA[2] selected */ 207 + }; 208 + 209 + /* for 'scan_stat' member of gelic_wl_info */ 210 + enum gelic_wl_scan_state { 211 + /* just initialized or get last scan result failed */ 212 + GELIC_WL_SCAN_STAT_INIT, 213 + /* scan request issued, accepted or chip is scanning */ 214 + GELIC_WL_SCAN_STAT_SCANNING, 215 + /* scan results retrieved */ 216 + GELIC_WL_SCAN_STAT_GOT_LIST, 217 + }; 218 + 219 + /* for 'cipher_method' */ 220 + enum gelic_wl_cipher_method { 221 + 
GELIC_WL_CIPHER_NONE, 222 + GELIC_WL_CIPHER_WEP, 223 + GELIC_WL_CIPHER_TKIP, 224 + GELIC_WL_CIPHER_AES, 225 + }; 226 + 227 + /* for 'wpa_level' */ 228 + enum gelic_wl_wpa_level { 229 + GELIC_WL_WPA_LEVEL_NONE, 230 + GELIC_WL_WPA_LEVEL_WPA, 231 + GELIC_WL_WPA_LEVEL_WPA2, 232 + }; 233 + 234 + /* for 'assoc_stat' */ 235 + enum gelic_wl_assoc_state { 236 + GELIC_WL_ASSOC_STAT_DISCONN, 237 + GELIC_WL_ASSOC_STAT_ASSOCIATING, 238 + GELIC_WL_ASSOC_STAT_ASSOCIATED, 239 + }; 240 + /* part of private data alloc_etherdev() allocated */ 241 + #define GELIC_WEP_KEYS 4 242 + struct gelic_wl_info { 243 + /* bss list */ 244 + struct semaphore scan_lock; 245 + struct list_head network_list; 246 + struct list_head network_free_list; 247 + struct gelic_wl_scan_info *networks; 248 + 249 + unsigned long scan_age; /* last scanned time */ 250 + enum gelic_wl_scan_state scan_stat; 251 + struct completion scan_done; 252 + 253 + /* eurus command queue */ 254 + struct workqueue_struct *eurus_cmd_queue; 255 + struct completion cmd_done_intr; 256 + 257 + /* eurus event handling */ 258 + struct workqueue_struct *event_queue; 259 + struct delayed_work event_work; 260 + 261 + /* wl status bits */ 262 + unsigned long stat; 263 + enum gelic_eurus_auth_method auth_method; /* open/shared */ 264 + enum gelic_wl_cipher_method group_cipher_method; 265 + enum gelic_wl_cipher_method pairwise_cipher_method; 266 + enum gelic_wl_wpa_level wpa_level; /* wpa/wpa2 */ 267 + 268 + /* association handling */ 269 + struct semaphore assoc_stat_lock; 270 + struct delayed_work assoc_work; 271 + enum gelic_wl_assoc_state assoc_stat; 272 + struct completion assoc_done; 273 + 274 + spinlock_t lock; 275 + u16 ch_info; /* available channels. 
bit0 = ch1 */ 276 + /* WEP keys */ 277 + u8 key[GELIC_WEP_KEYS][IW_ENCODING_TOKEN_MAX]; 278 + unsigned long key_enabled; 279 + unsigned int key_len[GELIC_WEP_KEYS]; 280 + unsigned int current_key; 281 + /* WWPA PSK */ 282 + u8 psk[GELIC_WL_EURUS_PSK_MAX_LEN]; 283 + enum gelic_eurus_wpa_psk_type psk_type; 284 + unsigned int psk_len; 285 + 286 + u8 essid[IW_ESSID_MAX_SIZE]; 287 + u8 bssid[ETH_ALEN]; /* userland requested */ 288 + u8 active_bssid[ETH_ALEN]; /* associated bssid */ 289 + unsigned int essid_len; 290 + 291 + /* buffer for hypervisor IO */ 292 + void *buf; 293 + 294 + struct iw_public_data wireless_data; 295 + struct iw_statistics iwstat; 296 + }; 297 + 298 + #define GELIC_WL_BSS_MAX_ENT 32 299 + #define GELIC_WL_ASSOC_RETRY 50 300 + static inline struct gelic_port *wl_port(struct gelic_wl_info *wl) 301 + { 302 + return container_of((void *)wl, struct gelic_port, priv); 303 + } 304 + static inline struct gelic_wl_info *port_wl(struct gelic_port *port) 305 + { 306 + return port_priv(port); 307 + } 308 + 309 + struct gelic_eurus_cmd { 310 + struct work_struct work; 311 + struct gelic_wl_info *wl; 312 + unsigned int cmd; /* command code */ 313 + u64 tag; 314 + u64 size; 315 + void *buffer; 316 + unsigned int buf_size; 317 + struct completion done; 318 + int status; 319 + u64 cmd_status; 320 + }; 321 + 322 + /* private ioctls to pass PSK */ 323 + #define GELIC_WL_PRIV_SET_PSK (SIOCIWFIRSTPRIV + 0) 324 + #define GELIC_WL_PRIV_GET_PSK (SIOCIWFIRSTPRIV + 1) 325 + 326 + extern int gelic_wl_driver_probe(struct gelic_card *card); 327 + extern int gelic_wl_driver_remove(struct gelic_card *card); 328 + extern void gelic_wl_interrupt(struct net_device *netdev, u64 status); 329 + #endif /* _GELIC_WIRELESS_H */
+135 -98
drivers/net/r6040.c
··· 61 61 62 62 /* Time in jiffies before concluding the transmitter is hung. */ 63 63 #define TX_TIMEOUT (6000 * HZ / 1000) 64 - #define TIMER_WUT (jiffies + HZ * 1)/* timer wakeup time : 1 second */ 65 64 66 65 /* RDC MAC I/O Size */ 67 66 #define R6040_IO_SIZE 256 ··· 173 174 struct net_device *dev; 174 175 struct mii_if_info mii_if; 175 176 struct napi_struct napi; 176 - struct net_device_stats stats; 177 - u16 napi_rx_running; 178 177 void __iomem *base; 179 178 }; 180 179 ··· 232 235 phy_write(ioaddr, lp->phy_addr, reg, val); 233 236 } 234 237 235 - static void r6040_tx_timeout(struct net_device *dev) 238 + static void r6040_free_txbufs(struct net_device *dev) 236 239 { 237 - struct r6040_private *priv = netdev_priv(dev); 240 + struct r6040_private *lp = netdev_priv(dev); 241 + int i; 238 242 239 - disable_irq(dev->irq); 240 - napi_disable(&priv->napi); 241 - spin_lock(&priv->lock); 242 - dev->stats.tx_errors++; 243 - spin_unlock(&priv->lock); 243 + for (i = 0; i < TX_DCNT; i++) { 244 + if (lp->tx_insert_ptr->skb_ptr) { 245 + pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf, 246 + MAX_BUF_SIZE, PCI_DMA_TODEVICE); 247 + dev_kfree_skb(lp->tx_insert_ptr->skb_ptr); 248 + lp->rx_insert_ptr->skb_ptr = NULL; 249 + } 250 + lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp; 251 + } 252 + } 244 253 245 - netif_stop_queue(dev); 254 + static void r6040_free_rxbufs(struct net_device *dev) 255 + { 256 + struct r6040_private *lp = netdev_priv(dev); 257 + int i; 258 + 259 + for (i = 0; i < RX_DCNT; i++) { 260 + if (lp->rx_insert_ptr->skb_ptr) { 261 + pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf, 262 + MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); 263 + dev_kfree_skb(lp->rx_insert_ptr->skb_ptr); 264 + lp->rx_insert_ptr->skb_ptr = NULL; 265 + } 266 + lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp; 267 + } 268 + } 269 + 270 + static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring, 271 + dma_addr_t desc_dma, int size) 272 + { 273 + struct r6040_descriptor *desc = desc_ring; 
274 + dma_addr_t mapping = desc_dma; 275 + 276 + while (size-- > 0) { 277 + mapping += sizeof(sizeof(*desc)); 278 + desc->ndesc = cpu_to_le32(mapping); 279 + desc->vndescp = desc + 1; 280 + desc++; 281 + } 282 + desc--; 283 + desc->ndesc = cpu_to_le32(desc_dma); 284 + desc->vndescp = desc_ring; 246 285 } 247 286 248 287 /* Allocate skb buffer for rx descriptor */ ··· 289 256 290 257 descptr = lp->rx_insert_ptr; 291 258 while (lp->rx_free_desc < RX_DCNT) { 292 - descptr->skb_ptr = dev_alloc_skb(MAX_BUF_SIZE); 259 + descptr->skb_ptr = netdev_alloc_skb(dev, MAX_BUF_SIZE); 293 260 294 261 if (!descptr->skb_ptr) 295 262 break; ··· 305 272 lp->rx_insert_ptr = descptr; 306 273 } 307 274 275 + static void r6040_alloc_txbufs(struct net_device *dev) 276 + { 277 + struct r6040_private *lp = netdev_priv(dev); 278 + void __iomem *ioaddr = lp->base; 279 + 280 + lp->tx_free_desc = TX_DCNT; 281 + 282 + lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring; 283 + r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT); 284 + 285 + iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0); 286 + iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1); 287 + } 288 + 289 + static void r6040_alloc_rxbufs(struct net_device *dev) 290 + { 291 + struct r6040_private *lp = netdev_priv(dev); 292 + void __iomem *ioaddr = lp->base; 293 + 294 + lp->rx_free_desc = 0; 295 + 296 + lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; 297 + r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); 298 + 299 + rx_buf_alloc(lp, dev); 300 + 301 + iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0); 302 + iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1); 303 + } 304 + 305 + static void r6040_tx_timeout(struct net_device *dev) 306 + { 307 + struct r6040_private *priv = netdev_priv(dev); 308 + void __iomem *ioaddr = priv->base; 309 + 310 + printk(KERN_WARNING "%s: transmit timed out, status %4.4x, PHY status " 311 + "%4.4x\n", 312 + dev->name, ioread16(ioaddr + MIER), 313 + mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); 
314 + 315 + disable_irq(dev->irq); 316 + napi_disable(&priv->napi); 317 + spin_lock(&priv->lock); 318 + /* Clear all descriptors */ 319 + r6040_free_txbufs(dev); 320 + r6040_free_rxbufs(dev); 321 + r6040_alloc_txbufs(dev); 322 + r6040_alloc_rxbufs(dev); 323 + 324 + /* Reset MAC */ 325 + iowrite16(MAC_RST, ioaddr + MCR1); 326 + spin_unlock(&priv->lock); 327 + enable_irq(dev->irq); 328 + 329 + dev->stats.tx_errors++; 330 + netif_wake_queue(dev); 331 + } 308 332 309 333 static struct net_device_stats *r6040_get_stats(struct net_device *dev) 310 334 { ··· 370 280 unsigned long flags; 371 281 372 282 spin_lock_irqsave(&priv->lock, flags); 373 - priv->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1); 374 - priv->stats.multicast += ioread8(ioaddr + ME_CNT0); 283 + dev->stats.rx_crc_errors += ioread8(ioaddr + ME_CNT1); 284 + dev->stats.multicast += ioread8(ioaddr + ME_CNT0); 375 285 spin_unlock_irqrestore(&priv->lock, flags); 376 286 377 - return &priv->stats; 287 + return &dev->stats; 378 288 } 379 289 380 290 /* Stop RDC MAC and Free the allocated resource */ ··· 383 293 struct r6040_private *lp = netdev_priv(dev); 384 294 void __iomem *ioaddr = lp->base; 385 295 struct pci_dev *pdev = lp->pdev; 386 - int i; 387 296 int limit = 2048; 388 297 u16 *adrp; 389 298 u16 cmd; ··· 402 313 iowrite16(adrp[1], ioaddr + MID_0M); 403 314 iowrite16(adrp[2], ioaddr + MID_0H); 404 315 free_irq(dev->irq, dev); 316 + 405 317 /* Free RX buffer */ 406 - for (i = 0; i < RX_DCNT; i++) { 407 - if (lp->rx_insert_ptr->skb_ptr) { 408 - pci_unmap_single(lp->pdev, lp->rx_insert_ptr->buf, 409 - MAX_BUF_SIZE, PCI_DMA_FROMDEVICE); 410 - dev_kfree_skb(lp->rx_insert_ptr->skb_ptr); 411 - lp->rx_insert_ptr->skb_ptr = NULL; 412 - } 413 - lp->rx_insert_ptr = lp->rx_insert_ptr->vndescp; 414 - } 318 + r6040_free_rxbufs(dev); 415 319 416 320 /* Free TX buffer */ 417 - for (i = 0; i < TX_DCNT; i++) { 418 - if (lp->tx_insert_ptr->skb_ptr) { 419 - pci_unmap_single(lp->pdev, lp->tx_insert_ptr->buf, 420 - 
MAX_BUF_SIZE, PCI_DMA_TODEVICE); 421 - dev_kfree_skb(lp->tx_insert_ptr->skb_ptr); 422 - lp->rx_insert_ptr->skb_ptr = NULL; 423 - } 424 - lp->tx_insert_ptr = lp->tx_insert_ptr->vndescp; 425 - } 321 + r6040_free_txbufs(dev); 426 322 427 323 /* Free Descriptor memory */ 428 324 pci_free_consistent(pdev, RX_DESC_SIZE, lp->rx_ring, lp->rx_ring_dma); ··· 506 432 507 433 /* Check for errors */ 508 434 err = ioread16(ioaddr + MLSR); 509 - if (err & 0x0400) priv->stats.rx_errors++; 435 + if (err & 0x0400) 436 + dev->stats.rx_errors++; 510 437 /* RX FIFO over-run */ 511 - if (err & 0x8000) priv->stats.rx_fifo_errors++; 438 + if (err & 0x8000) 439 + dev->stats.rx_fifo_errors++; 512 440 /* RX descriptor unavailable */ 513 - if (err & 0x0080) priv->stats.rx_frame_errors++; 441 + if (err & 0x0080) 442 + dev->stats.rx_frame_errors++; 514 443 /* Received packet with length over buffer lenght */ 515 - if (err & 0x0020) priv->stats.rx_over_errors++; 444 + if (err & 0x0020) 445 + dev->stats.rx_over_errors++; 516 446 /* Received packet with too long or short */ 517 - if (err & (0x0010|0x0008)) priv->stats.rx_length_errors++; 447 + if (err & (0x0010 | 0x0008)) 448 + dev->stats.rx_length_errors++; 518 449 /* Received packet with CRC errors */ 519 450 if (err & 0x0004) { 520 451 spin_lock(&priv->lock); 521 - priv->stats.rx_crc_errors++; 452 + dev->stats.rx_crc_errors++; 522 453 spin_unlock(&priv->lock); 523 454 } 524 455 ··· 548 469 /* Send to upper layer */ 549 470 netif_receive_skb(skb_ptr); 550 471 dev->last_rx = jiffies; 551 - priv->dev->stats.rx_packets++; 552 - priv->dev->stats.rx_bytes += descptr->len; 472 + dev->stats.rx_packets++; 473 + dev->stats.rx_bytes += descptr->len; 553 474 /* To next descriptor */ 554 475 descptr = descptr->vndescp; 555 476 priv->rx_free_desc--; ··· 577 498 /* Check for errors */ 578 499 err = ioread16(ioaddr + MLSR); 579 500 580 - if (err & 0x0200) priv->stats.rx_fifo_errors++; 581 - if (err & (0x2000 | 0x4000)) priv->stats.tx_carrier_errors++; 501 + if 
(err & 0x0200) 502 + dev->stats.rx_fifo_errors++; 503 + if (err & (0x2000 | 0x4000)) 504 + dev->stats.tx_carrier_errors++; 582 505 583 506 if (descptr->status & 0x8000) 584 - break; /* Not complte */ 507 + break; /* Not complete */ 585 508 skb_ptr = descptr->skb_ptr; 586 509 pci_unmap_single(priv->pdev, descptr->buf, 587 510 skb_ptr->len, PCI_DMA_TODEVICE); ··· 626 545 struct r6040_private *lp = netdev_priv(dev); 627 546 void __iomem *ioaddr = lp->base; 628 547 u16 status; 629 - int handled = 1; 630 548 631 549 /* Mask off RDC MAC interrupt */ 632 550 iowrite16(MSK_INT, ioaddr + MIER); ··· 645 565 if (status & 0x10) 646 566 r6040_tx(dev); 647 567 648 - return IRQ_RETVAL(handled); 568 + return IRQ_HANDLED; 649 569 } 650 570 651 571 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 657 577 } 658 578 #endif 659 579 660 - static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring, 661 - dma_addr_t desc_dma, int size) 662 - { 663 - struct r6040_descriptor *desc = desc_ring; 664 - dma_addr_t mapping = desc_dma; 665 - 666 - while (size-- > 0) { 667 - mapping += sizeof(sizeof(*desc)); 668 - desc->ndesc = cpu_to_le32(mapping); 669 - desc->vndescp = desc + 1; 670 - desc++; 671 - } 672 - desc--; 673 - desc->ndesc = cpu_to_le32(desc_dma); 674 - desc->vndescp = desc_ring; 675 - } 676 - 677 580 /* Init RDC MAC */ 678 581 static void r6040_up(struct net_device *dev) 679 582 { 680 583 struct r6040_private *lp = netdev_priv(dev); 681 584 void __iomem *ioaddr = lp->base; 682 585 683 - /* Initialize */ 684 - lp->tx_free_desc = TX_DCNT; 685 - lp->rx_free_desc = 0; 686 - /* Init descriptor */ 687 - lp->tx_remove_ptr = lp->tx_insert_ptr = lp->tx_ring; 688 - lp->rx_remove_ptr = lp->rx_insert_ptr = lp->rx_ring; 689 - /* Init TX descriptor */ 690 - r6040_init_ring_desc(lp->tx_ring, lp->tx_ring_dma, TX_DCNT); 691 - 692 - /* Init RX descriptor */ 693 - r6040_init_ring_desc(lp->rx_ring, lp->rx_ring_dma, RX_DCNT); 694 - 695 - /* Allocate buffer for RX descriptor */ 696 - rx_buf_alloc(lp, dev); 
697 - 698 - /* 699 - * TX and RX descriptor start registers. 700 - * Lower 16-bits to MxD_SA0. Higher 16-bits to MxD_SA1. 701 - */ 702 - iowrite16(lp->tx_ring_dma, ioaddr + MTD_SA0); 703 - iowrite16(lp->tx_ring_dma >> 16, ioaddr + MTD_SA1); 704 - 705 - iowrite16(lp->rx_ring_dma, ioaddr + MRD_SA0); 706 - iowrite16(lp->rx_ring_dma >> 16, ioaddr + MRD_SA1); 586 + /* Initialise and alloc RX/TX buffers */ 587 + r6040_alloc_txbufs(dev); 588 + r6040_alloc_rxbufs(dev); 707 589 708 590 /* Buffer Size Register */ 709 591 iowrite16(MAX_BUF_SIZE, ioaddr + MR_BSR); ··· 731 689 } 732 690 733 691 /* Timer active again */ 734 - lp->timer.expires = TIMER_WUT; 735 - add_timer(&lp->timer); 692 + mod_timer(&lp->timer, jiffies + round_jiffies(HZ)); 736 693 } 737 694 738 695 /* Read/set MAC address routines */ ··· 787 746 napi_enable(&lp->napi); 788 747 netif_start_queue(dev); 789 748 790 - if (lp->switch_sig != ICPLUS_PHY_ID) { 791 - /* set and active a timer process */ 792 - init_timer(&lp->timer); 793 - lp->timer.expires = TIMER_WUT; 794 - lp->timer.data = (unsigned long)dev; 795 - lp->timer.function = &r6040_timer; 796 - add_timer(&lp->timer); 797 - } 749 + /* set and active a timer process */ 750 + setup_timer(&lp->timer, r6040_timer, (unsigned long) dev); 751 + if (lp->switch_sig != ICPLUS_PHY_ID) 752 + mod_timer(&lp->timer, jiffies + HZ); 798 753 return 0; 799 754 } 800 755
+2 -1
drivers/net/sis190.c
··· 1630 1630 SIS_PCI_COMMIT(); 1631 1631 } 1632 1632 1633 - static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) 1633 + static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1634 + struct net_device *dev) 1634 1635 { 1635 1636 u8 from; 1636 1637
+13 -6
drivers/s390/net/claw.h
··· 114 114 debug_event(claw_dbf_##name,level,(void*)(addr),len); \ 115 115 } while (0) 116 116 117 + /* Allow to sort out low debug levels early to avoid wasted sprints */ 118 + static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level) 119 + { 120 + return (level <= dbf_grp->level); 121 + } 122 + 117 123 #define CLAW_DBF_TEXT_(level,name,text...) \ 118 - do { \ 119 - sprintf(debug_buffer, text); \ 120 - debug_text_event(claw_dbf_##name,level, debug_buffer);\ 121 - } while (0) 124 + do { \ 125 + if (claw_dbf_passes(claw_dbf_##name, level)) { \ 126 + sprintf(debug_buffer, text); \ 127 + debug_text_event(claw_dbf_##name, level, \ 128 + debug_buffer); \ 129 + } \ 130 + } while (0) 122 131 123 132 /******************************************************* 124 133 * Define Control Blocks * ··· 287 278 __u16 write_size; /* write buffer size */ 288 279 __u16 dev_id; /* device ident */ 289 280 __u8 packing; /* are we packing? */ 290 - volatile __u8 queme_switch; /* gate for imed packing */ 291 - volatile unsigned long pk_delay; /* Delay for adaptive packing */ 292 281 __u8 in_use; /* device active flag */ 293 282 struct net_device *ndev; /* backward ptr to the net dev*/ 294 283 };
+1 -1
drivers/s390/net/lcs.c
··· 94 94 lcs_register_debug_facility(void) 95 95 { 96 96 lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); 97 - lcs_dbf_trace = debug_register("lcs_trace", 2, 2, 8); 97 + lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); 98 98 if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { 99 99 PRINT_ERR("Not enough memory for debug facility.\n"); 100 100 lcs_unregister_debug_facility();
+12 -4
drivers/s390/net/lcs.h
··· 16 16 debug_event(lcs_dbf_##name,level,(void*)(addr),len); \ 17 17 } while (0) 18 18 19 + /* Allow to sort out low debug levels early to avoid wasted sprints */ 20 + static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level) 21 + { 22 + return (level <= dbf_grp->level); 23 + } 24 + 19 25 #define LCS_DBF_TEXT_(level,name,text...) \ 20 - do { \ 21 - sprintf(debug_buffer, text); \ 22 - debug_text_event(lcs_dbf_##name,level, debug_buffer);\ 23 - } while (0) 26 + do { \ 27 + if (lcs_dbf_passes(lcs_dbf_##name, level)) { \ 28 + sprintf(debug_buffer, text); \ 29 + debug_text_event(lcs_dbf_##name, level, debug_buffer); \ 30 + } \ 31 + } while (0) 24 32 25 33 /** 26 34 * sysfs related stuff
+20 -9
drivers/s390/net/netiucv.c
··· 97 97 98 98 DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf); 99 99 100 - #define IUCV_DBF_TEXT_(name,level,text...) \ 101 - do { \ 102 - char* iucv_dbf_txt_buf = get_cpu_var(iucv_dbf_txt_buf); \ 103 - sprintf(iucv_dbf_txt_buf, text); \ 104 - debug_text_event(iucv_dbf_##name,level,iucv_dbf_txt_buf); \ 105 - put_cpu_var(iucv_dbf_txt_buf); \ 100 + /* Allow to sort out low debug levels early to avoid wasted sprints */ 101 + static inline int iucv_dbf_passes(debug_info_t *dbf_grp, int level) 102 + { 103 + return (level <= dbf_grp->level); 104 + } 105 + 106 + #define IUCV_DBF_TEXT_(name, level, text...) \ 107 + do { \ 108 + if (iucv_dbf_passes(iucv_dbf_##name, level)) { \ 109 + char* iucv_dbf_txt_buf = \ 110 + get_cpu_var(iucv_dbf_txt_buf); \ 111 + sprintf(iucv_dbf_txt_buf, text); \ 112 + debug_text_event(iucv_dbf_##name, level, \ 113 + iucv_dbf_txt_buf); \ 114 + put_cpu_var(iucv_dbf_txt_buf); \ 115 + } \ 106 116 } while (0) 107 117 108 118 #define IUCV_DBF_SPRINTF(name,level,text...) \ ··· 147 137 #define PRINTK_HEADER " iucv: " /* for debugging */ 148 138 149 139 static struct device_driver netiucv_driver = { 140 + .owner = THIS_MODULE, 150 141 .name = "netiucv", 151 142 .bus = &iucv_bus, 152 143 }; ··· 583 572 } 584 573 585 574 /** 586 - * Dummy NOP action for all statemachines 575 + * NOP action for statemachines 587 576 */ 588 - static void fsm_action_nop(fsm_instance *fi, int event, void *arg) 577 + static void netiucv_action_nop(fsm_instance *fi, int event, void *arg) 589 578 { 590 579 } 591 580 ··· 1121 1110 1122 1111 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, 1123 1112 { DEV_STATE_RUNNING, DEV_EVENT_CONDOWN, dev_action_conndown }, 1124 - { DEV_STATE_RUNNING, DEV_EVENT_CONUP, fsm_action_nop }, 1113 + { DEV_STATE_RUNNING, DEV_EVENT_CONUP, netiucv_action_nop }, 1125 1114 }; 1126 1115 1127 1116 static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
+2
include/linux/dm9000.h
··· 19 19 #define DM9000_PLATF_8BITONLY (0x0001) 20 20 #define DM9000_PLATF_16BITONLY (0x0002) 21 21 #define DM9000_PLATF_32BITONLY (0x0004) 22 + #define DM9000_PLATF_EXT_PHY (0x0008) 23 + #define DM9000_PLATF_NO_EEPROM (0x0010) 22 24 23 25 /* platfrom data for platfrom device structure's platfrom_data field */ 24 26
+4 -4
include/linux/netdevice.h
··· 604 604 605 605 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 606 606 607 + /* ingress path synchronizer */ 608 + spinlock_t ingress_lock; 609 + struct Qdisc *qdisc_ingress; 610 + 607 611 /* 608 612 * Cache line mostly used on queue transmit path (qdisc) 609 613 */ ··· 620 616 621 617 /* Partially transmitted GSO packet. */ 622 618 struct sk_buff *gso_skb; 623 - 624 - /* ingress path synchronizer */ 625 - spinlock_t ingress_lock; 626 - struct Qdisc *qdisc_ingress; 627 619 628 620 /* 629 621 * One part is mostly used on xmit path (device)
+2
include/net/ax25.h
··· 324 324 extern void ax25_dama_off(ax25_cb *); 325 325 326 326 /* ax25_ds_timer.c */ 327 + extern void ax25_ds_setup_timer(ax25_dev *); 327 328 extern void ax25_ds_set_timer(ax25_dev *); 328 329 extern void ax25_ds_del_timer(ax25_dev *); 329 330 extern void ax25_ds_timer(ax25_cb *); ··· 417 416 extern void ax25_disconnect(ax25_cb *, int); 418 417 419 418 /* ax25_timer.c */ 419 + extern void ax25_setup_timers(ax25_cb *); 420 420 extern void ax25_start_heartbeat(ax25_cb *); 421 421 extern void ax25_start_t1timer(ax25_cb *); 422 422 extern void ax25_start_t2timer(ax25_cb *);
-1
include/net/ndisc.h
··· 103 103 extern int ndisc_mc_map(struct in6_addr *addr, char *buf, struct net_device *dev, int dir); 104 104 105 105 106 - struct rt6_info * dflt_rt_lookup(void); 107 106 108 107 /* 109 108 * IGMP
+4 -1
include/net/xfrm.h
··· 508 508 } header; 509 509 510 510 /* Sequence number for replay protection. */ 511 - u64 seq; 511 + union { 512 + u64 output; 513 + __be32 input; 514 + } seq; 512 515 }; 513 516 514 517 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
+3 -9
net/ax25/af_ax25.c
··· 510 510 skb_queue_head_init(&ax25->ack_queue); 511 511 skb_queue_head_init(&ax25->reseq_queue); 512 512 513 - init_timer(&ax25->timer); 514 - init_timer(&ax25->t1timer); 515 - init_timer(&ax25->t2timer); 516 - init_timer(&ax25->t3timer); 517 - init_timer(&ax25->idletimer); 513 + ax25_setup_timers(ax25); 518 514 519 515 ax25_fillin_cb(ax25, NULL); 520 516 ··· 1924 1928 ax25->paclen); 1925 1929 1926 1930 if (ax25->sk != NULL) { 1927 - bh_lock_sock(ax25->sk); 1928 - seq_printf(seq," %d %d %ld\n", 1931 + seq_printf(seq, " %d %d %lu\n", 1929 1932 atomic_read(&ax25->sk->sk_wmem_alloc), 1930 1933 atomic_read(&ax25->sk->sk_rmem_alloc), 1931 - ax25->sk->sk_socket != NULL ? SOCK_INODE(ax25->sk->sk_socket)->i_ino : 0L); 1932 - bh_unlock_sock(ax25->sk); 1934 + sock_i_ino(ax25->sk)); 1933 1935 } else { 1934 1936 seq_puts(seq, " * * *\n"); 1935 1937 }
+1 -1
net/ax25/ax25_dev.c
··· 82 82 ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT; 83 83 84 84 #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) 85 - init_timer(&ax25_dev->dama.slave_timer); 85 + ax25_ds_setup_timer(ax25_dev); 86 86 #endif 87 87 88 88 spin_lock_bh(&ax25_dev_lock);
+4 -8
net/ax25/ax25_ds_timer.c
··· 40 40 * 1/10th of a second. 41 41 */ 42 42 43 - static void ax25_ds_add_timer(ax25_dev *ax25_dev) 43 + void ax25_ds_setup_timer(ax25_dev *ax25_dev) 44 44 { 45 - struct timer_list *t = &ax25_dev->dama.slave_timer; 46 - t->data = (unsigned long) ax25_dev; 47 - t->function = &ax25_ds_timeout; 48 - t->expires = jiffies + HZ; 49 - add_timer(t); 45 + setup_timer(&ax25_dev->dama.slave_timer, ax25_ds_timeout, 46 + (unsigned long)ax25_dev); 50 47 } 51 48 52 49 void ax25_ds_del_timer(ax25_dev *ax25_dev) ··· 57 60 if (ax25_dev == NULL) /* paranoia */ 58 61 return; 59 62 60 - del_timer(&ax25_dev->dama.slave_timer); 61 63 ax25_dev->dama.slave_timeout = 62 64 msecs_to_jiffies(ax25_dev->values[AX25_VALUES_DS_TIMEOUT]) / 10; 63 - ax25_ds_add_timer(ax25_dev); 65 + mod_timer(&ax25_dev->dama.slave_timer, jiffies + HZ); 64 66 } 65 67 66 68 /*
+14 -14
net/ax25/ax25_route.c
··· 45 45 { 46 46 ax25_route *s, *t, *ax25_rt; 47 47 48 - write_lock(&ax25_route_lock); 48 + write_lock_bh(&ax25_route_lock); 49 49 ax25_rt = ax25_route_list; 50 50 while (ax25_rt != NULL) { 51 51 s = ax25_rt; ··· 68 68 } 69 69 } 70 70 } 71 - write_unlock(&ax25_route_lock); 71 + write_unlock_bh(&ax25_route_lock); 72 72 } 73 73 74 74 static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ··· 82 82 if (route->digi_count > AX25_MAX_DIGIS) 83 83 return -EINVAL; 84 84 85 - write_lock(&ax25_route_lock); 85 + write_lock_bh(&ax25_route_lock); 86 86 87 87 ax25_rt = ax25_route_list; 88 88 while (ax25_rt != NULL) { ··· 92 92 ax25_rt->digipeat = NULL; 93 93 if (route->digi_count != 0) { 94 94 if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { 95 - write_unlock(&ax25_route_lock); 95 + write_unlock_bh(&ax25_route_lock); 96 96 return -ENOMEM; 97 97 } 98 98 ax25_rt->digipeat->lastrepeat = -1; ··· 102 102 ax25_rt->digipeat->calls[i] = route->digi_addr[i]; 103 103 } 104 104 } 105 - write_unlock(&ax25_route_lock); 105 + write_unlock_bh(&ax25_route_lock); 106 106 return 0; 107 107 } 108 108 ax25_rt = ax25_rt->next; 109 109 } 110 110 111 111 if ((ax25_rt = kmalloc(sizeof(ax25_route), GFP_ATOMIC)) == NULL) { 112 - write_unlock(&ax25_route_lock); 112 + write_unlock_bh(&ax25_route_lock); 113 113 return -ENOMEM; 114 114 } 115 115 ··· 120 120 ax25_rt->ip_mode = ' '; 121 121 if (route->digi_count != 0) { 122 122 if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { 123 - write_unlock(&ax25_route_lock); 123 + write_unlock_bh(&ax25_route_lock); 124 124 kfree(ax25_rt); 125 125 return -ENOMEM; 126 126 } ··· 133 133 } 134 134 ax25_rt->next = ax25_route_list; 135 135 ax25_route_list = ax25_rt; 136 - write_unlock(&ax25_route_lock); 136 + write_unlock_bh(&ax25_route_lock); 137 137 138 138 return 0; 139 139 } ··· 152 152 if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL) 153 153 return -EINVAL; 154 154 155 - 
write_lock(&ax25_route_lock); 155 + write_lock_bh(&ax25_route_lock); 156 156 157 157 ax25_rt = ax25_route_list; 158 158 while (ax25_rt != NULL) { ··· 174 174 } 175 175 } 176 176 } 177 - write_unlock(&ax25_route_lock); 177 + write_unlock_bh(&ax25_route_lock); 178 178 179 179 return 0; 180 180 } ··· 188 188 if ((ax25_dev = ax25_addr_ax25dev(&rt_option->port_addr)) == NULL) 189 189 return -EINVAL; 190 190 191 - write_lock(&ax25_route_lock); 191 + write_lock_bh(&ax25_route_lock); 192 192 193 193 ax25_rt = ax25_route_list; 194 194 while (ax25_rt != NULL) { ··· 216 216 } 217 217 218 218 out: 219 - write_unlock(&ax25_route_lock); 219 + write_unlock_bh(&ax25_route_lock); 220 220 return err; 221 221 } 222 222 ··· 492 492 { 493 493 ax25_route *s, *ax25_rt = ax25_route_list; 494 494 495 - write_lock(&ax25_route_lock); 495 + write_lock_bh(&ax25_route_lock); 496 496 while (ax25_rt != NULL) { 497 497 s = ax25_rt; 498 498 ax25_rt = ax25_rt->next; ··· 500 500 kfree(s->digipeat); 501 501 kfree(s); 502 502 } 503 - write_unlock(&ax25_route_lock); 503 + write_unlock_bh(&ax25_route_lock); 504 504 }
+21 -39
net/ax25/ax25_timer.c
··· 40 40 static void ax25_t3timer_expiry(unsigned long); 41 41 static void ax25_idletimer_expiry(unsigned long); 42 42 43 + void ax25_setup_timers(ax25_cb *ax25) 44 + { 45 + setup_timer(&ax25->timer, ax25_heartbeat_expiry, (unsigned long)ax25); 46 + setup_timer(&ax25->t1timer, ax25_t1timer_expiry, (unsigned long)ax25); 47 + setup_timer(&ax25->t2timer, ax25_t2timer_expiry, (unsigned long)ax25); 48 + setup_timer(&ax25->t3timer, ax25_t3timer_expiry, (unsigned long)ax25); 49 + setup_timer(&ax25->idletimer, ax25_idletimer_expiry, 50 + (unsigned long)ax25); 51 + } 52 + 43 53 void ax25_start_heartbeat(ax25_cb *ax25) 44 54 { 45 - del_timer(&ax25->timer); 46 - 47 - ax25->timer.data = (unsigned long)ax25; 48 - ax25->timer.function = &ax25_heartbeat_expiry; 49 - ax25->timer.expires = jiffies + 5 * HZ; 50 - 51 - add_timer(&ax25->timer); 55 + mod_timer(&ax25->timer, jiffies + 5 * HZ); 52 56 } 53 57 54 58 void ax25_start_t1timer(ax25_cb *ax25) 55 59 { 56 - del_timer(&ax25->t1timer); 57 - 58 - ax25->t1timer.data = (unsigned long)ax25; 59 - ax25->t1timer.function = &ax25_t1timer_expiry; 60 - ax25->t1timer.expires = jiffies + ax25->t1; 61 - 62 - add_timer(&ax25->t1timer); 60 + mod_timer(&ax25->t1timer, jiffies + ax25->t1); 63 61 } 64 62 65 63 void ax25_start_t2timer(ax25_cb *ax25) 66 64 { 67 - del_timer(&ax25->t2timer); 68 - 69 - ax25->t2timer.data = (unsigned long)ax25; 70 - ax25->t2timer.function = &ax25_t2timer_expiry; 71 - ax25->t2timer.expires = jiffies + ax25->t2; 72 - 73 - add_timer(&ax25->t2timer); 65 + mod_timer(&ax25->t2timer, jiffies + ax25->t2); 74 66 } 75 67 76 68 void ax25_start_t3timer(ax25_cb *ax25) 77 69 { 78 - del_timer(&ax25->t3timer); 79 - 80 - if (ax25->t3 > 0) { 81 - ax25->t3timer.data = (unsigned long)ax25; 82 - ax25->t3timer.function = &ax25_t3timer_expiry; 83 - ax25->t3timer.expires = jiffies + ax25->t3; 84 - 85 - add_timer(&ax25->t3timer); 86 - } 70 + if (ax25->t3 > 0) 71 + mod_timer(&ax25->t3timer, jiffies + ax25->t3); 72 + else 73 + 
del_timer(&ax25->t3timer); 87 74 } 88 75 89 76 void ax25_start_idletimer(ax25_cb *ax25) 90 77 { 91 - del_timer(&ax25->idletimer); 92 - 93 - if (ax25->idle > 0) { 94 - ax25->idletimer.data = (unsigned long)ax25; 95 - ax25->idletimer.function = &ax25_idletimer_expiry; 96 - ax25->idletimer.expires = jiffies + ax25->idle; 97 - 98 - add_timer(&ax25->idletimer); 99 - } 78 + if (ax25->idle > 0) 79 + mod_timer(&ax25->idletimer, jiffies + ax25->idle); 80 + else 81 + del_timer(&ax25->idletimer); 100 82 } 101 83 102 84 void ax25_stop_heartbeat(ax25_cb *ax25)
+2 -2
net/core/dev.c
··· 1071 1071 */ 1072 1072 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev); 1073 1073 1074 - dev_deactivate(dev); 1075 - 1076 1074 clear_bit(__LINK_STATE_START, &dev->state); 1077 1075 1078 1076 /* Synchronize to scheduled poll. We cannot touch poll list, ··· 1080 1082 * napi_struct instances on this device. 1081 1083 */ 1082 1084 smp_mb__after_clear_bit(); /* Commit netif_running(). */ 1085 + 1086 + dev_deactivate(dev); 1083 1087 1084 1088 /* 1085 1089 * Call the device specific close. This cannot fail.
+3 -9
net/core/neighbour.c
··· 834 834 } 835 835 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) { 836 836 struct sk_buff *skb = skb_peek(&neigh->arp_queue); 837 - /* keep skb alive even if arp_queue overflows */ 838 - if (skb) 839 - skb_get(skb); 840 - write_unlock(&neigh->lock); 837 + 841 838 neigh->ops->solicit(neigh, skb); 842 839 atomic_inc(&neigh->probes); 843 - if (skb) 844 - kfree_skb(skb); 845 - } else { 846 - out: 847 - write_unlock(&neigh->lock); 848 840 } 841 + out: 842 + write_unlock(&neigh->lock); 849 843 850 844 if (notify) 851 845 neigh_update_notify(neigh);
+26 -10
net/core/rtnetlink.c
··· 504 504 505 505 EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo); 506 506 507 - static void set_operstate(struct net_device *dev, unsigned char transition) 507 + static int set_operstate(struct net_device *dev, unsigned char transition, bool send_notification) 508 508 { 509 509 unsigned char operstate = dev->operstate; 510 510 ··· 527 527 write_lock_bh(&dev_base_lock); 528 528 dev->operstate = operstate; 529 529 write_unlock_bh(&dev_base_lock); 530 - netdev_state_change(dev); 531 - } 530 + 531 + if (send_notification) 532 + netdev_state_change(dev); 533 + return 1; 534 + } else 535 + return 0; 532 536 } 533 537 534 538 static void copy_rtnl_link_stats(struct rtnl_link_stats *a, ··· 826 822 if (tb[IFLA_BROADCAST]) { 827 823 nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len); 828 824 send_addr_notify = 1; 825 + modified = 1; 829 826 } 830 827 831 828 if (ifm->ifi_flags || ifm->ifi_change) { ··· 839 834 dev_change_flags(dev, flags); 840 835 } 841 836 842 - if (tb[IFLA_TXQLEN]) 843 - dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 837 + if (tb[IFLA_TXQLEN]) { 838 + if (dev->tx_queue_len != nla_get_u32(tb[IFLA_TXQLEN])) { 839 + dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 840 + modified = 1; 841 + } 842 + } 844 843 845 844 if (tb[IFLA_OPERSTATE]) 846 - set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 845 + modified |= set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), false); 847 846 848 847 if (tb[IFLA_LINKMODE]) { 849 - write_lock_bh(&dev_base_lock); 850 - dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 851 - write_unlock_bh(&dev_base_lock); 848 + if (dev->link_mode != nla_get_u8(tb[IFLA_LINKMODE])) { 849 + write_lock_bh(&dev_base_lock); 850 + dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 851 + write_lock_bh(&dev_base_lock); 852 + modified = 1; 853 + } 852 854 } 853 855 854 856 err = 0; ··· 869 857 870 858 if (send_addr_notify) 871 859 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev); 860 + 861 + if (modified) 862 + netdev_state_change(dev); 863 + 872 
864 return err; 873 865 } 874 866 ··· 990 974 if (tb[IFLA_TXQLEN]) 991 975 dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]); 992 976 if (tb[IFLA_OPERSTATE]) 993 - set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE])); 977 + set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]), true); 994 978 if (tb[IFLA_LINKMODE]) 995 979 dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]); 996 980
+1 -2
net/core/skbuff.c
··· 2106 2106 /** 2107 2107 * skb_pull_rcsum - pull skb and update receive checksum 2108 2108 * @skb: buffer to update 2109 - * @start: start of data before pull 2110 2109 * @len: length of data pulled 2111 2110 * 2112 2111 * This function performs an skb_pull on the packet and updates 2113 - * update the CHECKSUM_COMPLETE checksum. It should be used on 2112 + * the CHECKSUM_COMPLETE checksum. It should be used on 2114 2113 * receive path processing instead of skb_pull unless you know 2115 2114 * that the checksum difference is zero (e.g., a valid IP header) 2116 2115 * or you are setting ip_summed to CHECKSUM_NONE.
+1 -1
net/ipv4/ah4.c
··· 96 96 97 97 ah->reserved = 0; 98 98 ah->spi = x->id.spi; 99 - ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 99 + ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); 100 100 101 101 spin_lock_bh(&x->lock); 102 102 err = ah_mac_digest(ahp, skb, ah->auth_data);
-3
net/ipv4/arp.c
··· 368 368 if (!(neigh->nud_state&NUD_VALID)) 369 369 printk(KERN_DEBUG "trying to ucast probe in NUD_INVALID\n"); 370 370 dst_ha = neigh->ha; 371 - read_lock_bh(&neigh->lock); 372 371 } else if ((probes -= neigh->parms->app_probes) < 0) { 373 372 #ifdef CONFIG_ARPD 374 373 neigh_app_ns(neigh); ··· 377 378 378 379 arp_send(ARPOP_REQUEST, ETH_P_ARP, target, dev, saddr, 379 380 dst_ha, dev->dev_addr, NULL); 380 - if (dst_ha) 381 - read_unlock_bh(&neigh->lock); 382 381 } 383 382 384 383 static int arp_ignore(struct in_device *in_dev, __be32 sip, __be32 tip)
+3 -2
net/ipv4/esp4.c
··· 199 199 } 200 200 201 201 esph->spi = x->id.spi; 202 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 202 + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); 203 203 204 204 sg_init_table(sg, nfrags); 205 205 skb_to_sgvec(skb, sg, ··· 210 210 aead_givcrypt_set_callback(req, 0, esp_output_done, skb); 211 211 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 212 212 aead_givcrypt_set_assoc(req, asg, sizeof(*esph)); 213 - aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq); 213 + aead_givcrypt_set_giv(req, esph->enc_data, 214 + XFRM_SKB_CB(skb)->seq.output); 214 215 215 216 ESP_SKB_CB(skb)->tmp = tmp; 216 217 err = crypto_aead_givencrypt(req);
+84 -15
net/ipv4/fib_trie.c
··· 1762 1762 { 1763 1763 struct leaf *l = trie_firstleaf(t); 1764 1764 1765 - while (index-- > 0) { 1765 + while (l && index-- > 0) 1766 1766 l = trie_nextleaf(l); 1767 - if (!l) 1768 - break; 1769 - } 1767 + 1770 1768 return l; 1771 1769 } 1772 1770 ··· 2459 2461 .release = seq_release_net, 2460 2462 }; 2461 2463 2464 + struct fib_route_iter { 2465 + struct seq_net_private p; 2466 + struct trie *main_trie; 2467 + loff_t pos; 2468 + t_key key; 2469 + }; 2470 + 2471 + static struct leaf *fib_route_get_idx(struct fib_route_iter *iter, loff_t pos) 2472 + { 2473 + struct leaf *l = NULL; 2474 + struct trie *t = iter->main_trie; 2475 + 2476 + /* use cache location of last found key */ 2477 + if (iter->pos > 0 && pos >= iter->pos && (l = fib_find_node(t, iter->key))) 2478 + pos -= iter->pos; 2479 + else { 2480 + iter->pos = 0; 2481 + l = trie_firstleaf(t); 2482 + } 2483 + 2484 + while (l && pos-- > 0) { 2485 + iter->pos++; 2486 + l = trie_nextleaf(l); 2487 + } 2488 + 2489 + if (l) 2490 + iter->key = pos; /* remember it */ 2491 + else 2492 + iter->pos = 0; /* forget it */ 2493 + 2494 + return l; 2495 + } 2496 + 2497 + static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) 2498 + __acquires(RCU) 2499 + { 2500 + struct fib_route_iter *iter = seq->private; 2501 + struct fib_table *tb; 2502 + 2503 + rcu_read_lock(); 2504 + tb = fib_get_table(iter->p.net, RT_TABLE_MAIN); 2505 + if (!tb) 2506 + return NULL; 2507 + 2508 + iter->main_trie = (struct trie *) tb->tb_data; 2509 + if (*pos == 0) 2510 + return SEQ_START_TOKEN; 2511 + else 2512 + return fib_route_get_idx(iter, *pos - 1); 2513 + } 2514 + 2515 + static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2516 + { 2517 + struct fib_route_iter *iter = seq->private; 2518 + struct leaf *l = v; 2519 + 2520 + ++*pos; 2521 + if (v == SEQ_START_TOKEN) { 2522 + iter->pos = 0; 2523 + l = trie_firstleaf(iter->main_trie); 2524 + } else { 2525 + iter->pos++; 2526 + l = trie_nextleaf(l); 2527 + } 2528 + 
2529 + if (l) 2530 + iter->key = l->key; 2531 + else 2532 + iter->pos = 0; 2533 + return l; 2534 + } 2535 + 2536 + static void fib_route_seq_stop(struct seq_file *seq, void *v) 2537 + __releases(RCU) 2538 + { 2539 + rcu_read_unlock(); 2540 + } 2541 + 2462 2542 static unsigned fib_flag_trans(int type, __be32 mask, const struct fib_info *fi) 2463 2543 { 2464 2544 static unsigned type2flags[RTN_MAX + 1] = { ··· 2560 2484 */ 2561 2485 static int fib_route_seq_show(struct seq_file *seq, void *v) 2562 2486 { 2563 - const struct fib_trie_iter *iter = seq->private; 2564 2487 struct leaf *l = v; 2565 2488 struct leaf_info *li; 2566 2489 struct hlist_node *node; ··· 2570 2495 "\tWindow\tIRTT"); 2571 2496 return 0; 2572 2497 } 2573 - 2574 - if (iter->trie == iter->trie_local) 2575 - return 0; 2576 - 2577 - if (IS_TNODE(l)) 2578 - return 0; 2579 2498 2580 2499 hlist_for_each_entry_rcu(li, node, &l->list, hlist) { 2581 2500 struct fib_alias *fa; ··· 2613 2544 } 2614 2545 2615 2546 static const struct seq_operations fib_route_seq_ops = { 2616 - .start = fib_trie_seq_start, 2617 - .next = fib_trie_seq_next, 2618 - .stop = fib_trie_seq_stop, 2547 + .start = fib_route_seq_start, 2548 + .next = fib_route_seq_next, 2549 + .stop = fib_route_seq_stop, 2619 2550 .show = fib_route_seq_show, 2620 2551 }; 2621 2552 2622 2553 static int fib_route_seq_open(struct inode *inode, struct file *file) 2623 2554 { 2624 2555 return seq_open_net(inode, file, &fib_route_seq_ops, 2625 - sizeof(struct fib_trie_iter)); 2556 + sizeof(struct fib_route_iter)); 2626 2557 } 2627 2558 2628 2559 static const struct file_operations fib_route_fops = {
-3
net/ipv4/inet_hashtables.c
··· 120 120 } 121 121 } 122 122 123 - EXPORT_SYMBOL(inet_listen_wlock); 124 - 125 123 /* 126 124 * Don't inline this cruft. Here are some nice properties to exploit here. The 127 125 * BSD API does not allow a listening sock to specify the remote port nor the ··· 492 494 return ret; 493 495 } 494 496 } 495 - EXPORT_SYMBOL_GPL(__inet_hash_connect); 496 497 497 498 /* 498 499 * Bind a port for a connect operation and hash it.
-5
net/ipv4/ip_sockglue.c
··· 514 514 val &= ~3; 515 515 val |= inet->tos & 3; 516 516 } 517 - if (IPTOS_PREC(val) >= IPTOS_PREC_CRITIC_ECP && 518 - !capable(CAP_NET_ADMIN)) { 519 - err = -EPERM; 520 - break; 521 - } 522 517 if (inet->tos != val) { 523 518 inet->tos = val; 524 519 sk->sk_priority = rt_tos2priority(val);
+1 -1
net/ipv6/ah6.c
··· 283 283 284 284 ah->reserved = 0; 285 285 ah->spi = x->id.spi; 286 - ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 286 + ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); 287 287 288 288 spin_lock_bh(&x->lock); 289 289 err = ah_mac_digest(ahp, skb, ah->auth_data);
+3 -2
net/ipv6/esp6.c
··· 188 188 *skb_mac_header(skb) = IPPROTO_ESP; 189 189 190 190 esph->spi = x->id.spi; 191 - esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq); 191 + esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output); 192 192 193 193 sg_init_table(sg, nfrags); 194 194 skb_to_sgvec(skb, sg, ··· 199 199 aead_givcrypt_set_callback(req, 0, esp_output_done, skb); 200 200 aead_givcrypt_set_crypt(req, sg, sg, clen, iv); 201 201 aead_givcrypt_set_assoc(req, asg, sizeof(*esph)); 202 - aead_givcrypt_set_giv(req, esph->enc_data, XFRM_SKB_CB(skb)->seq); 202 + aead_givcrypt_set_giv(req, esph->enc_data, 203 + XFRM_SKB_CB(skb)->seq.output); 203 204 204 205 ESP_SKB_CB(skb)->tmp = tmp; 205 206 err = crypto_aead_givencrypt(req);
+5 -1
net/ipv6/ip6_output.c
··· 621 621 * or if the skb it not generated by a local socket. (This last 622 622 * check should be redundant, but it's free.) 623 623 */ 624 - if (!np || np->pmtudisc >= IPV6_PMTUDISC_DO) { 624 + if (!skb->local_df) { 625 625 skb->dev = skb->dst->dev; 626 626 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 627 627 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); ··· 1419 1419 tmp_skb->destructor = NULL; 1420 1420 tmp_skb->sk = NULL; 1421 1421 } 1422 + 1423 + /* Allow local fragmentation. */ 1424 + if (np->pmtudisc < IPV6_PMTUDISC_DO) 1425 + skb->local_df = 1; 1422 1426 1423 1427 ipv6_addr_copy(final_dst, &fl->fl6_dst); 1424 1428 __skb_pull(skb, skb_network_header_len(skb));
+1 -1
net/ipv6/xfrm6_output.c
··· 36 36 if (mtu < IPV6_MIN_MTU) 37 37 mtu = IPV6_MIN_MTU; 38 38 39 - if (skb->len > mtu) { 39 + if (!skb->local_df && skb->len > mtu) { 40 40 skb->dev = dst->dev; 41 41 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, skb->dev); 42 42 ret = -EMSGSIZE;
+1
net/key/af_key.c
··· 2291 2291 return 0; 2292 2292 2293 2293 out: 2294 + xp->dead = 1; 2294 2295 xfrm_policy_destroy(xp); 2295 2296 return err; 2296 2297 }
+1 -1
net/netfilter/nf_conntrack_proto_tcp.c
··· 945 945 946 946 ct->proto.tcp.state = new_state; 947 947 if (old_state != new_state 948 - && new_state == TCP_CONNTRACK_CLOSE) 948 + && new_state == TCP_CONNTRACK_FIN_WAIT) 949 949 ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT; 950 950 timeout = ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans 951 951 && tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
+1 -1
net/netfilter/xt_SECMARK.c
··· 111 111 return true; 112 112 } 113 113 114 - void secmark_tg_destroy(const struct xt_target *target, void *targinfo) 114 + static void secmark_tg_destroy(const struct xt_target *target, void *targinfo) 115 115 { 116 116 switch (mode) { 117 117 case SECMARK_MODE_SEL:
+3 -3
net/netlabel/netlabel_domainhash.c
··· 150 150 entry = netlbl_domhsh_search(domain); 151 151 if (entry == NULL) { 152 152 entry = rcu_dereference(netlbl_domhsh_def); 153 - if (entry != NULL && entry->valid) 154 - return entry; 153 + if (entry != NULL && !entry->valid) 154 + entry = NULL; 155 155 } 156 156 157 - return NULL; 157 + return entry; 158 158 } 159 159 160 160 /*
+20 -10
net/netlabel/netlabel_unlabeled.c
··· 180 180 } 181 181 } 182 182 183 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 183 184 /** 184 185 * netlbl_unlabel_audit_addr6 - Audit an IPv6 address 185 186 * @audit_buf: audit buffer ··· 214 213 audit_log_format(audit_buf, " src_prefixlen=%d", mask_len); 215 214 } 216 215 } 216 + #endif /* IPv6 */ 217 217 218 218 /* 219 219 * Unlabeled Connection Hash Table Functions ··· 619 617 int ifindex; 620 618 struct net_device *dev; 621 619 struct netlbl_unlhsh_iface *iface; 622 - struct in_addr *addr4, *mask4; 623 - struct in6_addr *addr6, *mask6; 624 620 struct audit_buffer *audit_buf = NULL; 625 621 char *secctx = NULL; 626 622 u32 secctx_len; ··· 651 651 audit_buf = netlbl_audit_start_common(AUDIT_MAC_UNLBL_STCADD, 652 652 audit_info); 653 653 switch (addr_len) { 654 - case sizeof(struct in_addr): 654 + case sizeof(struct in_addr): { 655 + struct in_addr *addr4, *mask4; 656 + 655 657 addr4 = (struct in_addr *)addr; 656 658 mask4 = (struct in_addr *)mask; 657 659 ret_val = netlbl_unlhsh_add_addr4(iface, addr4, mask4, secid); ··· 663 661 addr4->s_addr, 664 662 mask4->s_addr); 665 663 break; 664 + } 666 665 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 667 - case sizeof(struct in6_addr): 666 + case sizeof(struct in6_addr): { 667 + struct in6_addr *addr6, *mask6; 668 + 668 669 addr6 = (struct in6_addr *)addr; 669 670 mask6 = (struct in6_addr *)mask; 670 671 ret_val = netlbl_unlhsh_add_addr6(iface, addr6, mask6, secid); ··· 676 671 dev_name, 677 672 addr6, mask6); 678 673 break; 674 + } 679 675 #endif /* IPv6 */ 680 676 default: 681 677 ret_val = -EINVAL; ··· 1747 1741 u16 family, 1748 1742 struct netlbl_lsm_secattr *secattr) 1749 1743 { 1750 - struct iphdr *hdr4; 1751 - struct ipv6hdr *hdr6; 1752 - struct netlbl_unlhsh_addr4 *addr4; 1753 - struct netlbl_unlhsh_addr6 *addr6; 1754 1744 struct netlbl_unlhsh_iface *iface; 1755 1745 1756 1746 rcu_read_lock(); ··· 1754 1752 if (iface == NULL) 1755 1753 goto unlabel_getattr_nolabel; 1756 1754 switch 
(family) { 1757 - case PF_INET: 1755 + case PF_INET: { 1756 + struct iphdr *hdr4; 1757 + struct netlbl_unlhsh_addr4 *addr4; 1758 + 1758 1759 hdr4 = ip_hdr(skb); 1759 1760 addr4 = netlbl_unlhsh_search_addr4(hdr4->saddr, iface); 1760 1761 if (addr4 == NULL) 1761 1762 goto unlabel_getattr_nolabel; 1762 1763 secattr->attr.secid = addr4->secid; 1763 1764 break; 1765 + } 1764 1766 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1765 - case PF_INET6: 1767 + case PF_INET6: { 1768 + struct ipv6hdr *hdr6; 1769 + struct netlbl_unlhsh_addr6 *addr6; 1770 + 1766 1771 hdr6 = ipv6_hdr(skb); 1767 1772 addr6 = netlbl_unlhsh_search_addr6(&hdr6->saddr, iface); 1768 1773 if (addr6 == NULL) 1769 1774 goto unlabel_getattr_nolabel; 1770 1775 secattr->attr.secid = addr6->secid; 1771 1776 break; 1777 + } 1772 1778 #endif /* IPv6 */ 1773 1779 default: 1774 1780 goto unlabel_getattr_nolabel;
+1 -2
net/netlabel/netlabel_user.c
··· 96 96 struct audit_buffer *netlbl_audit_start_common(int type, 97 97 struct netlbl_audit *audit_info) 98 98 { 99 - struct audit_context *audit_ctx = current->audit_context; 100 99 struct audit_buffer *audit_buf; 101 100 char *secctx; 102 101 u32 secctx_len; ··· 103 104 if (audit_enabled == 0) 104 105 return NULL; 105 106 106 - audit_buf = audit_log_start(audit_ctx, GFP_ATOMIC, type); 107 + audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC, type); 107 108 if (audit_buf == NULL) 108 109 return NULL; 109 110
+2 -4
net/netlink/genetlink.c
··· 230 230 { 231 231 struct genl_multicast_group *grp, *tmp; 232 232 233 - genl_lock(); 234 233 list_for_each_entry_safe(grp, tmp, &family->mcast_groups, list) 235 234 __genl_unregister_mc_group(family, grp); 236 - genl_unlock(); 237 235 } 238 236 239 237 /** ··· 394 396 { 395 397 struct genl_family *rc; 396 398 397 - genl_unregister_mc_groups(family); 398 - 399 399 genl_lock(); 400 + 401 + genl_unregister_mc_groups(family); 400 402 401 403 list_for_each_entry(rc, genl_family_chain(family->id), family_list) { 402 404 if (family->id != rc->id || strcmp(rc->name, family->name))
+3
net/socket.c
··· 701 701 { 702 702 struct socket *sock = file->private_data; 703 703 704 + if (unlikely(!sock->ops->splice_read)) 705 + return -EINVAL; 706 + 704 707 return sock->ops->splice_read(sock, ppos, pipe, len, flags); 705 708 } 706 709
+1 -1
net/xfrm/Kconfig
··· 38 38 39 39 config XFRM_STATISTICS 40 40 bool "Transformation statistics (EXPERIMENTAL)" 41 - depends on XFRM && PROC_FS && EXPERIMENTAL 41 + depends on INET && XFRM && PROC_FS && EXPERIMENTAL 42 42 ---help--- 43 43 This statistics is not a SNMP/MIB specification but shows 44 44 statistics about transformation error (or almost error) factor
+2 -2
net/xfrm/xfrm_input.c
··· 109 109 if (encap_type < 0) { 110 110 async = 1; 111 111 x = xfrm_input_state(skb); 112 - seq = XFRM_SKB_CB(skb)->seq; 112 + seq = XFRM_SKB_CB(skb)->seq.input; 113 113 goto resume; 114 114 } 115 115 ··· 175 175 176 176 spin_unlock(&x->lock); 177 177 178 - XFRM_SKB_CB(skb)->seq = seq; 178 + XFRM_SKB_CB(skb)->seq.input = seq; 179 179 180 180 nexthdr = x->type->input(x, skb); 181 181
+1 -1
net/xfrm/xfrm_output.c
··· 62 62 } 63 63 64 64 if (x->type->flags & XFRM_TYPE_REPLAY_PROT) { 65 - XFRM_SKB_CB(skb)->seq = ++x->replay.oseq; 65 + XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq; 66 66 if (unlikely(x->replay.oseq == 0)) { 67 67 XFRM_INC_STATS(LINUX_MIB_XFRMOUTSTATESEQERROR); 68 68 x->replay.oseq--;
+1
net/xfrm/xfrm_user.c
··· 1105 1105 return xp; 1106 1106 error: 1107 1107 *errp = err; 1108 + xp->dead = 1; 1108 1109 xfrm_policy_destroy(xp); 1109 1110 return NULL; 1110 1111 }