Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (56 commits)
l2tp: Fix possible oops if transmitting or receiving when tunnel goes down
tcp: Fix for race due to temporary drop of the socket lock in skb_splice_bits.
tcp: Increment OUTRSTS in tcp_send_active_reset()
raw: Raw socket leak.
l2tp: Fix possible WARN_ON from socket code when UDP socket is closed
USB ID for Philips CPWUA054/00 Wireless USB Adapter 11g
ssb: Fix context assertion in ssb_pcicore_dev_irqvecs_enable
libertas: fix command size for CMD_802_11_SUBSCRIBE_EVENT
ipw2200: expire and use oldest BSS on adhoc create
airo warning fix
b43legacy: Fix controller restart crash
sctp: Fix ECN markings for IPv6
sctp: Flush the queue only once during fast retransmit.
sctp: Start T3-RTX timer when fast retransmitting lowest TSN
sctp: Correctly implement Fast Recovery cwnd manipulations.
sctp: Move sctp_v4_dst_saddr out of loop
sctp: retran_path update bug fix
tcp: fix skb vs fack_count out-of-sync condition
sunhme: Cleanup use of deprecated calls to save_and_cli and restore_flags.
xfrm: xfrm_algo: correct usage of RIPEMD-160
...

+618 -315
+1 -1
Documentation/networking/bridge.txt
··· 1 1 In order to use the Ethernet bridging functionality, you'll need the 2 2 userspace tools. These programs and documentation are available 3 - at http://bridge.sourceforge.net. The download page is 3 + at http://www.linux-foundation.org/en/Net:Bridge. The download page is 4 4 http://prdownloads.sourceforge.net/bridge. 5 5 6 6 If you still have questions, don't hesitate to post to the mailing list
+1 -1
MAINTAINERS
··· 1611 1611 P: Stephen Hemminger 1612 1612 M: shemminger@linux-foundation.org 1613 1613 L: bridge@lists.linux-foundation.org 1614 - W: http://bridge.sourceforge.net/ 1614 + W: http://www.linux-foundation.org/en/Net:Bridge 1615 1615 S: Maintained 1616 1616 1617 1617 ETHERTEAM 16I DRIVER
+1
drivers/net/atlx/atl1.c
··· 2023 2023 /* Good Receive */ 2024 2024 pci_unmap_page(adapter->pdev, buffer_info->dma, 2025 2025 buffer_info->length, PCI_DMA_FROMDEVICE); 2026 + buffer_info->dma = 0; 2026 2027 skb = buffer_info->skb; 2027 2028 length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size); 2028 2029
+7 -3
drivers/net/cs89x0.c
··· 1394 1394 #endif 1395 1395 if (!result) { 1396 1396 printk(KERN_ERR "%s: EEPROM is configured for unavailable media\n", dev->name); 1397 - release_irq: 1397 + release_dma: 1398 + #if ALLOW_DMA 1399 + free_dma(dev->dma); 1400 + #endif 1401 + release_irq: 1398 1402 #if ALLOW_DMA 1399 1403 release_dma_buff(lp); 1400 1404 #endif ··· 1446 1442 if ((result = detect_bnc(dev)) != DETECTED_NONE) 1447 1443 break; 1448 1444 printk(KERN_ERR "%s: no media detected\n", dev->name); 1449 - goto release_irq; 1445 + goto release_dma; 1450 1446 } 1451 1447 switch(result) { 1452 1448 case DETECTED_NONE: 1453 1449 printk(KERN_ERR "%s: no network cable attached to configured media\n", dev->name); 1454 - goto release_irq; 1450 + goto release_dma; 1455 1451 case DETECTED_RJ45H: 1456 1452 printk(KERN_INFO "%s: using half-duplex 10Base-T (RJ-45)\n", dev->name); 1457 1453 break;
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 75 75 #include "myri10ge_mcp.h" 76 76 #include "myri10ge_mcp_gen_header.h" 77 77 78 - #define MYRI10GE_VERSION_STR "1.3.2-1.287" 78 + #define MYRI10GE_VERSION_STR "1.3.99-1.347" 79 79 80 80 MODULE_DESCRIPTION("Myricom 10G driver (10GbE)"); 81 81 MODULE_AUTHOR("Maintainer: help@myri.com");
+88 -23
drivers/net/pppol2tp.c
··· 240 240 if (sk == NULL) 241 241 return NULL; 242 242 243 + sock_hold(sk); 243 244 session = (struct pppol2tp_session *)(sk->sk_user_data); 244 - if (session == NULL) 245 - return NULL; 245 + if (session == NULL) { 246 + sock_put(sk); 247 + goto out; 248 + } 246 249 247 250 BUG_ON(session->magic != L2TP_SESSION_MAGIC); 248 - 251 + out: 249 252 return session; 250 253 } 251 254 ··· 259 256 if (sk == NULL) 260 257 return NULL; 261 258 259 + sock_hold(sk); 262 260 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data); 263 - if (tunnel == NULL) 264 - return NULL; 261 + if (tunnel == NULL) { 262 + sock_put(sk); 263 + goto out; 264 + } 265 265 266 266 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); 267 - 267 + out: 268 268 return tunnel; 269 269 } 270 270 ··· 722 716 session->stats.rx_errors++; 723 717 kfree_skb(skb); 724 718 sock_put(session->sock); 719 + sock_put(sock); 725 720 726 721 return 0; 727 722 728 723 error: 729 724 /* Put UDP header back */ 730 725 __skb_push(skb, sizeof(struct udphdr)); 726 + sock_put(sock); 731 727 732 728 no_tunnel: 733 729 return 1; ··· 753 745 "%s: received %d bytes\n", tunnel->name, skb->len); 754 746 755 747 if (pppol2tp_recv_core(sk, skb)) 756 - goto pass_up; 748 + goto pass_up_put; 757 749 750 + sock_put(sk); 758 751 return 0; 759 752 753 + pass_up_put: 754 + sock_put(sk); 760 755 pass_up: 761 756 return 1; 762 757 } ··· 869 858 870 859 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 871 860 if (tunnel == NULL) 872 - goto error; 861 + goto error_put_sess; 873 862 874 863 /* What header length is configured for this session? */ 875 864 hdr_len = pppol2tp_l2tp_header_len(session); ··· 881 870 sizeof(ppph) + total_len, 882 871 0, GFP_KERNEL); 883 872 if (!skb) 884 - goto error; 873 + goto error_put_sess_tun; 885 874 886 875 /* Reserve space for headers. 
*/ 887 876 skb_reserve(skb, NET_SKB_PAD); ··· 911 900 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); 912 901 if (error < 0) { 913 902 kfree_skb(skb); 914 - goto error; 903 + goto error_put_sess_tun; 915 904 } 916 905 skb_put(skb, total_len); 917 906 ··· 958 947 session->stats.tx_errors++; 959 948 } 960 949 950 + return error; 951 + 952 + error_put_sess_tun: 953 + sock_put(session->tunnel_sock); 954 + error_put_sess: 955 + sock_put(sk); 961 956 error: 962 957 return error; 958 + } 959 + 960 + /* Automatically called when the skb is freed. 961 + */ 962 + static void pppol2tp_sock_wfree(struct sk_buff *skb) 963 + { 964 + sock_put(skb->sk); 965 + } 966 + 967 + /* For data skbs that we transmit, we associate with the tunnel socket 968 + * but don't do accounting. 969 + */ 970 + static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk) 971 + { 972 + sock_hold(sk); 973 + skb->sk = sk; 974 + skb->destructor = pppol2tp_sock_wfree; 963 975 } 964 976 965 977 /* Transmit function called by generic PPP driver. Sends PPP frame ··· 1027 993 1028 994 sk_tun = session->tunnel_sock; 1029 995 if (sk_tun == NULL) 1030 - goto abort; 996 + goto abort_put_sess; 1031 997 tunnel = pppol2tp_sock_to_tunnel(sk_tun); 1032 998 if (tunnel == NULL) 1033 - goto abort; 999 + goto abort_put_sess; 1034 1000 1035 1001 /* What header length is configured for this session? 
*/ 1036 1002 hdr_len = pppol2tp_l2tp_header_len(session); ··· 1043 1009 sizeof(struct udphdr) + hdr_len + sizeof(ppph); 1044 1010 old_headroom = skb_headroom(skb); 1045 1011 if (skb_cow_head(skb, headroom)) 1046 - goto abort; 1012 + goto abort_put_sess_tun; 1047 1013 1048 1014 new_headroom = skb_headroom(skb); 1049 1015 skb_orphan(skb); ··· 1103 1069 /* Get routing info from the tunnel socket */ 1104 1070 dst_release(skb->dst); 1105 1071 skb->dst = dst_clone(__sk_dst_get(sk_tun)); 1106 - skb->sk = sk_tun; 1072 + pppol2tp_skb_set_owner_w(skb, sk_tun); 1107 1073 1108 1074 /* Queue the packet to IP for output */ 1109 1075 len = skb->len; ··· 1120 1086 session->stats.tx_errors++; 1121 1087 } 1122 1088 1089 + sock_put(sk_tun); 1090 + sock_put(sk); 1123 1091 return 1; 1124 1092 1093 + abort_put_sess_tun: 1094 + sock_put(sk_tun); 1095 + abort_put_sess: 1096 + sock_put(sk); 1125 1097 abort: 1126 1098 /* Free the original skb */ 1127 1099 kfree_skb(skb); ··· 1231 1191 { 1232 1192 struct pppol2tp_tunnel *tunnel; 1233 1193 1234 - tunnel = pppol2tp_sock_to_tunnel(sk); 1194 + tunnel = sk->sk_user_data; 1235 1195 if (tunnel == NULL) 1236 1196 goto end; 1237 1197 ··· 1270 1230 if (sk->sk_user_data != NULL) { 1271 1231 struct pppol2tp_tunnel *tunnel; 1272 1232 1273 - session = pppol2tp_sock_to_session(sk); 1233 + session = sk->sk_user_data; 1274 1234 if (session == NULL) 1275 1235 goto out; 1236 + 1237 + BUG_ON(session->magic != L2TP_SESSION_MAGIC); 1276 1238 1277 1239 /* Don't use pppol2tp_sock_to_tunnel() here to 1278 1240 * get the tunnel context because the tunnel ··· 1321 1279 static int pppol2tp_release(struct socket *sock) 1322 1280 { 1323 1281 struct sock *sk = sock->sk; 1282 + struct pppol2tp_session *session; 1324 1283 int error; 1325 1284 1326 1285 if (!sk) ··· 1339 1296 sock_orphan(sk); 1340 1297 sock->sk = NULL; 1341 1298 1299 + session = pppol2tp_sock_to_session(sk); 1300 + 1342 1301 /* Purge any queued data */ 1343 1302 skb_queue_purge(&sk->sk_receive_queue); 1344 
1303 skb_queue_purge(&sk->sk_write_queue); 1304 + if (session != NULL) { 1305 + struct sk_buff *skb; 1306 + while ((skb = skb_dequeue(&session->reorder_q))) { 1307 + kfree_skb(skb); 1308 + sock_put(sk); 1309 + } 1310 + } 1345 1311 1346 1312 release_sock(sk); 1347 1313 ··· 1653 1601 1654 1602 error = ppp_register_channel(&po->chan); 1655 1603 if (error) 1656 - goto end; 1604 + goto end_put_tun; 1657 1605 1658 1606 /* This is how we get the session context from the socket. */ 1659 1607 sk->sk_user_data = session; ··· 1673 1621 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO, 1674 1622 "%s: created\n", session->name); 1675 1623 1624 + end_put_tun: 1625 + sock_put(tunnel_sock); 1676 1626 end: 1677 1627 release_sock(sk); 1678 1628 ··· 1722 1668 *usockaddr_len = len; 1723 1669 1724 1670 error = 0; 1671 + sock_put(sock->sk); 1725 1672 1726 1673 end: 1727 1674 return error; ··· 1961 1906 err = -EBADF; 1962 1907 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 1963 1908 if (tunnel == NULL) 1964 - goto end; 1909 + goto end_put_sess; 1965 1910 1966 1911 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg); 1967 - goto end; 1912 + sock_put(session->tunnel_sock); 1913 + goto end_put_sess; 1968 1914 } 1969 1915 1970 1916 err = pppol2tp_session_ioctl(session, cmd, arg); 1971 1917 1918 + end_put_sess: 1919 + sock_put(sk); 1972 1920 end: 1973 1921 return err; 1974 1922 } ··· 2117 2059 err = -EBADF; 2118 2060 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 2119 2061 if (tunnel == NULL) 2120 - goto end; 2062 + goto end_put_sess; 2121 2063 2122 2064 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val); 2065 + sock_put(session->tunnel_sock); 2123 2066 } else 2124 2067 err = pppol2tp_session_setsockopt(sk, session, optname, val); 2125 2068 2126 2069 err = 0; 2127 2070 2071 + end_put_sess: 2072 + sock_put(sk); 2128 2073 end: 2129 2074 return err; 2130 2075 } ··· 2242 2181 err = -EBADF; 2243 2182 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock); 2244 2183 if 
(tunnel == NULL) 2245 - goto end; 2184 + goto end_put_sess; 2246 2185 2247 2186 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val); 2187 + sock_put(session->tunnel_sock); 2248 2188 } else 2249 2189 err = pppol2tp_session_getsockopt(sk, session, optname, &val); 2250 2190 2251 2191 err = -EFAULT; 2252 2192 if (put_user(len, (int __user *) optlen)) 2253 - goto end; 2193 + goto end_put_sess; 2254 2194 2255 2195 if (copy_to_user((void __user *) optval, &val, len)) 2256 - goto end; 2196 + goto end_put_sess; 2257 2197 2258 2198 err = 0; 2199 + 2200 + end_put_sess: 2201 + sock_put(sk); 2259 2202 end: 2260 2203 return err; 2261 2204 }
+1 -1
drivers/net/sc92031.c
··· 972 972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 973 973 974 974 len = skb->len; 975 - if (unlikely(len < ETH_ZLEN)) { 975 + if (len < ETH_ZLEN) { 976 976 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, 977 977 0, ETH_ZLEN - len); 978 978 len = ETH_ZLEN;
+1 -1
drivers/net/sfc/falcon_xmac.c
··· 459 459 tries--; 460 460 } 461 461 462 - EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", 462 + EFX_LOG(efx, "Failed to bring XAUI link back up in %d tries!\n", 463 463 max_tries); 464 464 return 0; 465 465 }
+2 -2
drivers/net/sunhme.c
··· 111 111 struct hme_tx_logent *tlp; 112 112 unsigned long flags; 113 113 114 - save_and_cli(flags); 114 + local_irq_save(flags); 115 115 tlp = &tx_log[txlog_cur_entry]; 116 116 tlp->tstamp = (unsigned int)jiffies; 117 117 tlp->tx_new = hp->tx_new; ··· 119 119 tlp->action = a; 120 120 tlp->status = s; 121 121 txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); 122 - restore_flags(flags); 122 + local_irq_restore(flags); 123 123 } 124 124 static __inline__ void tx_dump_log(void) 125 125 {
+8 -2
drivers/net/tulip/tulip_core.c
··· 1729 1729 if (!dev) 1730 1730 return -EINVAL; 1731 1731 1732 - if (netif_running(dev)) 1733 - tulip_down(dev); 1732 + if (!netif_running(dev)) 1733 + goto save_state; 1734 + 1735 + tulip_down(dev); 1734 1736 1735 1737 netif_device_detach(dev); 1736 1738 free_irq(dev->irq, dev); 1737 1739 1740 + save_state: 1738 1741 pci_save_state(pdev); 1739 1742 pci_disable_device(pdev); 1740 1743 pci_set_power_state(pdev, pci_choose_state(pdev, state)); ··· 1756 1753 1757 1754 pci_set_power_state(pdev, PCI_D0); 1758 1755 pci_restore_state(pdev); 1756 + 1757 + if (!netif_running(dev)) 1758 + return 0; 1759 1759 1760 1760 if ((retval = pci_enable_device(pdev))) { 1761 1761 printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+2 -1
drivers/net/ucc_geth_ethtool.c
··· 73 73 "tx-frames-ok", 74 74 "tx-excessive-differ-frames", 75 75 "tx-256-511-frames", 76 + "tx-512-1023-frames", 76 77 "tx-1024-1518-frames", 77 78 "tx-jumbo-frames", 78 79 }; ··· 309 308 buf += UEC_TX_FW_STATS_LEN * ETH_GSTRING_LEN; 310 309 } 311 310 if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) 312 - memcpy(buf, tx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * 311 + memcpy(buf, rx_fw_stat_gstrings, UEC_RX_FW_STATS_LEN * 313 312 ETH_GSTRING_LEN); 314 313 } 315 314
+31 -5
drivers/net/virtio_net.c
··· 47 47 /* Number of input buffers, and max we've ever had. */ 48 48 unsigned int num, max; 49 49 50 + /* For cleaning up after transmission. */ 51 + struct tasklet_struct tasklet; 52 + 50 53 /* Receive & send queues. */ 51 54 struct sk_buff_head recv; 52 55 struct sk_buff_head send; ··· 71 68 72 69 /* Suppress further interrupts. */ 73 70 svq->vq_ops->disable_cb(svq); 71 + 74 72 /* We were waiting for more output buffers. */ 75 73 netif_wake_queue(vi->dev); 74 + 75 + /* Make sure we re-xmit last_xmit_skb: if there are no more packets 76 + * queued, start_xmit won't be called. */ 77 + tasklet_schedule(&vi->tasklet); 76 78 } 77 79 78 80 static void receive_skb(struct net_device *dev, struct sk_buff *skb, ··· 286 278 return vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb); 287 279 } 288 280 281 + static void xmit_tasklet(unsigned long data) 282 + { 283 + struct virtnet_info *vi = (void *)data; 284 + 285 + netif_tx_lock_bh(vi->dev); 286 + if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) { 287 + vi->svq->vq_ops->kick(vi->svq); 288 + vi->last_xmit_skb = NULL; 289 + } 290 + netif_tx_unlock_bh(vi->dev); 291 + } 292 + 289 293 static int start_xmit(struct sk_buff *skb, struct net_device *dev) 290 294 { 291 295 struct virtnet_info *vi = netdev_priv(dev); ··· 307 287 free_old_xmit_skbs(vi); 308 288 309 289 /* If we has a buffer left over from last time, send it now. */ 310 - if (vi->last_xmit_skb) { 290 + if (unlikely(vi->last_xmit_skb)) { 311 291 if (xmit_skb(vi, vi->last_xmit_skb) != 0) { 312 292 /* Drop this skb: we only queue one. 
*/ 313 293 vi->dev->stats.tx_dropped++; 314 294 kfree_skb(skb); 295 + skb = NULL; 315 296 goto stop_queue; 316 297 } 317 298 vi->last_xmit_skb = NULL; 318 299 } 319 300 320 301 /* Put new one in send queue and do transmit */ 321 - __skb_queue_head(&vi->send, skb); 322 - if (xmit_skb(vi, skb) != 0) { 323 - vi->last_xmit_skb = skb; 324 - goto stop_queue; 302 + if (likely(skb)) { 303 + __skb_queue_head(&vi->send, skb); 304 + if (xmit_skb(vi, skb) != 0) { 305 + vi->last_xmit_skb = skb; 306 + skb = NULL; 307 + goto stop_queue; 308 + } 325 309 } 326 310 done: 327 311 vi->svq->vq_ops->kick(vi->svq); ··· 451 427 /* Initialize our empty receive and send queues. */ 452 428 skb_queue_head_init(&vi->recv); 453 429 skb_queue_head_init(&vi->send); 430 + 431 + tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi); 454 432 455 433 err = register_netdev(dev); 456 434 if (err) {
+1 -1
drivers/net/wireless/airo.c
··· 2905 2905 2906 2906 static int waitbusy (struct airo_info *ai) { 2907 2907 int delay = 0; 2908 - while ((IN4500 (ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) { 2908 + while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) { 2909 2909 udelay (10); 2910 2910 if ((++delay % 20) == 0) 2911 2911 OUT4500(ai, EVACK, EV_CLEARCOMMANDBUSY);
+9 -8
drivers/net/wireless/b43legacy/main.c
··· 3039 3039 /* Locking: wl->mutex */ 3040 3040 static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) 3041 3041 { 3042 - struct b43legacy_wl *wl = dev->wl; 3043 3042 struct b43legacy_phy *phy = &dev->phy; 3044 3043 u32 macctl; 3045 3044 ··· 3052 3053 macctl &= ~B43legacy_MACCTL_PSM_RUN; 3053 3054 macctl |= B43legacy_MACCTL_PSM_JMP0; 3054 3055 b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl); 3055 - 3056 - mutex_unlock(&wl->mutex); 3057 - /* Must unlock as it would otherwise deadlock. No races here. 3058 - * Cancel possibly pending workqueues. */ 3059 - cancel_work_sync(&dev->restart_work); 3060 - mutex_lock(&wl->mutex); 3061 3056 3062 3057 b43legacy_leds_exit(dev); 3063 3058 b43legacy_rng_exit(dev->wl); ··· 3479 3486 } 3480 3487 } 3481 3488 out: 3489 + if (err) 3490 + wl->current_dev = NULL; /* Failed to init the dev. */ 3482 3491 mutex_unlock(&wl->mutex); 3483 3492 if (err) 3484 3493 b43legacyerr(wl, "Controller restart FAILED\n"); ··· 3613 3618 struct b43legacy_wldev *wldev; 3614 3619 struct b43legacy_wl *wl; 3615 3620 3621 + /* Do not cancel ieee80211-workqueue based work here. 3622 + * See comment in b43legacy_remove(). */ 3623 + 3616 3624 wldev = ssb_get_drvdata(dev); 3617 3625 wl = wldev->wl; 3618 - cancel_work_sync(&wldev->restart_work); 3619 3626 b43legacy_debugfs_remove_device(wldev); 3620 3627 b43legacy_wireless_core_detach(wldev); 3621 3628 list_del(&wldev->list); ··· 3785 3788 { 3786 3789 struct b43legacy_wl *wl = ssb_get_devtypedata(dev); 3787 3790 struct b43legacy_wldev *wldev = ssb_get_drvdata(dev); 3791 + 3792 + /* We must cancel any work here before unregistering from ieee80211, 3793 + * as the ieee80211 unreg will destroy the workqueue. */ 3794 + cancel_work_sync(&wldev->restart_work); 3788 3795 3789 3796 B43legacy_WARN_ON(!wl); 3790 3797 if (wl->current_dev == wldev)
+25 -2
drivers/net/wireless/ipw2200.c
··· 7558 7558 priv->ieee->iw_mode == IW_MODE_ADHOC && 7559 7559 priv->config & CFG_ADHOC_CREATE && 7560 7560 priv->config & CFG_STATIC_ESSID && 7561 - priv->config & CFG_STATIC_CHANNEL && 7562 - !list_empty(&priv->ieee->network_free_list)) { 7561 + priv->config & CFG_STATIC_CHANNEL) { 7562 + /* Use oldest network if the free list is empty */ 7563 + if (list_empty(&priv->ieee->network_free_list)) { 7564 + struct ieee80211_network *oldest = NULL; 7565 + struct ieee80211_network *target; 7566 + DECLARE_MAC_BUF(mac); 7567 + 7568 + list_for_each_entry(target, &priv->ieee->network_list, list) { 7569 + if ((oldest == NULL) || 7570 + (target->last_scanned < oldest->last_scanned)) 7571 + oldest = target; 7572 + } 7573 + 7574 + /* If there are no more slots, expire the oldest */ 7575 + list_del(&oldest->list); 7576 + target = oldest; 7577 + IPW_DEBUG_ASSOC("Expired '%s' (%s) from " 7578 + "network list.\n", 7579 + escape_essid(target->ssid, 7580 + target->ssid_len), 7581 + print_mac(mac, target->bssid)); 7582 + list_add_tail(&target->list, 7583 + &priv->ieee->network_free_list); 7584 + } 7585 + 7563 7586 element = priv->ieee->network_free_list.next; 7564 7587 network = list_entry(element, struct ieee80211_network, list); 7565 7588 ipw_adhoc_create(priv, network);
+2 -2
drivers/net/wireless/libertas/debugfs.c
··· 312 312 if (tlv_type != TLV_TYPE_BCNMISS) 313 313 tlv->freq = freq; 314 314 315 - /* The command header, the event mask, and the one TLV */ 316 - events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 2 + sizeof(*tlv)); 315 + /* The command header, the action, the event mask, and one TLV */ 316 + events->hdr.size = cpu_to_le16(sizeof(events->hdr) + 4 + sizeof(*tlv)); 317 317 318 318 ret = lbs_cmd_with_response(priv, CMD_802_11_SUBSCRIBE_EVENT, events); 319 319
+1
drivers/net/wireless/p54/p54usb.c
··· 49 49 {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ 50 50 51 51 /* Version 2 devices (3887) */ 52 + {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */ 52 53 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ 53 54 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ 54 55 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
+2 -2
drivers/ssb/driver_pcicore.c
··· 537 537 int err = 0; 538 538 u32 tmp; 539 539 540 - might_sleep(); 541 - 542 540 if (!pdev) 543 541 goto out; 544 542 bus = pdev->bus; 543 + 544 + might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); 545 545 546 546 /* Enable interrupts for this device. */ 547 547 if (bus->host_pci &&
+6 -6
include/linux/in_route.h
··· 10 10 #define RTCF_NOPMTUDISC RTM_F_NOPMTUDISC 11 11 12 12 #define RTCF_NOTIFY 0x00010000 13 - #define RTCF_DIRECTDST 0x00020000 13 + #define RTCF_DIRECTDST 0x00020000 /* unused */ 14 14 #define RTCF_REDIRECTED 0x00040000 15 - #define RTCF_TPROXY 0x00080000 15 + #define RTCF_TPROXY 0x00080000 /* unused */ 16 16 17 - #define RTCF_FAST 0x00200000 18 - #define RTCF_MASQ 0x00400000 19 - #define RTCF_SNAT 0x00800000 17 + #define RTCF_FAST 0x00200000 /* unused */ 18 + #define RTCF_MASQ 0x00400000 /* unused */ 19 + #define RTCF_SNAT 0x00800000 /* unused */ 20 20 #define RTCF_DOREDIRECT 0x01000000 21 21 #define RTCF_DIRECTSRC 0x04000000 22 22 #define RTCF_DNAT 0x08000000 23 23 #define RTCF_BROADCAST 0x10000000 24 24 #define RTCF_MULTICAST 0x20000000 25 - #define RTCF_REJECT 0x40000000 25 + #define RTCF_REJECT 0x40000000 /* unused */ 26 26 #define RTCF_LOCAL 0x80000000 27 27 28 28 #define RTCF_NAT (RTCF_DNAT|RTCF_SNAT)
-1
include/linux/inetdevice.h
··· 117 117 __be32 ifa_address; 118 118 __be32 ifa_mask; 119 119 __be32 ifa_broadcast; 120 - __be32 ifa_anycast; 121 120 unsigned char ifa_scope; 122 121 unsigned char ifa_flags; 123 122 unsigned char ifa_prefixlen;
+2 -2
include/linux/rtnetlink.h
··· 267 267 RTA_PREFSRC, 268 268 RTA_METRICS, 269 269 RTA_MULTIPATH, 270 - RTA_PROTOINFO, 270 + RTA_PROTOINFO, /* no longer used */ 271 271 RTA_FLOW, 272 272 RTA_CACHEINFO, 273 - RTA_SESSION, 273 + RTA_SESSION, /* no longer used */ 274 274 RTA_MP_ALGO, /* no longer used */ 275 275 RTA_TABLE, 276 276 __RTA_MAX
+22
include/net/addrconf.h
··· 94 94 extern void addrconf_leave_solict(struct inet6_dev *idev, 95 95 struct in6_addr *addr); 96 96 97 + static inline unsigned long addrconf_timeout_fixup(u32 timeout, 98 + unsigned unit) 99 + { 100 + if (timeout == 0xffffffff) 101 + return ~0UL; 102 + 103 + /* 104 + * Avoid arithmetic overflow. 105 + * Assuming unit is constant and non-zero, this "if" statement 106 + * will go away on 64bit archs. 107 + */ 108 + if (0xfffffffe > LONG_MAX / unit && timeout > LONG_MAX / unit) 109 + return LONG_MAX / unit; 110 + 111 + return timeout; 112 + } 113 + 114 + static inline int addrconf_finite_timeout(unsigned long timeout) 115 + { 116 + return ~timeout; 117 + } 118 + 97 119 /* 98 120 * IPv6 Address Label subsystem (addrlabel.c) 99 121 */
+2 -2
include/net/genetlink.h
··· 162 162 * @skb: socket buffer the message is stored in 163 163 * @hdr: generic netlink message header 164 164 */ 165 - static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr) 165 + static inline void genlmsg_cancel(struct sk_buff *skb, void *hdr) 166 166 { 167 - return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); 167 + nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN); 168 168 } 169 169 170 170 /**
+9 -11
include/net/netlink.h
··· 556 556 * @skb: socket buffer the message is stored in 557 557 * @mark: mark to trim to 558 558 * 559 - * Trims the message to the provided mark. Returns -1. 559 + * Trims the message to the provided mark. 560 560 */ 561 - static inline int nlmsg_trim(struct sk_buff *skb, const void *mark) 561 + static inline void nlmsg_trim(struct sk_buff *skb, const void *mark) 562 562 { 563 563 if (mark) 564 564 skb_trim(skb, (unsigned char *) mark - skb->data); 565 - 566 - return -1; 567 565 } 568 566 569 567 /** ··· 570 572 * @nlh: netlink message header 571 573 * 572 574 * Removes the complete netlink message including all 573 - * attributes from the socket buffer again. Returns -1. 575 + * attributes from the socket buffer again. 574 576 */ 575 - static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) 577 + static inline void nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh) 576 578 { 577 - return nlmsg_trim(skb, nlh); 579 + nlmsg_trim(skb, nlh); 578 580 } 579 581 580 582 /** ··· 773 775 int nested_len = nla_len(nla) - NLA_ALIGN(len); 774 776 775 777 if (nested_len < 0) 776 - return -1; 778 + return -EINVAL; 777 779 if (nested_len >= nla_attr_size(0)) 778 780 return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len), 779 781 nested_len, policy); ··· 1078 1080 * @start: container attribute 1079 1081 * 1080 1082 * Removes the container attribute and including all nested 1081 - * attributes. Returns -1. 1083 + * attributes. Returns -EMSGSIZE 1082 1084 */ 1083 - static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) 1085 + static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start) 1084 1086 { 1085 - return nlmsg_trim(skb, start); 1087 + nlmsg_trim(skb, start); 1086 1088 } 1087 1089 1088 1090 /**
+14 -3
include/net/sctp/structs.h
··· 548 548 struct dst_entry *(*get_dst) (struct sctp_association *asoc, 549 549 union sctp_addr *daddr, 550 550 union sctp_addr *saddr); 551 - void (*get_saddr) (struct sctp_association *asoc, 551 + void (*get_saddr) (struct sctp_sock *sk, 552 + struct sctp_association *asoc, 552 553 struct dst_entry *dst, 553 554 union sctp_addr *daddr, 554 555 union sctp_addr *saddr); ··· 588 587 int (*is_ce) (const struct sk_buff *sk); 589 588 void (*seq_dump_addr)(struct seq_file *seq, 590 589 union sctp_addr *addr); 590 + void (*ecn_capable)(struct sock *sk); 591 591 __u16 net_header_len; 592 592 int sockaddr_len; 593 593 sa_family_t sa_family; ··· 903 901 * calculation completes (i.e. the DATA chunk 904 902 * is SACK'd) clear this flag. 905 903 */ 906 - int rto_pending; 904 + __u8 rto_pending; 905 + 906 + /* Flag to track the current fast recovery state */ 907 + __u8 fast_recovery; 907 908 908 909 /* 909 910 * These are the congestion stats. ··· 924 919 925 920 /* Data that has been sent, but not acknowledged. */ 926 921 __u32 flight_size; 922 + 923 + /* TSN marking the fast recovery exit point */ 924 + __u32 fast_recovery_exit; 927 925 928 926 /* Destination */ 929 927 struct dst_entry *dst; ··· 1052 1044 struct sctp_sock *); 1053 1045 void sctp_transport_pmtu(struct sctp_transport *); 1054 1046 void sctp_transport_free(struct sctp_transport *); 1055 - void sctp_transport_reset_timers(struct sctp_transport *); 1047 + void sctp_transport_reset_timers(struct sctp_transport *, int); 1056 1048 void sctp_transport_hold(struct sctp_transport *); 1057 1049 void sctp_transport_put(struct sctp_transport *); 1058 1050 void sctp_transport_update_rto(struct sctp_transport *, __u32); ··· 1141 1133 1142 1134 /* How many unackd bytes do we have in-flight? */ 1143 1135 __u32 outstanding_bytes; 1136 + 1137 + /* Are we doing fast-rtx on this queue */ 1138 + char fast_rtx; 1144 1139 1145 1140 /* Corked? */ 1146 1141 char cork;
+2 -1
include/net/transp_v6.h
··· 40 40 struct msghdr *msg, 41 41 struct sk_buff *skb); 42 42 43 - extern int datagram_send_ctl(struct msghdr *msg, 43 + extern int datagram_send_ctl(struct net *net, 44 + struct msghdr *msg, 44 45 struct flowi *fl, 45 46 struct ipv6_txoptions *opt, 46 47 int *hlimit, int *tclass);
+1
include/net/udp.h
··· 135 135 136 136 extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, 137 137 struct msghdr *msg, size_t len); 138 + extern void udp_flush_pending_frames(struct sock *sk); 138 139 139 140 extern int udp_rcv(struct sk_buff *skb); 140 141 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+3 -8
net/ax25/ax25_subr.c
··· 64 64 65 65 void ax25_requeue_frames(ax25_cb *ax25) 66 66 { 67 - struct sk_buff *skb, *skb_prev = NULL; 67 + struct sk_buff *skb; 68 68 69 69 /* 70 70 * Requeue all the un-ack-ed frames on the output queue to be picked 71 71 * up by ax25_kick called from the timer. This arrangement handles the 72 72 * possibility of an empty output queue. 73 73 */ 74 - while ((skb = skb_dequeue(&ax25->ack_queue)) != NULL) { 75 - if (skb_prev == NULL) 76 - skb_queue_head(&ax25->write_queue, skb); 77 - else 78 - skb_append(skb_prev, skb, &ax25->write_queue); 79 - skb_prev = skb; 80 - } 74 + while ((skb = skb_dequeue_tail(&ax25->ack_queue)) != NULL) 75 + skb_queue_head(&ax25->write_queue, skb); 81 76 } 82 77 83 78 /*
+12 -1
net/bluetooth/rfcomm/tty.c
··· 566 566 if (dlc->state == BT_CLOSED) { 567 567 if (!dev->tty) { 568 568 if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { 569 - if (rfcomm_dev_get(dev->id) == NULL) 569 + /* Drop DLC lock here to avoid deadlock 570 + * 1. rfcomm_dev_get will take rfcomm_dev_lock 571 + * but in rfcomm_dev_add there's lock order: 572 + * rfcomm_dev_lock -> dlc lock 573 + * 2. rfcomm_dev_put will deadlock if it's 574 + * the last reference 575 + */ 576 + rfcomm_dlc_unlock(dlc); 577 + if (rfcomm_dev_get(dev->id) == NULL) { 578 + rfcomm_dlc_lock(dlc); 570 579 return; 580 + } 571 581 572 582 rfcomm_dev_del(dev); 573 583 rfcomm_dev_put(dev); 584 + rfcomm_dlc_lock(dlc); 574 585 } 575 586 } else 576 587 tty_hangup(dev->tty);
+5 -4
net/core/neighbour.c
··· 1714 1714 return nla_nest_end(skb, nest); 1715 1715 1716 1716 nla_put_failure: 1717 - return nla_nest_cancel(skb, nest); 1717 + nla_nest_cancel(skb, nest); 1718 + return -EMSGSIZE; 1718 1719 } 1719 1720 1720 1721 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl, ··· 2058 2057 goto nla_put_failure; 2059 2058 } 2060 2059 2061 - ci.ndm_used = now - neigh->used; 2062 - ci.ndm_confirmed = now - neigh->confirmed; 2063 - ci.ndm_updated = now - neigh->updated; 2060 + ci.ndm_used = jiffies_to_clock_t(now - neigh->used); 2061 + ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed); 2062 + ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated); 2064 2063 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; 2065 2064 read_unlock_bh(&neigh->lock); 2066 2065
+2 -1
net/core/rtnetlink.c
··· 498 498 return nla_nest_end(skb, mx); 499 499 500 500 nla_put_failure: 501 - return nla_nest_cancel(skb, mx); 501 + nla_nest_cancel(skb, mx); 502 + return -EMSGSIZE; 502 503 } 503 504 504 505 int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
+3 -2
net/core/skbuff.c
··· 1445 1445 1446 1446 if (spd.nr_pages) { 1447 1447 int ret; 1448 + struct sock *sk = __skb->sk; 1448 1449 1449 1450 /* 1450 1451 * Drop the socket lock, otherwise we have reverse ··· 1456 1455 * we call into ->sendpage() with the i_mutex lock held 1457 1456 * and networking will grab the socket lock. 1458 1457 */ 1459 - release_sock(__skb->sk); 1458 + release_sock(sk); 1460 1459 ret = splice_to_pipe(pipe, &spd); 1461 - lock_sock(__skb->sk); 1460 + lock_sock(sk); 1462 1461 return ret; 1463 1462 } 1464 1463
+1 -1
net/core/user_dma.c
··· 75 75 76 76 end = start + skb_shinfo(skb)->frags[i].size; 77 77 copy = end - offset; 78 - if ((copy = end - offset) > 0) { 78 + if (copy > 0) { 79 79 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 80 80 struct page *page = frag->page; 81 81
-9
net/ipv4/devinet.c
··· 90 90 [IFA_LOCAL] = { .type = NLA_U32 }, 91 91 [IFA_ADDRESS] = { .type = NLA_U32 }, 92 92 [IFA_BROADCAST] = { .type = NLA_U32 }, 93 - [IFA_ANYCAST] = { .type = NLA_U32 }, 94 93 [IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, 95 94 }; 96 95 ··· 535 536 if (tb[IFA_BROADCAST]) 536 537 ifa->ifa_broadcast = nla_get_be32(tb[IFA_BROADCAST]); 537 538 538 - if (tb[IFA_ANYCAST]) 539 - ifa->ifa_anycast = nla_get_be32(tb[IFA_ANYCAST]); 540 - 541 539 if (tb[IFA_LABEL]) 542 540 nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ); 543 541 else ··· 741 745 break; 742 746 inet_del_ifa(in_dev, ifap, 0); 743 747 ifa->ifa_broadcast = 0; 744 - ifa->ifa_anycast = 0; 745 748 ifa->ifa_scope = 0; 746 749 } 747 750 ··· 1108 1113 + nla_total_size(4) /* IFA_ADDRESS */ 1109 1114 + nla_total_size(4) /* IFA_LOCAL */ 1110 1115 + nla_total_size(4) /* IFA_BROADCAST */ 1111 - + nla_total_size(4) /* IFA_ANYCAST */ 1112 1116 + nla_total_size(IFNAMSIZ); /* IFA_LABEL */ 1113 1117 } 1114 1118 ··· 1136 1142 1137 1143 if (ifa->ifa_broadcast) 1138 1144 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); 1139 - 1140 - if (ifa->ifa_anycast) 1141 - NLA_PUT_BE32(skb, IFA_ANYCAST, ifa->ifa_anycast); 1142 1145 1143 1146 if (ifa->ifa_label[0]) 1144 1147 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
-1
net/ipv4/fib_frontend.c
··· 506 506 [RTA_PREFSRC] = { .type = NLA_U32 }, 507 507 [RTA_METRICS] = { .type = NLA_NESTED }, 508 508 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) }, 509 - [RTA_PROTOINFO] = { .type = NLA_U32 }, 510 509 [RTA_FLOW] = { .type = NLA_U32 }, 511 510 }; 512 511
+9
net/ipv4/raw.c
··· 608 608 sk_common_release(sk); 609 609 } 610 610 611 + static int raw_destroy(struct sock *sk) 612 + { 613 + lock_sock(sk); 614 + ip_flush_pending_frames(sk); 615 + release_sock(sk); 616 + return 0; 617 + } 618 + 611 619 /* This gets rid of all the nasties in af_inet. -DaveM */ 612 620 static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) 613 621 { ··· 828 820 .name = "RAW", 829 821 .owner = THIS_MODULE, 830 822 .close = raw_close, 823 + .destroy = raw_destroy, 831 824 .connect = ip4_datagram_connect, 832 825 .disconnect = udp_disconnect, 833 826 .ioctl = raw_ioctl,
+1 -1
net/ipv4/route.c
··· 1792 1792 if (err) 1793 1793 flags |= RTCF_DIRECTSRC; 1794 1794 1795 - if (out_dev == in_dev && err && !(flags & RTCF_MASQ) && 1795 + if (out_dev == in_dev && err && 1796 1796 (IN_DEV_SHARED_MEDIA(out_dev) || 1797 1797 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) 1798 1798 flags |= RTCF_DOREDIRECT;
+8 -1
net/ipv4/tcp.c
··· 1227 1227 copied += used; 1228 1228 offset += used; 1229 1229 } 1230 - if (offset != skb->len) 1230 + /* 1231 + * If recv_actor drops the lock (e.g. TCP splice 1232 + * receive) the skb pointer might be invalid when 1233 + * getting here: tcp_collapse might have deleted it 1234 + * while aggregating skbs from the socket queue. 1235 + */ 1236 + skb = tcp_recv_skb(sk, seq-1, &offset); 1237 + if (!skb || (offset+1 != skb->len)) 1231 1238 break; 1232 1239 } 1233 1240 if (tcp_hdr(skb)->fin) {
+22 -13
net/ipv4/tcp_input.c
··· 1392 1392 1393 1393 if (before(next_dup->start_seq, skip_to_seq)) { 1394 1394 skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq, fack_count); 1395 - tcp_sacktag_walk(skb, sk, NULL, 1396 - next_dup->start_seq, next_dup->end_seq, 1397 - 1, fack_count, reord, flag); 1395 + skb = tcp_sacktag_walk(skb, sk, NULL, 1396 + next_dup->start_seq, next_dup->end_seq, 1397 + 1, fack_count, reord, flag); 1398 1398 } 1399 1399 1400 1400 return skb; ··· 2483 2483 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); 2484 2484 } 2485 2485 2486 + static void tcp_try_keep_open(struct sock *sk) 2487 + { 2488 + struct tcp_sock *tp = tcp_sk(sk); 2489 + int state = TCP_CA_Open; 2490 + 2491 + if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) 2492 + state = TCP_CA_Disorder; 2493 + 2494 + if (inet_csk(sk)->icsk_ca_state != state) { 2495 + tcp_set_ca_state(sk, state); 2496 + tp->high_seq = tp->snd_nxt; 2497 + } 2498 + } 2499 + 2486 2500 static void tcp_try_to_open(struct sock *sk, int flag) 2487 2501 { 2488 2502 struct tcp_sock *tp = tcp_sk(sk); ··· 2510 2496 tcp_enter_cwr(sk, 1); 2511 2497 2512 2498 if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { 2513 - int state = TCP_CA_Open; 2514 - 2515 - if (tcp_left_out(tp) || tp->retrans_out || tp->undo_marker) 2516 - state = TCP_CA_Disorder; 2517 - 2518 - if (inet_csk(sk)->icsk_ca_state != state) { 2519 - tcp_set_ca_state(sk, state); 2520 - tp->high_seq = tp->snd_nxt; 2521 - } 2499 + tcp_try_keep_open(sk); 2522 2500 tcp_moderate_cwnd(tp); 2523 2501 } else { 2524 2502 tcp_cwnd_down(sk, flag); ··· 3316 3310 return 1; 3317 3311 3318 3312 old_ack: 3319 - if (TCP_SKB_CB(skb)->sacked) 3313 + if (TCP_SKB_CB(skb)->sacked) { 3320 3314 tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3315 + if (icsk->icsk_ca_state == TCP_CA_Open) 3316 + tcp_try_keep_open(sk); 3317 + } 3321 3318 3322 3319 uninteresting_ack: 3323 3320 SOCK_DEBUG(sk, "Ack %u out of %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
+2
net/ipv4/tcp_output.c
··· 2131 2131 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2132 2132 if (tcp_transmit_skb(sk, skb, 0, priority)) 2133 2133 NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); 2134 + 2135 + TCP_INC_STATS(TCP_MIB_OUTRSTS); 2134 2136 } 2135 2137 2136 2138 /* WARNING: This routine must only be called when we have already sent
+1 -1
net/ipv4/tunnel4.c
··· 97 97 { 98 98 struct xfrm_tunnel *handler; 99 99 100 - if (!pskb_may_pull(skb, sizeof(struct iphdr))) 100 + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 101 101 goto drop; 102 102 103 103 for (handler = tunnel64_handlers; handler; handler = handler->next)
+2 -1
net/ipv4/udp.c
··· 420 420 /* 421 421 * Throw away all pending data and cancel the corking. Socket is locked. 422 422 */ 423 - static void udp_flush_pending_frames(struct sock *sk) 423 + void udp_flush_pending_frames(struct sock *sk) 424 424 { 425 425 struct udp_sock *up = udp_sk(sk); 426 426 ··· 430 430 ip_flush_pending_frames(sk); 431 431 } 432 432 } 433 + EXPORT_SYMBOL(udp_flush_pending_frames); 433 434 434 435 /** 435 436 * udp4_hwcsum_outgoing - handle outgoing HW checksumming
+58 -49
net/ipv6/addrconf.c
··· 731 731 onlink = -1; 732 732 733 733 spin_lock(&ifa->lock); 734 - lifetime = min_t(unsigned long, 735 - ifa->valid_lft, 0x7fffffffUL/HZ); 734 + 735 + lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ); 736 + /* 737 + * Note: Because this address is 738 + * not permanent, lifetime < 739 + * LONG_MAX / HZ here. 740 + */ 736 741 if (time_before(expires, 737 742 ifa->tstamp + lifetime * HZ)) 738 743 expires = ifa->tstamp + lifetime * HZ; ··· 1727 1722 __u32 valid_lft; 1728 1723 __u32 prefered_lft; 1729 1724 int addr_type; 1730 - unsigned long rt_expires; 1731 1725 struct inet6_dev *in6_dev; 1732 1726 1733 1727 pinfo = (struct prefix_info *) opt; ··· 1768 1764 * 2) Configure prefixes with the auto flag set 1769 1765 */ 1770 1766 1771 - if (valid_lft == INFINITY_LIFE_TIME) 1772 - rt_expires = ~0UL; 1773 - else if (valid_lft >= 0x7FFFFFFF/HZ) { 1767 + if (pinfo->onlink) { 1768 + struct rt6_info *rt; 1769 + unsigned long rt_expires; 1770 + 1774 1771 /* Avoid arithmetic overflow. Really, we could 1775 1772 * save rt_expires in seconds, likely valid_lft, 1776 1773 * but it would require division in fib gc, that it 1777 1774 * not good. 1778 1775 */ 1779 - rt_expires = 0x7FFFFFFF - (0x7FFFFFFF % HZ); 1780 - } else 1781 - rt_expires = valid_lft * HZ; 1776 + if (HZ > USER_HZ) 1777 + rt_expires = addrconf_timeout_fixup(valid_lft, HZ); 1778 + else 1779 + rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ); 1782 1780 1783 - /* 1784 - * We convert this (in jiffies) to clock_t later. 1785 - * Avoid arithmetic overflow there as well. 1786 - * Overflow can happen only if HZ < USER_HZ. 
1787 - */ 1788 - if (HZ < USER_HZ && ~rt_expires && rt_expires > 0x7FFFFFFF / USER_HZ) 1789 - rt_expires = 0x7FFFFFFF / USER_HZ; 1781 + if (addrconf_finite_timeout(rt_expires)) 1782 + rt_expires *= HZ; 1790 1783 1791 - if (pinfo->onlink) { 1792 - struct rt6_info *rt; 1793 1784 rt = rt6_lookup(dev_net(dev), &pinfo->prefix, NULL, 1794 1785 dev->ifindex, 1); 1795 1786 ··· 1793 1794 if (valid_lft == 0) { 1794 1795 ip6_del_rt(rt); 1795 1796 rt = NULL; 1796 - } else if (~rt_expires) { 1797 + } else if (addrconf_finite_timeout(rt_expires)) { 1797 1798 /* not infinity */ 1798 1799 rt->rt6i_expires = jiffies + rt_expires; 1799 1800 rt->rt6i_flags |= RTF_EXPIRES; ··· 1802 1803 rt->rt6i_expires = 0; 1803 1804 } 1804 1805 } else if (valid_lft) { 1805 - int flags = RTF_ADDRCONF | RTF_PREFIX_RT; 1806 1806 clock_t expires = 0; 1807 - if (~rt_expires) { 1807 + int flags = RTF_ADDRCONF | RTF_PREFIX_RT; 1808 + if (addrconf_finite_timeout(rt_expires)) { 1808 1809 /* not infinity */ 1809 1810 flags |= RTF_EXPIRES; 1810 1811 expires = jiffies_to_clock_t(rt_expires); ··· 2026 2027 * Manual configuration of address on an interface 2027 2028 */ 2028 2029 static int inet6_addr_add(struct net *net, int ifindex, struct in6_addr *pfx, 2029 - int plen, __u8 ifa_flags, __u32 prefered_lft, 2030 + unsigned int plen, __u8 ifa_flags, __u32 prefered_lft, 2030 2031 __u32 valid_lft) 2031 2032 { 2032 2033 struct inet6_ifaddr *ifp; ··· 2035 2036 int scope; 2036 2037 u32 flags; 2037 2038 clock_t expires; 2039 + unsigned long timeout; 2038 2040 2039 2041 ASSERT_RTNL(); 2042 + 2043 + if (plen > 128) 2044 + return -EINVAL; 2040 2045 2041 2046 /* check the lifetime */ 2042 2047 if (!valid_lft || prefered_lft > valid_lft) ··· 2055 2052 2056 2053 scope = ipv6_addr_scope(pfx); 2057 2054 2058 - if (valid_lft == INFINITY_LIFE_TIME) { 2059 - ifa_flags |= IFA_F_PERMANENT; 2060 - flags = 0; 2061 - expires = 0; 2062 - } else { 2063 - if (valid_lft >= 0x7FFFFFFF/HZ) 2064 - valid_lft = 0x7FFFFFFF/HZ; 2055 + timeout = 
addrconf_timeout_fixup(valid_lft, HZ); 2056 + if (addrconf_finite_timeout(timeout)) { 2057 + expires = jiffies_to_clock_t(timeout * HZ); 2058 + valid_lft = timeout; 2065 2059 flags = RTF_EXPIRES; 2066 - expires = jiffies_to_clock_t(valid_lft * HZ); 2060 + } else { 2061 + expires = 0; 2062 + flags = 0; 2063 + ifa_flags |= IFA_F_PERMANENT; 2067 2064 } 2068 2065 2069 - if (prefered_lft == 0) 2070 - ifa_flags |= IFA_F_DEPRECATED; 2071 - else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 2072 - (prefered_lft != INFINITY_LIFE_TIME)) 2073 - prefered_lft = 0x7FFFFFFF/HZ; 2066 + timeout = addrconf_timeout_fixup(prefered_lft, HZ); 2067 + if (addrconf_finite_timeout(timeout)) { 2068 + if (timeout == 0) 2069 + ifa_flags |= IFA_F_DEPRECATED; 2070 + prefered_lft = timeout; 2071 + } 2074 2072 2075 2073 ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags); 2076 2074 ··· 2099 2095 } 2100 2096 2101 2097 static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx, 2102 - int plen) 2098 + unsigned int plen) 2103 2099 { 2104 2100 struct inet6_ifaddr *ifp; 2105 2101 struct inet6_dev *idev; 2106 2102 struct net_device *dev; 2103 + 2104 + if (plen > 128) 2105 + return -EINVAL; 2107 2106 2108 2107 dev = __dev_get_by_index(net, ifindex); 2109 2108 if (!dev) ··· 3176 3169 { 3177 3170 u32 flags; 3178 3171 clock_t expires; 3172 + unsigned long timeout; 3179 3173 3180 3174 if (!valid_lft || (prefered_lft > valid_lft)) 3181 3175 return -EINVAL; 3182 3176 3183 - if (valid_lft == INFINITY_LIFE_TIME) { 3184 - ifa_flags |= IFA_F_PERMANENT; 3185 - flags = 0; 3186 - expires = 0; 3187 - } else { 3188 - if (valid_lft >= 0x7FFFFFFF/HZ) 3189 - valid_lft = 0x7FFFFFFF/HZ; 3177 + timeout = addrconf_timeout_fixup(valid_lft, HZ); 3178 + if (addrconf_finite_timeout(timeout)) { 3179 + expires = jiffies_to_clock_t(timeout * HZ); 3180 + valid_lft = timeout; 3190 3181 flags = RTF_EXPIRES; 3191 - expires = jiffies_to_clock_t(valid_lft * HZ); 3182 + } else { 3183 + expires = 0; 3184 + flags = 0; 3185 + 
ifa_flags |= IFA_F_PERMANENT; 3192 3186 } 3193 3187 3194 - if (prefered_lft == 0) 3195 - ifa_flags |= IFA_F_DEPRECATED; 3196 - else if ((prefered_lft >= 0x7FFFFFFF/HZ) && 3197 - (prefered_lft != INFINITY_LIFE_TIME)) 3198 - prefered_lft = 0x7FFFFFFF/HZ; 3188 + timeout = addrconf_timeout_fixup(prefered_lft, HZ); 3189 + if (addrconf_finite_timeout(timeout)) { 3190 + if (timeout == 0) 3191 + ifa_flags |= IFA_F_DEPRECATED; 3192 + prefered_lft = timeout; 3193 + } 3199 3194 3200 3195 spin_lock_bh(&ifp->lock); 3201 3196 ifp->flags = (ifp->flags & ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD | IFA_F_HOMEADDRESS)) | ifa_flags;
+24 -21
net/ipv6/datagram.c
··· 496 496 return 0; 497 497 } 498 498 499 - int datagram_send_ctl(struct msghdr *msg, struct flowi *fl, 499 + int datagram_send_ctl(struct net *net, 500 + struct msghdr *msg, struct flowi *fl, 500 501 struct ipv6_txoptions *opt, 501 502 int *hlimit, int *tclass) 502 503 { ··· 510 509 511 510 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { 512 511 int addr_type; 513 - struct net_device *dev = NULL; 514 512 515 513 if (!CMSG_OK(msg, cmsg)) { 516 514 err = -EINVAL; ··· 522 522 switch (cmsg->cmsg_type) { 523 523 case IPV6_PKTINFO: 524 524 case IPV6_2292PKTINFO: 525 + { 526 + struct net_device *dev = NULL; 527 + 525 528 if (cmsg->cmsg_len < CMSG_LEN(sizeof(struct in6_pktinfo))) { 526 529 err = -EINVAL; 527 530 goto exit_f; ··· 538 535 fl->oif = src_info->ipi6_ifindex; 539 536 } 540 537 541 - addr_type = ipv6_addr_type(&src_info->ipi6_addr); 538 + addr_type = __ipv6_addr_type(&src_info->ipi6_addr); 542 539 543 - if (addr_type == IPV6_ADDR_ANY) 544 - break; 540 + if (fl->oif) { 541 + dev = dev_get_by_index(net, fl->oif); 542 + if (!dev) 543 + return -ENODEV; 544 + } else if (addr_type & IPV6_ADDR_LINKLOCAL) 545 + return -EINVAL; 545 546 546 - if (addr_type & IPV6_ADDR_LINKLOCAL) { 547 - if (!src_info->ipi6_ifindex) 548 - return -EINVAL; 549 - else { 550 - dev = dev_get_by_index(&init_net, src_info->ipi6_ifindex); 551 - if (!dev) 552 - return -ENODEV; 553 - } 547 + if (addr_type != IPV6_ADDR_ANY) { 548 + int strict = __ipv6_addr_src_scope(addr_type) <= IPV6_ADDR_SCOPE_LINKLOCAL; 549 + if (!ipv6_chk_addr(net, &src_info->ipi6_addr, 550 + strict ? 
dev : NULL, 0)) 551 + err = -EINVAL; 552 + else 553 + ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); 554 554 } 555 - if (!ipv6_chk_addr(&init_net, &src_info->ipi6_addr, 556 - dev, 0)) { 557 - if (dev) 558 - dev_put(dev); 559 - err = -EINVAL; 560 - goto exit_f; 561 - } 555 + 562 556 if (dev) 563 557 dev_put(dev); 564 558 565 - ipv6_addr_copy(&fl->fl6_src, &src_info->ipi6_addr); 559 + if (err) 560 + goto exit_f; 561 + 566 562 break; 563 + } 567 564 568 565 case IPV6_FLOWINFO: 569 566 if (cmsg->cmsg_len < CMSG_LEN(4)) {
+1 -1
net/ipv6/ip6_flowlabel.c
··· 354 354 msg.msg_control = (void*)(fl->opt+1); 355 355 flowi.oif = 0; 356 356 357 - err = datagram_send_ctl(&msg, &flowi, fl->opt, &junk, &junk); 357 + err = datagram_send_ctl(net, &msg, &flowi, fl->opt, &junk, &junk); 358 358 if (err) 359 359 goto done; 360 360 err = -EINVAL;
+16 -5
net/ipv6/ipv6_sockglue.c
··· 161 161 struct ipv6_txoptions *opt; 162 162 struct sk_buff *pktopt; 163 163 164 - if (sk->sk_protocol != IPPROTO_UDP && 165 - sk->sk_protocol != IPPROTO_UDPLITE && 166 - sk->sk_protocol != IPPROTO_TCP) 164 + if (sk->sk_type == SOCK_RAW) 165 + break; 166 + 167 + if (sk->sk_protocol == IPPROTO_UDP || 168 + sk->sk_protocol == IPPROTO_UDPLITE) { 169 + struct udp_sock *up = udp_sk(sk); 170 + if (up->pending == AF_INET6) { 171 + retv = -EBUSY; 172 + break; 173 + } 174 + } else if (sk->sk_protocol != IPPROTO_TCP) 167 175 break; 168 176 169 177 if (sk->sk_state != TCP_ESTABLISHED) { ··· 424 416 msg.msg_controllen = optlen; 425 417 msg.msg_control = (void*)(opt+1); 426 418 427 - retv = datagram_send_ctl(&msg, &fl, opt, &junk, &junk); 419 + retv = datagram_send_ctl(net, &msg, &fl, opt, &junk, &junk); 428 420 if (retv) 429 421 goto done; 430 422 update: ··· 840 832 len = min_t(unsigned int, len, ipv6_optlen(hdr)); 841 833 if (copy_to_user(optval, hdr, len)) 842 834 return -EFAULT; 843 - return ipv6_optlen(hdr); 835 + return len; 844 836 } 845 837 846 838 static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, ··· 983 975 len = ipv6_getsockopt_sticky(sk, np->opt, 984 976 optname, optval, len); 985 977 release_sock(sk); 978 + /* check if ipv6_getsockopt_sticky() returns err code */ 979 + if (len < 0) 980 + return len; 986 981 return put_user(len, optlen); 987 982 } 988 983
+5 -3
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 209 209 arg.dst = dst; 210 210 hash = ip6qhashfn(id, src, dst); 211 211 212 + local_bh_disable(); 212 213 q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); 214 + local_bh_enable(); 213 215 if (q == NULL) 214 216 goto oom; 215 217 ··· 640 638 goto ret_orig; 641 639 } 642 640 643 - spin_lock(&fq->q.lock); 641 + spin_lock_bh(&fq->q.lock); 644 642 645 643 if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { 646 - spin_unlock(&fq->q.lock); 644 + spin_unlock_bh(&fq->q.lock); 647 645 pr_debug("Can't insert skb to queue\n"); 648 646 fq_put(fq); 649 647 goto ret_orig; ··· 655 653 if (ret_skb == NULL) 656 654 pr_debug("Can't reassemble fragmented packets\n"); 657 655 } 658 - spin_unlock(&fq->q.lock); 656 + spin_unlock_bh(&fq->q.lock); 659 657 660 658 fq_put(fq); 661 659 return ret_skb;
+10 -1
net/ipv6/raw.c
··· 813 813 memset(opt, 0, sizeof(struct ipv6_txoptions)); 814 814 opt->tot_len = sizeof(struct ipv6_txoptions); 815 815 816 - err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass); 816 + err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); 817 817 if (err < 0) { 818 818 fl6_sock_release(flowlabel); 819 819 return err; ··· 1164 1164 sk_common_release(sk); 1165 1165 } 1166 1166 1167 + static int raw6_destroy(struct sock *sk) 1168 + { 1169 + lock_sock(sk); 1170 + ip6_flush_pending_frames(sk); 1171 + release_sock(sk); 1172 + return 0; 1173 + } 1174 + 1167 1175 static int rawv6_init_sk(struct sock *sk) 1168 1176 { 1169 1177 struct raw6_sock *rp = raw6_sk(sk); ··· 1195 1187 .name = "RAWv6", 1196 1188 .owner = THIS_MODULE, 1197 1189 .close = rawv6_close, 1190 + .destroy = raw6_destroy, 1198 1191 .connect = ip6_datagram_connect, 1199 1192 .disconnect = udp_disconnect, 1200 1193 .ioctl = rawv6_ioctl,
+3 -9
net/ipv6/route.c
··· 446 446 struct route_info *rinfo = (struct route_info *) opt; 447 447 struct in6_addr prefix_buf, *prefix; 448 448 unsigned int pref; 449 - u32 lifetime; 449 + unsigned long lifetime; 450 450 struct rt6_info *rt; 451 451 452 452 if (len < sizeof(struct route_info)) { ··· 472 472 if (pref == ICMPV6_ROUTER_PREF_INVALID) 473 473 pref = ICMPV6_ROUTER_PREF_MEDIUM; 474 474 475 - lifetime = ntohl(rinfo->lifetime); 476 - if (lifetime == 0xffffffff) { 477 - /* infinity */ 478 - } else if (lifetime > 0x7fffffff/HZ - 1) { 479 - /* Avoid arithmetic overflow */ 480 - lifetime = 0x7fffffff/HZ - 1; 481 - } 475 + lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ); 482 476 483 477 if (rinfo->length == 3) 484 478 prefix = (struct in6_addr *)rinfo->prefix; ··· 500 506 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); 501 507 502 508 if (rt) { 503 - if (lifetime == 0xffffffff) { 509 + if (!addrconf_finite_timeout(lifetime)) { 504 510 rt->rt6i_flags &= ~RTF_EXPIRES; 505 511 } else { 506 512 rt->rt6i_expires = jiffies + HZ * lifetime;
+1 -1
net/ipv6/tunnel6.c
··· 109 109 { 110 110 struct xfrm6_tunnel *handler; 111 111 112 - if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 112 + if (!pskb_may_pull(skb, sizeof(struct iphdr))) 113 113 goto drop; 114 114 115 115 for (handler = tunnel46_handlers; handler; handler = handler->next)
+6 -2
net/ipv6/udp.c
··· 534 534 { 535 535 struct udp_sock *up = udp_sk(sk); 536 536 537 - if (up->pending) { 537 + if (up->pending == AF_INET) 538 + udp_flush_pending_frames(sk); 539 + else if (up->pending) { 538 540 up->len = 0; 539 541 up->pending = 0; 540 542 ip6_flush_pending_frames(sk); ··· 733 731 memset(opt, 0, sizeof(struct ipv6_txoptions)); 734 732 opt->tot_len = sizeof(*opt); 735 733 736 - err = datagram_send_ctl(msg, &fl, opt, &hlimit, &tclass); 734 + err = datagram_send_ctl(sock_net(sk), msg, &fl, opt, &hlimit, &tclass); 737 735 if (err < 0) { 738 736 fl6_sock_release(flowlabel); 739 737 return err; ··· 850 848 } else { 851 849 dst_release(dst); 852 850 } 851 + dst = NULL; 853 852 } 854 853 855 854 if (err > 0) 856 855 err = np->recverr ? net_xmit_errno(err) : 0; 857 856 release_sock(sk); 858 857 out: 858 + dst_release(dst); 859 859 fl6_sock_release(flowlabel); 860 860 if (!err) 861 861 return len;
+7 -5
net/irda/af_irda.c
··· 1093 1093 1094 1094 init_waitqueue_head(&self->query_wait); 1095 1095 1096 - /* Initialise networking socket struct */ 1097 - sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ 1098 - sk->sk_family = PF_IRDA; 1099 - sk->sk_protocol = protocol; 1100 - 1101 1096 switch (sock->type) { 1102 1097 case SOCK_STREAM: 1103 1098 sock->ops = &irda_stream_ops; ··· 1119 1124 self->max_sdu_size_rx = TTP_SAR_UNBOUND; 1120 1125 break; 1121 1126 default: 1127 + sk_free(sk); 1122 1128 return -ESOCKTNOSUPPORT; 1123 1129 } 1124 1130 break; 1125 1131 default: 1132 + sk_free(sk); 1126 1133 return -ESOCKTNOSUPPORT; 1127 1134 } 1135 + 1136 + /* Initialise networking socket struct */ 1137 + sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ 1138 + sk->sk_family = PF_IRDA; 1139 + sk->sk_protocol = protocol; 1128 1140 1129 1141 /* Register as a client with IrLMP */ 1130 1142 self->ckey = irlmp_register_client(0, NULL, NULL, NULL);
+2 -1
net/netfilter/xt_connlimit.c
··· 73 73 static inline bool already_closed(const struct nf_conn *conn) 74 74 { 75 75 if (nf_ct_protonum(conn) == IPPROTO_TCP) 76 - return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT; 76 + return conn->proto.tcp.state == TCP_CONNTRACK_TIME_WAIT || 77 + conn->proto.tcp.state == TCP_CONNTRACK_CLOSE; 77 78 else 78 79 return 0; 79 80 }
+6 -6
net/netlink/attr.c
··· 400 400 * @attrlen: length of attribute payload 401 401 * @data: head of attribute payload 402 402 * 403 - * Returns -1 if the tailroom of the skb is insufficient to store 403 + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store 404 404 * the attribute header and payload. 405 405 */ 406 406 int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) 407 407 { 408 408 if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) 409 - return -1; 409 + return -EMSGSIZE; 410 410 411 411 __nla_put(skb, attrtype, attrlen, data); 412 412 return 0; ··· 418 418 * @attrlen: length of attribute payload 419 419 * @data: head of attribute payload 420 420 * 421 - * Returns -1 if the tailroom of the skb is insufficient to store 421 + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store 422 422 * the attribute payload. 423 423 */ 424 424 int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) 425 425 { 426 426 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) 427 - return -1; 427 + return -EMSGSIZE; 428 428 429 429 __nla_put_nohdr(skb, attrlen, data); 430 430 return 0; ··· 436 436 * @attrlen: length of attribute payload 437 437 * @data: head of attribute payload 438 438 * 439 - * Returns -1 if the tailroom of the skb is insufficient to store 439 + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store 440 440 * the attribute payload. 441 441 */ 442 442 int nla_append(struct sk_buff *skb, int attrlen, const void *data) 443 443 { 444 444 if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) 445 - return -1; 445 + return -EMSGSIZE; 446 446 447 447 memcpy(skb_put(skb, attrlen), data, attrlen); 448 448 return 0;
+4 -2
net/netlink/genetlink.c
··· 554 554 return genlmsg_end(skb, hdr); 555 555 556 556 nla_put_failure: 557 - return genlmsg_cancel(skb, hdr); 557 + genlmsg_cancel(skb, hdr); 558 + return -EMSGSIZE; 558 559 } 559 560 560 561 static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid, ··· 591 590 return genlmsg_end(skb, hdr); 592 591 593 592 nla_put_failure: 594 - return genlmsg_cancel(skb, hdr); 593 + genlmsg_cancel(skb, hdr); 594 + return -EMSGSIZE; 595 595 } 596 596 597 597 static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
+4 -2
net/sched/sch_dsmark.c
··· 444 444 return nla_nest_end(skb, opts); 445 445 446 446 nla_put_failure: 447 - return nla_nest_cancel(skb, opts); 447 + nla_nest_cancel(skb, opts); 448 + return -EMSGSIZE; 448 449 } 449 450 450 451 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb) ··· 467 466 return nla_nest_end(skb, opts); 468 467 469 468 nla_put_failure: 470 - return nla_nest_cancel(skb, opts); 469 + nla_nest_cancel(skb, opts); 470 + return -EMSGSIZE; 471 471 } 472 472 473 473 static const struct Qdisc_class_ops dsmark_class_ops = {
+2 -1
net/sched/sch_gred.c
··· 582 582 return nla_nest_end(skb, opts); 583 583 584 584 nla_put_failure: 585 - return nla_nest_cancel(skb, opts); 585 + nla_nest_cancel(skb, opts); 586 + return -EMSGSIZE; 586 587 } 587 588 588 589 static void gred_destroy(struct Qdisc *sch)
+1 -1
net/sched/sch_hfsc.c
··· 1360 1360 1361 1361 nla_put_failure: 1362 1362 nla_nest_cancel(skb, nest); 1363 - return -1; 1363 + return -EMSGSIZE; 1364 1364 } 1365 1365 1366 1366 static int
+2 -1
net/sched/sch_red.c
··· 281 281 return nla_nest_end(skb, opts); 282 282 283 283 nla_put_failure: 284 - return nla_nest_cancel(skb, opts); 284 + nla_nest_cancel(skb, opts); 285 + return -EMSGSIZE; 285 286 } 286 287 287 288 static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+12 -9
net/sctp/associola.c
··· 1203 1203 struct list_head *head = &asoc->peer.transport_addr_list; 1204 1204 struct list_head *pos; 1205 1205 1206 + if (asoc->peer.transport_count == 1) 1207 + return; 1208 + 1206 1209 /* Find the next transport in a round-robin fashion. */ 1207 1210 t = asoc->peer.retran_path; 1208 1211 pos = &t->transports; ··· 1220 1217 1221 1218 t = list_entry(pos, struct sctp_transport, transports); 1222 1219 1220 + /* We have exhausted the list, but didn't find any 1221 + * other active transports. If so, use the next 1222 + * transport. 1223 + */ 1224 + if (t == asoc->peer.retran_path) { 1225 + t = next; 1226 + break; 1227 + } 1228 + 1223 1229 /* Try to find an active transport. */ 1224 1230 1225 1231 if ((t->state == SCTP_ACTIVE) || ··· 1240 1228 */ 1241 1229 if (!next) 1242 1230 next = t; 1243 - } 1244 - 1245 - /* We have exhausted the list, but didn't find any 1246 - * other active transports. If so, use the next 1247 - * transport. 1248 - */ 1249 - if (t == asoc->peer.retran_path) { 1250 - t = next; 1251 - break; 1252 1231 } 1253 1232 } 1254 1233
+9 -2
net/sctp/ipv6.c
··· 299 299 /* Fills in the source address(saddr) based on the destination address(daddr) 300 300 * and asoc's bind address list. 301 301 */ 302 - static void sctp_v6_get_saddr(struct sctp_association *asoc, 302 + static void sctp_v6_get_saddr(struct sctp_sock *sk, 303 + struct sctp_association *asoc, 303 304 struct dst_entry *dst, 304 305 union sctp_addr *daddr, 305 306 union sctp_addr *saddr) ··· 319 318 if (!asoc) { 320 319 ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, 321 320 &daddr->v6.sin6_addr, 322 - inet6_sk(asoc->base.sk)->srcprefs, 321 + inet6_sk(&sk->inet.sk)->srcprefs, 323 322 &saddr->v6.sin6_addr); 324 323 SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n", 325 324 NIP6(saddr->v6.sin6_addr)); ··· 727 726 seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr)); 728 727 } 729 728 729 + static void sctp_v6_ecn_capable(struct sock *sk) 730 + { 731 + inet6_sk(sk)->tclass |= INET_ECN_ECT_0; 732 + } 733 + 730 734 /* Initialize a PF_INET6 socket msg_name. */ 731 735 static void sctp_inet6_msgname(char *msgname, int *addr_len) 732 736 { ··· 1002 996 .skb_iif = sctp_v6_skb_iif, 1003 997 .is_ce = sctp_v6_is_ce, 1004 998 .seq_dump_addr = sctp_v6_seq_dump_addr, 999 + .ecn_capable = sctp_v6_ecn_capable, 1005 1000 .net_header_len = sizeof(struct ipv6hdr), 1006 1001 .sockaddr_len = sizeof(struct sockaddr_in6), 1007 1002 #ifdef CONFIG_COMPAT
+1 -1
net/sctp/output.c
··· 548 548 * Note: The works for IPv6 layer checks this bit too later 549 549 * in transmission. See IP6_ECN_flow_xmit(). 550 550 */ 551 - INET_ECN_xmit(nskb->sk); 551 + (*tp->af_specific->ecn_capable)(nskb->sk); 552 552 553 553 /* Set up the IP options. */ 554 554 /* BUG: not implemented
+77 -43
net/sctp/outqueue.c
··· 208 208 INIT_LIST_HEAD(&q->sacked); 209 209 INIT_LIST_HEAD(&q->abandoned); 210 210 211 + q->fast_rtx = 0; 211 212 q->outstanding_bytes = 0; 212 213 q->empty = 1; 213 214 q->cork = 0; ··· 501 500 case SCTP_RTXR_FAST_RTX: 502 501 SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS); 503 502 sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); 503 + q->fast_rtx = 1; 504 504 break; 505 505 case SCTP_RTXR_PMTUD: 506 506 SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS); ··· 520 518 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by 521 519 * following the procedures outlined in C1 - C5. 522 520 */ 523 - sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 521 + if (reason == SCTP_RTXR_T3_RTX) 522 + sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); 524 523 525 - error = sctp_outq_flush(q, /* rtx_timeout */ 1); 524 + /* Flush the queues only on timeout, since fast_rtx is only 525 + * triggered during sack processing and the queue 526 + * will be flushed at the end. 527 + */ 528 + if (reason != SCTP_RTXR_FAST_RTX) 529 + error = sctp_outq_flush(q, /* rtx_timeout */ 1); 526 530 527 531 if (error) 528 532 q->asoc->base.sk->sk_err = -error; ··· 546 538 int rtx_timeout, int *start_timer) 547 539 { 548 540 struct list_head *lqueue; 549 - struct list_head *lchunk; 550 541 struct sctp_transport *transport = pkt->transport; 551 542 sctp_xmit_t status; 552 543 struct sctp_chunk *chunk, *chunk1; 553 544 struct sctp_association *asoc; 545 + int fast_rtx; 554 546 int error = 0; 547 + int timer = 0; 548 + int done = 0; 555 549 556 550 asoc = q->asoc; 557 551 lqueue = &q->retransmit; 552 + fast_rtx = q->fast_rtx; 558 553 559 - /* RFC 2960 6.3.3 Handle T3-rtx Expiration 554 + /* This loop handles time-out retransmissions, fast retransmissions, 555 + * and retransmissions due to opening of whindow. 
556 + * 557 + * RFC 2960 6.3.3 Handle T3-rtx Expiration 560 558 * 561 559 * E3) Determine how many of the earliest (i.e., lowest TSN) 562 560 * outstanding DATA chunks for the address for which the ··· 577 563 * [Just to be painfully clear, if we are retransmitting 578 564 * because a timeout just happened, we should send only ONE 579 565 * packet of retransmitted data.] 566 + * 567 + * For fast retransmissions we also send only ONE packet. However, 568 + * if we are just flushing the queue due to open window, we'll 569 + * try to send as much as possible. 580 570 */ 581 - lchunk = sctp_list_dequeue(lqueue); 582 - 583 - while (lchunk) { 584 - chunk = list_entry(lchunk, struct sctp_chunk, 585 - transmitted_list); 571 + list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) { 586 572 587 573 /* Make sure that Gap Acked TSNs are not retransmitted. A 588 574 * simple approach is just to move such TSNs out of the ··· 590 576 * next chunk. 591 577 */ 592 578 if (chunk->tsn_gap_acked) { 593 - list_add_tail(lchunk, &transport->transmitted); 594 - lchunk = sctp_list_dequeue(lqueue); 579 + list_del(&chunk->transmitted_list); 580 + list_add_tail(&chunk->transmitted_list, 581 + &transport->transmitted); 595 582 continue; 596 583 } 584 + 585 + /* If we are doing fast retransmit, ignore non-fast_rtransmit 586 + * chunks 587 + */ 588 + if (fast_rtx && !chunk->fast_retransmit) 589 + continue; 597 590 598 591 /* Attempt to append this chunk to the packet. */ 599 592 status = sctp_packet_append_chunk(pkt, chunk); ··· 608 587 switch (status) { 609 588 case SCTP_XMIT_PMTU_FULL: 610 589 /* Send this packet. */ 611 - if ((error = sctp_packet_transmit(pkt)) == 0) 612 - *start_timer = 1; 590 + error = sctp_packet_transmit(pkt); 613 591 614 592 /* If we are retransmitting, we should only 615 593 * send a single packet. 
616 594 */ 617 - if (rtx_timeout) { 618 - list_add(lchunk, lqueue); 619 - lchunk = NULL; 620 - } 595 + if (rtx_timeout || fast_rtx) 596 + done = 1; 621 597 622 - /* Bundle lchunk in the next round. */ 598 + /* Bundle next chunk in the next round. */ 623 599 break; 624 600 625 601 case SCTP_XMIT_RWND_FULL: 626 602 /* Send this packet. */ 627 - if ((error = sctp_packet_transmit(pkt)) == 0) 628 - *start_timer = 1; 603 + error = sctp_packet_transmit(pkt); 629 604 630 605 /* Stop sending DATA as there is no more room 631 606 * at the receiver. 632 607 */ 633 - list_add(lchunk, lqueue); 634 - lchunk = NULL; 608 + done = 1; 635 609 break; 636 610 637 611 case SCTP_XMIT_NAGLE_DELAY: 638 612 /* Send this packet. */ 639 - if ((error = sctp_packet_transmit(pkt)) == 0) 640 - *start_timer = 1; 613 + error = sctp_packet_transmit(pkt); 641 614 642 615 /* Stop sending DATA because of nagle delay. */ 643 - list_add(lchunk, lqueue); 644 - lchunk = NULL; 616 + done = 1; 645 617 break; 646 618 647 619 default: 648 620 /* The append was successful, so add this chunk to 649 621 * the transmitted list. 650 622 */ 651 - list_add_tail(lchunk, &transport->transmitted); 623 + list_del(&chunk->transmitted_list); 624 + list_add_tail(&chunk->transmitted_list, 625 + &transport->transmitted); 652 626 653 627 /* Mark the chunk as ineligible for fast retransmit 654 628 * after it is retransmitted. ··· 651 635 if (chunk->fast_retransmit > 0) 652 636 chunk->fast_retransmit = -1; 653 637 654 - *start_timer = 1; 655 - q->empty = 0; 638 + /* Force start T3-rtx timer when fast retransmitting 639 + * the earliest outstanding TSN 640 + */ 641 + if (!timer && fast_rtx && 642 + ntohl(chunk->subh.data_hdr->tsn) == 643 + asoc->ctsn_ack_point + 1) 644 + timer = 2; 656 645 657 - /* Retrieve a new chunk to bundle. 
*/ 658 - lchunk = sctp_list_dequeue(lqueue); 646 + q->empty = 0; 659 647 break; 660 648 } 661 649 662 - /* If we are here due to a retransmit timeout or a fast 663 - * retransmit and if there are any chunks left in the retransmit 664 - * queue that could not fit in the PMTU sized packet, they need 665 - * to be marked as ineligible for a subsequent fast retransmit. 666 - */ 667 - if (rtx_timeout && !lchunk) { 668 - list_for_each_entry(chunk1, lqueue, transmitted_list) { 669 - if (chunk1->fast_retransmit > 0) 670 - chunk1->fast_retransmit = -1; 671 - } 650 + /* Set the timer if there were no errors */ 651 + if (!error && !timer) 652 + timer = 1; 653 + 654 + if (done) 655 + break; 656 + } 657 + 658 + /* If we are here due to a retransmit timeout or a fast 659 + * retransmit and if there are any chunks left in the retransmit 660 + * queue that could not fit in the PMTU sized packet, they need 661 + * to be marked as ineligible for a subsequent fast retransmit. 662 + */ 663 + if (rtx_timeout || fast_rtx) { 664 + list_for_each_entry(chunk1, lqueue, transmitted_list) { 665 + if (chunk1->fast_retransmit > 0) 666 + chunk1->fast_retransmit = -1; 672 667 } 673 668 } 669 + 670 + *start_timer = timer; 671 + 672 + /* Clear fast retransmit hint */ 673 + if (fast_rtx) 674 + q->fast_rtx = 0; 674 675 675 676 return error; 676 677 } ··· 895 862 rtx_timeout, &start_timer); 896 863 897 864 if (start_timer) 898 - sctp_transport_reset_timers(transport); 865 + sctp_transport_reset_timers(transport, 866 + start_timer-1); 899 867 900 868 /* This can happen on COOKIE-ECHO resend. Only 901 869 * one chunk can get bundled with a COOKIE-ECHO. ··· 1011 977 list_add_tail(&chunk->transmitted_list, 1012 978 &transport->transmitted); 1013 979 1014 - sctp_transport_reset_timers(transport); 980 + sctp_transport_reset_timers(transport, start_timer-1); 1015 981 1016 982 q->empty = 0; 1017 983
+9 -2
net/sctp/protocol.c
··· 470 470 /* Walk through the bind address list and look for a bind 471 471 * address that matches the source address of the returned dst. 472 472 */ 473 + sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 473 474 rcu_read_lock(); 474 475 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 475 476 if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC)) 476 477 continue; 477 - sctp_v4_dst_saddr(&dst_saddr, dst, htons(bp->port)); 478 478 if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) 479 479 goto out_unlock; 480 480 } ··· 519 519 /* For v4, the source address is cached in the route entry(dst). So no need 520 520 * to cache it separately and hence this is an empty routine. 521 521 */ 522 - static void sctp_v4_get_saddr(struct sctp_association *asoc, 522 + static void sctp_v4_get_saddr(struct sctp_sock *sk, 523 + struct sctp_association *asoc, 523 524 struct dst_entry *dst, 524 525 union sctp_addr *daddr, 525 526 union sctp_addr *saddr) ··· 615 614 static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) 616 615 { 617 616 seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); 617 + } 618 + 619 + static void sctp_v4_ecn_capable(struct sock *sk) 620 + { 621 + INET_ECN_xmit(sk); 618 622 } 619 623 620 624 /* Event handler for inet address addition/deletion events. ··· 940 934 .skb_iif = sctp_v4_skb_iif, 941 935 .is_ce = sctp_v4_is_ce, 942 936 .seq_dump_addr = sctp_v4_seq_dump_addr, 937 + .ecn_capable = sctp_v4_ecn_capable, 943 938 .net_header_len = sizeof(struct iphdr), 944 939 .sockaddr_len = sizeof(struct sockaddr_in), 945 940 #ifdef CONFIG_COMPAT
+35 -15
net/sctp/transport.c
··· 79 79 peer->rttvar = 0; 80 80 peer->srtt = 0; 81 81 peer->rto_pending = 0; 82 + peer->fast_recovery = 0; 82 83 83 84 peer->last_time_heard = jiffies; 84 85 peer->last_time_used = jiffies; ··· 191 190 /* Start T3_rtx timer if it is not already running and update the heartbeat 192 191 * timer. This routine is called every time a DATA chunk is sent. 193 192 */ 194 - void sctp_transport_reset_timers(struct sctp_transport *transport) 193 + void sctp_transport_reset_timers(struct sctp_transport *transport, int force) 195 194 { 196 195 /* RFC 2960 6.3.2 Retransmission Timer Rules 197 196 * ··· 201 200 * address. 202 201 */ 203 202 204 - if (!timer_pending(&transport->T3_rtx_timer)) 203 + if (force || !timer_pending(&transport->T3_rtx_timer)) 205 204 if (!mod_timer(&transport->T3_rtx_timer, 206 205 jiffies + transport->rto)) 207 206 sctp_transport_hold(transport); ··· 292 291 if (saddr) 293 292 memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); 294 293 else 295 - af->get_saddr(asoc, dst, daddr, &transport->saddr); 294 + af->get_saddr(opt, asoc, dst, daddr, &transport->saddr); 296 295 297 296 transport->dst = dst; 298 297 if ((transport->param_flags & SPP_PMTUD_DISABLE) && transport->pathmtu) { ··· 404 403 cwnd = transport->cwnd; 405 404 flight_size = transport->flight_size; 406 405 406 + /* See if we need to exit Fast Recovery first */ 407 + if (transport->fast_recovery && 408 + TSN_lte(transport->fast_recovery_exit, sack_ctsn)) 409 + transport->fast_recovery = 0; 410 + 407 411 /* The appropriate cwnd increase algorithm is performed if, and only 408 - * if the cumulative TSN has advanced and the congestion window is 412 + * if the cumulative TSN whould advanced and the congestion window is 409 413 * being fully utilized. 
410 414 */ 411 - if ((transport->asoc->ctsn_ack_point >= sack_ctsn) || 415 + if (TSN_lte(sack_ctsn, transport->asoc->ctsn_ack_point) || 412 416 (flight_size < cwnd)) 413 417 return; 414 418 ··· 422 416 pmtu = transport->asoc->pathmtu; 423 417 424 418 if (cwnd <= ssthresh) { 425 - /* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less 426 - * than or equal to ssthresh an SCTP endpoint MUST use the 427 - * slow start algorithm to increase cwnd only if the current 428 - * congestion window is being fully utilized and an incoming 429 - * SACK advances the Cumulative TSN Ack Point. Only when these 430 - * two conditions are met can the cwnd be increased otherwise 431 - * the cwnd MUST not be increased. If these conditions are met 432 - * then cwnd MUST be increased by at most the lesser of 433 - * 1) the total size of the previously outstanding DATA 434 - * chunk(s) acknowledged, and 2) the destination's path MTU. 419 + /* RFC 4960 7.2.1 420 + * o When cwnd is less than or equal to ssthresh, an SCTP 421 + * endpoint MUST use the slow-start algorithm to increase 422 + * cwnd only if the current congestion window is being fully 423 + * utilized, an incoming SACK advances the Cumulative TSN 424 + * Ack Point, and the data sender is not in Fast Recovery. 425 + * Only when these three conditions are met can the cwnd be 426 + * increased; otherwise, the cwnd MUST not be increased. 427 + * If these conditions are met, then cwnd MUST be increased 428 + * by, at most, the lesser of 1) the total size of the 429 + * previously outstanding DATA chunk(s) acknowledged, and 430 + * 2) the destination's path MTU. This upper bound protects 431 + * against the ACK-Splitting attack outlined in [SAVAGE99]. 
435 432 */ 433 + if (transport->fast_recovery) 434 + return; 435 + 436 436 if (bytes_acked > pmtu) 437 437 cwnd += pmtu; 438 438 else ··· 514 502 * cwnd = ssthresh 515 503 * partial_bytes_acked = 0 516 504 */ 505 + if (transport->fast_recovery) 506 + return; 507 + 508 + /* Mark Fast recovery */ 509 + transport->fast_recovery = 1; 510 + transport->fast_recovery_exit = transport->asoc->next_tsn - 1; 511 + 517 512 transport->ssthresh = max(transport->cwnd/2, 518 513 4*transport->asoc->pathmtu); 519 514 transport->cwnd = transport->ssthresh; ··· 605 586 t->flight_size = 0; 606 587 t->error_count = 0; 607 588 t->rto_pending = 0; 589 + t->fast_recovery = 0; 608 590 609 591 /* Initialize the state information for SFR-CACC */ 610 592 t->cacc.changeover_active = 0;
+8 -4
net/wireless/nl80211.c
··· 187 187 return genlmsg_end(msg, hdr); 188 188 189 189 nla_put_failure: 190 - return genlmsg_cancel(msg, hdr); 190 + genlmsg_cancel(msg, hdr); 191 + return -EMSGSIZE; 191 192 } 192 193 193 194 static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb) ··· 274 273 return genlmsg_end(msg, hdr); 275 274 276 275 nla_put_failure: 277 - return genlmsg_cancel(msg, hdr); 276 + genlmsg_cancel(msg, hdr); 277 + return -EMSGSIZE; 278 278 } 279 279 280 280 static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) ··· 930 928 return genlmsg_end(msg, hdr); 931 929 932 930 nla_put_failure: 933 - return genlmsg_cancel(msg, hdr); 931 + genlmsg_cancel(msg, hdr); 932 + return -EMSGSIZE; 934 933 } 935 934 936 935 static int nl80211_dump_station(struct sk_buff *skb, ··· 1270 1267 return genlmsg_end(msg, hdr); 1271 1268 1272 1269 nla_put_failure: 1273 - return genlmsg_cancel(msg, hdr); 1270 + genlmsg_cancel(msg, hdr); 1271 + return -EMSGSIZE; 1274 1272 } 1275 1273 1276 1274 static int nl80211_dump_mpath(struct sk_buff *skb,
+2 -2
net/xfrm/xfrm_algo.c
··· 200 200 } 201 201 }, 202 202 { 203 - .name = "hmac(ripemd160)", 204 - .compat = "ripemd160", 203 + .name = "hmac(rmd160)", 204 + .compat = "rmd160", 205 205 206 206 .uinfo = { 207 207 .auth = {