Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1 fork


Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
wimax/i2400m: fix missing endian correction read in fw loader
net8139: fix a race at the end of NAPI
pktgen: Fix accuracy of inter-packet delay.
pkt_sched: gen_estimator: add a new lock
net: deliver skbs on inactive slaves to exact matches
ipv6: fix ICMP6_MIB_OUTERRORS
r8169: fix mdio_read and update mdio_write according to hw specs
gianfar: Revive the driver for eTSEC devices (disable timestamping)
caif: fix a couple range checks
phylib: Add support for the LXT973 phy.
net: Print num_rx_queues imbalance warning only when there are allocated queues

14 files changed, +103 -28
drivers/net/8139cp.c  +1 -1
···
             goto rx_status_loop;

         spin_lock_irqsave(&cp->lock, flags);
-        cpw16_f(IntrMask, cp_intr_mask);
         __napi_complete(napi);
+        cpw16_f(IntrMask, cp_intr_mask);
         spin_unlock_irqrestore(&cp->lock, flags);
     }
drivers/net/8139too.c  +1 -1
···
          * again when we think we are done.
          */
         spin_lock_irqsave(&tp->lock, flags);
-        RTL_W16_F(IntrMask, rtl8139_intr_mask);
         __napi_complete(napi);
+        RTL_W16_F(IntrMask, rtl8139_intr_mask);
         spin_unlock_irqrestore(&tp->lock, flags);
     }
     spin_unlock(&tp->rx_lock);
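Both 8139 drivers had the same ordering bug: the interrupt mask was restored before __napi_complete(), so an interrupt arriving in that window would call napi_schedule(), find NAPI still marked as scheduled, and do nothing; once __napi_complete() then cleared the flag, nothing polled the device again and it could stall with events pending. The fix simply swaps the two calls. A minimal sketch of the corrected ordering, with made-up device and register names (example_dev, intr_mask_reg, EXAMPLE_INTR_MASK), not the drivers' actual code:

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/io.h>

/* Illustrative device type; only the lock and the mask register matter here. */
struct example_dev {
    spinlock_t lock;
    void __iomem *intr_mask_reg;
};

#define EXAMPLE_INTR_MASK 0xffff

static void example_finish_poll(struct example_dev *dev, struct napi_struct *napi)
{
    unsigned long flags;

    spin_lock_irqsave(&dev->lock, flags);
    __napi_complete(napi);                          /* clear NAPI_STATE_SCHED first ... */
    writew(EXAMPLE_INTR_MASK, dev->intr_mask_reg);  /* ... then re-enable device interrupts */
    spin_unlock_irqrestore(&dev->lock, flags);
}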
drivers/net/gianfar.c  +1 -2
···
             FSL_GIANFAR_DEV_HAS_CSUM |
             FSL_GIANFAR_DEV_HAS_VLAN |
             FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
-            FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
-            FSL_GIANFAR_DEV_HAS_TIMER;
+            FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;

     ctype = of_get_property(np, "phy-connection-type", NULL);
drivers/net/phy/lxt.c  +50 -1
···

 #define MII_LXT971_ISR      19  /* Interrupt Status Register */

+/* register definitions for the 973 */
+#define MII_LXT973_PCR      16  /* Port Configuration Register */
+#define PCR_FIBER_SELECT     1

 MODULE_DESCRIPTION("Intel LXT PHY driver");
 MODULE_AUTHOR("Andy Fleming");
···
     return err;
 }

+static int lxt973_probe(struct phy_device *phydev)
+{
+    int val = phy_read(phydev, MII_LXT973_PCR);
+
+    if (val & PCR_FIBER_SELECT) {
+        /*
+         * If fiber is selected, then the only correct setting
+         * is 100Mbps, full duplex, and auto negotiation off.
+         */
+        val = phy_read(phydev, MII_BMCR);
+        val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
+        val &= ~BMCR_ANENABLE;
+        phy_write(phydev, MII_BMCR, val);
+        /* Remember that the port is in fiber mode. */
+        phydev->priv = lxt973_probe;
+    } else {
+        phydev->priv = NULL;
+    }
+    return 0;
+}
+
+static int lxt973_config_aneg(struct phy_device *phydev)
+{
+    /* Do nothing if port is in fiber mode. */
+    return phydev->priv ? 0 : genphy_config_aneg(phydev);
+}
+
 static struct phy_driver lxt970_driver = {
     .phy_id         = 0x78100000,
     .name           = "LXT970",
···
     .driver         = { .owner = THIS_MODULE,},
 };

+static struct phy_driver lxt973_driver = {
+    .phy_id         = 0x00137a10,
+    .name           = "LXT973",
+    .phy_id_mask    = 0xfffffff0,
+    .features       = PHY_BASIC_FEATURES,
+    .flags          = 0,
+    .probe          = lxt973_probe,
+    .config_aneg    = lxt973_config_aneg,
+    .read_status    = genphy_read_status,
+    .driver         = { .owner = THIS_MODULE,},
+};
+
 static int __init lxt_init(void)
 {
     int ret;
···
     ret = phy_driver_register(&lxt971_driver);
     if (ret)
         goto err2;
+
+    ret = phy_driver_register(&lxt973_driver);
+    if (ret)
+        goto err3;
     return 0;

-err2:
+err3:
+    phy_driver_unregister(&lxt971_driver);
+err2:
     phy_driver_unregister(&lxt970_driver);
 err1:
     return ret;
···
 {
     phy_driver_unregister(&lxt970_driver);
     phy_driver_unregister(&lxt971_driver);
+    phy_driver_unregister(&lxt973_driver);
 }

 module_init(lxt_init);
drivers/net/r8169.c  +9 -3
···
         udelay(25);
     }
     /*
-     * Some configurations require a small delay even after the write
-     * completed indication or the next write might fail.
+     * According to hardware specs a 20us delay is required after write
+     * complete indication, but before sending next command.
      */
-    udelay(25);
+    udelay(20);
 }

 static int mdio_read(void __iomem *ioaddr, int reg_addr)
···
         }
         udelay(25);
     }
+    /*
+     * According to hardware specs a 20us delay is required after read
+     * complete indication, but before sending next command.
+     */
+    udelay(20);
+
     return value;
 }
drivers/net/wimax/i2400m/fw.c  +1 -1
···
     unsigned module_type, header_len, major_version, minor_version,
         module_id, module_vendor, date, size;

-    module_type = bcf_hdr->module_type;
+    module_type = le32_to_cpu(bcf_hdr->module_type);
     header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
     major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
         >> 16;
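The wimax fix is a plain endianness bug: bcf_hdr->module_type is a little-endian on-the-wire field, so reading it without le32_to_cpu() yields a byte-swapped value on big-endian hosts. A standalone sketch of why an explicit conversion is needed (read_le32() and the header bytes are illustrative, not the i2400m code):

#include <stdint.h>
#include <stdio.h>

/* Assemble a 32-bit little-endian field byte by byte; the result is the
 * same on any host, unlike a raw 32-bit load of the same bytes. */
static uint32_t read_le32(const uint8_t *p)
{
    return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
           ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* an example module_type value of 6, stored little-endian in a header */
    const uint8_t hdr[4] = { 0x06, 0x00, 0x00, 0x00 };

    printf("module_type = %u\n", read_le32(hdr)); /* prints 6 on any host */
    return 0;
}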
include/linux/skbuff.h  +4 -1
···
     kmemcheck_bitfield_begin(flags2);
     __u16               queue_mapping:16;
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
-    __u8                ndisc_nodetype:2;
+    __u8                ndisc_nodetype:2,
+                        deliver_no_wcard:1;
+#else
+    __u8                deliver_no_wcard:1;
 #endif
     kmemcheck_bitfield_end(flags2);
net/8021q/vlan_core.c  +2 -2
···
         return NET_RX_DROP;

     if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-        goto drop;
+        skb->deliver_no_wcard = 1;

     skb->skb_iif = skb->dev->ifindex;
     __vlan_hwaccel_put_tag(skb, vlan_tci);
···
     struct sk_buff *p;

     if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
-        goto drop;
+        skb->deliver_no_wcard = 1;

     skb->skb_iif = skb->dev->ifindex;
     __vlan_hwaccel_put_tag(skb, vlan_tci);
net/caif/cfrfml.c  +1 -1
···
     if (!cfsrvl_ready(service, &ret))
         return ret;

-    if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+    if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
         pr_err("CAIF: %s():Packet too large - size=%d\n",
             __func__, cfpkt_getlen(pkt));
         return -EOVERFLOW;
net/caif/cfveil.c  +1 -1
···
         return ret;
     caif_assert(layr->dn != NULL);
     caif_assert(layr->dn->transmit != NULL);
-    if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+    if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
         pr_warning("CAIF: %s(): Packet too large - size=%d\n",
             __func__, cfpkt_getlen(pkt));
         return -EOVERFLOW;
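Both CAIF hunks fix the same operator-precedence slip: in "!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE" the "!" is applied first, so the comparison is between 0 or 1 and the maximum size, and the oversize check never triggers. A small standalone demonstration (MAX_PAYLOAD and the length are made-up numbers):

#include <stdio.h>

#define MAX_PAYLOAD 4096

int main(void)
{
    unsigned int len = 10000; /* an oversized packet */

    /* '!' binds tighter than '>', so this compares (!len), i.e. 0, with 4096. */
    if (!len > MAX_PAYLOAD)
        printf("buggy check: rejected\n");
    else
        printf("buggy check: accepted - the oversize packet slips through\n");

    /* the intended test, as in the fixed code */
    if (len > MAX_PAYLOAD)
        printf("fixed check: rejected\n");

    return 0;
}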
net/core/dev.c  +17 -8
···
     if (skb_rx_queue_recorded(skb)) {
         u16 index = skb_get_rx_queue(skb);
         if (unlikely(index >= dev->num_rx_queues)) {
-            if (net_ratelimit()) {
-                pr_warning("%s received packet on queue "
-                    "%u, but number of RX queues is %u\n",
-                    dev->name, index, dev->num_rx_queues);
-            }
+            WARN_ONCE(dev->num_rx_queues > 1, "%s received packet "
+                "on queue %u, but number of RX queues is %u\n",
+                dev->name, index, dev->num_rx_queues);
             goto done;
         }
         rxqueue = dev->_rx + index;
···
     if (!skb->skb_iif)
         skb->skb_iif = skb->dev->ifindex;

+    /*
+     * bonding note: skbs received on inactive slaves should only
+     * be delivered to pkt handlers that are exact matches. Also
+     * the deliver_no_wcard flag will be set. If packet handlers
+     * are sensitive to duplicate packets these skbs will need to
+     * be dropped at the handler. The vlan accel path may have
+     * already set the deliver_no_wcard flag.
+     */
     null_or_orig = NULL;
     orig_dev = skb->dev;
     master = ACCESS_ONCE(orig_dev->master);
-    if (master) {
-        if (skb_bond_should_drop(skb, master))
+    if (skb->deliver_no_wcard)
+        null_or_orig = orig_dev;
+    else if (master) {
+        if (skb_bond_should_drop(skb, master)) {
+            skb->deliver_no_wcard = 1;
             null_or_orig = orig_dev; /* deliver only exact match */
-        else
+        } else
             skb->dev = master;
     }
net/core/gen_estimator.c  +12 -3
···

 /* Protects against soft lockup during large deletion */
 static struct rb_root est_root = RB_ROOT;
+static DEFINE_SPINLOCK(est_tree_lock);

 static void est_timer(unsigned long arg)
 {
···
  *
  * Returns 0 on success or a negative error code.
  *
- * NOTE: Called under rtnl_mutex
  */
 int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
                       struct gnet_stats_rate_est *rate_est,
···
     est->last_packets = bstats->packets;
     est->avpps = rate_est->pps<<10;

+    spin_lock(&est_tree_lock);
     if (!elist[idx].timer.function) {
         INIT_LIST_HEAD(&elist[idx].list);
         setup_timer(&elist[idx].timer, est_timer, idx);
···

     list_add_rcu(&est->list, &elist[idx].list);
     gen_add_node(est);
+    spin_unlock(&est_tree_lock);

     return 0;
 }
···
  *
  * Removes the rate estimator specified by &bstats and &rate_est.
  *
- * NOTE: Called under rtnl_mutex
  */
 void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
                         struct gnet_stats_rate_est *rate_est)
 {
     struct gen_estimator *e;

+    spin_lock(&est_tree_lock);
     while ((e = gen_find_node(bstats, rate_est))) {
         rb_erase(&e->node, &est_root);
···
         list_del_rcu(&e->list);
         call_rcu(&e->e_rcu, __gen_kill_estimator);
     }
+    spin_unlock(&est_tree_lock);
 }
 EXPORT_SYMBOL(gen_kill_estimator);
···
 bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
                           const struct gnet_stats_rate_est *rate_est)
 {
+    bool res;
+
     ASSERT_RTNL();

-    return gen_find_node(bstats, rate_est) != NULL;
+    spin_lock(&est_tree_lock);
+    res = gen_find_node(bstats, rate_est) != NULL;
+    spin_unlock(&est_tree_lock);
+
+    return res;
 }
 EXPORT_SYMBOL(gen_estimator_active);
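As the removed "NOTE: Called under rtnl_mutex" comments show, the gen_estimator change stops relying on every caller holding rtnl_mutex and instead serializes all access to the est_root rb-tree with a dedicated spinlock, est_tree_lock; note how gen_estimator_active() now computes its answer into a local under the lock instead of returning directly. A stripped-down sketch of that pattern, with illustrative names (example_root, example_lock, example_tree_nonempty), not the gen_estimator code:

#include <linux/rbtree.h>
#include <linux/spinlock.h>

static struct rb_root example_root = RB_ROOT;
static DEFINE_SPINLOCK(example_lock);

/* Compute the result into a local while holding the lock, then return it
 * after unlocking, so the lock is never held across the return. */
static bool example_tree_nonempty(void)
{
    bool res;

    spin_lock(&example_lock);
    res = !RB_EMPTY_ROOT(&example_root);
    spin_unlock(&example_lock);

    return res;
}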
net/core/pktgen.c  +1 -1
···
     end_time = ktime_now();

     pkt_dev->idle_acc += ktime_to_ns(ktime_sub(end_time, start_time));
-    pkt_dev->next_tx = ktime_add_ns(end_time, pkt_dev->delay);
+    pkt_dev->next_tx = ktime_add_ns(spin_until, pkt_dev->delay);
 }

 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
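The pktgen change bases the next transmit deadline on spin_until (the time the delay loop was aiming for) rather than end_time (when it actually returned), so oversleep and processing time no longer stretch every subsequent gap. A standalone sketch of the difference (the 100us period and 5us oversleep are made-up numbers):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t period_ns = 100000;  /* requested inter-packet delay */
    const uint64_t oversleep_ns = 5000; /* the loop always wakes a bit late */
    uint64_t next_old = 0, next_new = 0;
    int i;

    for (i = 0; i < 1000; i++) {
        /* old scheme: schedule from the actual wake-up time (end_time + delay) */
        uint64_t wake_old = next_old + oversleep_ns;
        next_old = wake_old + period_ns;

        /* new scheme: schedule from the deadline itself (spin_until + delay) */
        next_new = next_new + period_ns;
    }

    /* 1000 packets * 5us oversleep = 5ms of accumulated drift in the old scheme */
    printf("deadline drift after 1000 packets: %llu ns\n",
           (unsigned long long)(next_old - next_new));

    return 0;
}

With the new scheme a late wake-up simply eats into the next interval instead of pushing the whole schedule back, keeping the long-run inter-packet rate accurate.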
net/ipv6/icmp.c  +2 -2
···
                   np->tclass, NULL, &fl, (struct rt6_info*)dst,
                   MSG_DONTWAIT, np->dontfrag);
     if (err) {
-        ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+        ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
         ip6_flush_pending_frames(sk);
         goto out_put;
     }
···
                 np->dontfrag);

     if (err) {
-        ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
+        ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
         ip6_flush_pending_frames(sk);
         goto out_put;
     }