Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
rtnetlink: make SR-IOV VF interface symmetric
sctp: delete active ICMP proto unreachable timer when free transport
tcp: fix MD5 (RFC2385) support

+160 -81
+19 -4
include/linux/if_link.h
··· 79 79 IFLA_NET_NS_PID, 80 80 IFLA_IFALIAS, 81 81 IFLA_NUM_VF, /* Number of VFs if device is SR-IOV PF */ 82 - IFLA_VF_MAC, /* Hardware queue specific attributes */ 83 - IFLA_VF_VLAN, 84 - IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ 85 - IFLA_VFINFO, 82 + IFLA_VFINFO_LIST, 86 83 __IFLA_MAX 87 84 }; 88 85 ··· 199 202 }; 200 203 201 204 /* SR-IOV virtual function managment section */ 205 + 206 + enum { 207 + IFLA_VF_INFO_UNSPEC, 208 + IFLA_VF_INFO, 209 + __IFLA_VF_INFO_MAX, 210 + }; 211 + 212 + #define IFLA_VF_INFO_MAX (__IFLA_VF_INFO_MAX - 1) 213 + 214 + enum { 215 + IFLA_VF_UNSPEC, 216 + IFLA_VF_MAC, /* Hardware queue specific attributes */ 217 + IFLA_VF_VLAN, 218 + IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ 219 + __IFLA_VF_MAX, 220 + }; 221 + 222 + #define IFLA_VF_MAX (__IFLA_VF_MAX - 1) 202 223 203 224 struct ifla_vf_mac { 204 225 __u32 vf;
+3 -18
include/net/tcp.h
··· 1197 1197 extern struct tcp_md5sig_pool * __percpu *tcp_alloc_md5sig_pool(struct sock *); 1198 1198 extern void tcp_free_md5sig_pool(void); 1199 1199 1200 - extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu); 1201 - extern void __tcp_put_md5sig_pool(void); 1200 + extern struct tcp_md5sig_pool *tcp_get_md5sig_pool(void); 1201 + extern void tcp_put_md5sig_pool(void); 1202 + 1202 1203 extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *); 1203 1204 extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *, 1204 1205 unsigned header_len); 1205 1206 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1206 1207 struct tcp_md5sig_key *key); 1207 - 1208 - static inline 1209 - struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 1210 - { 1211 - int cpu = get_cpu(); 1212 - struct tcp_md5sig_pool *ret = __tcp_get_md5sig_pool(cpu); 1213 - if (!ret) 1214 - put_cpu(); 1215 - return ret; 1216 - } 1217 - 1218 - static inline void tcp_put_md5sig_pool(void) 1219 - { 1220 - __tcp_put_md5sig_pool(); 1221 - put_cpu(); 1222 - } 1223 1208 1224 1209 /* write queue abstraction */ 1225 1210 static inline void tcp_write_queue_purge(struct sock *sk)
+110 -49
net/core/rtnetlink.c
··· 602 602 a->tx_compressed = b->tx_compressed; 603 603 }; 604 604 605 + /* All VF info */ 605 606 static inline int rtnl_vfinfo_size(const struct net_device *dev) 606 607 { 607 - if (dev->dev.parent && dev_is_pci(dev->dev.parent)) 608 - return dev_num_vf(dev->dev.parent) * 609 - sizeof(struct ifla_vf_info); 610 - else 608 + if (dev->dev.parent && dev_is_pci(dev->dev.parent)) { 609 + 610 + int num_vfs = dev_num_vf(dev->dev.parent); 611 + size_t size = nlmsg_total_size(sizeof(struct nlattr)); 612 + size += nlmsg_total_size(num_vfs * sizeof(struct nlattr)); 613 + size += num_vfs * (sizeof(struct ifla_vf_mac) + 614 + sizeof(struct ifla_vf_vlan) + 615 + sizeof(struct ifla_vf_tx_rate)); 616 + return size; 617 + } else 611 618 return 0; 612 619 } 613 620 ··· 636 629 + nla_total_size(1) /* IFLA_OPERSTATE */ 637 630 + nla_total_size(1) /* IFLA_LINKMODE */ 638 631 + nla_total_size(4) /* IFLA_NUM_VF */ 639 - + nla_total_size(rtnl_vfinfo_size(dev)) /* IFLA_VFINFO */ 632 + + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */ 640 633 + rtnl_link_get_size(dev); /* IFLA_LINKINFO */ 641 634 } 642 635 ··· 707 700 708 701 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { 709 702 int i; 710 - struct ifla_vf_info ivi; 711 703 712 - NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); 713 - for (i = 0; i < dev_num_vf(dev->dev.parent); i++) { 704 + struct nlattr *vfinfo, *vf; 705 + int num_vfs = dev_num_vf(dev->dev.parent); 706 + 707 + NLA_PUT_U32(skb, IFLA_NUM_VF, num_vfs); 708 + vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST); 709 + if (!vfinfo) 710 + goto nla_put_failure; 711 + for (i = 0; i < num_vfs; i++) { 712 + struct ifla_vf_info ivi; 713 + struct ifla_vf_mac vf_mac; 714 + struct ifla_vf_vlan vf_vlan; 715 + struct ifla_vf_tx_rate vf_tx_rate; 714 716 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi)) 715 717 break; 716 - NLA_PUT(skb, IFLA_VFINFO, sizeof(ivi), &ivi); 718 + vf_mac.vf = vf_vlan.vf = vf_tx_rate.vf = ivi.vf; 719 + memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac)); 720 + vf_vlan.vlan = ivi.vlan; 721 + vf_vlan.qos = ivi.qos; 722 + vf_tx_rate.rate = ivi.tx_rate; 723 + vf = nla_nest_start(skb, IFLA_VF_INFO); 724 + if (!vf) { 725 + nla_nest_cancel(skb, vfinfo); 726 + goto nla_put_failure; 727 + } 728 + NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); 729 + NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); 730 + NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), &vf_tx_rate); 731 + nla_nest_end(skb, vf); 717 732 } 733 + nla_nest_end(skb, vfinfo); 718 734 } 719 735 if (dev->rtnl_link_ops) { 720 736 if (rtnl_link_fill(skb, dev) < 0) ··· 799 769 [IFLA_LINKINFO] = { .type = NLA_NESTED }, 800 770 [IFLA_NET_NS_PID] = { .type = NLA_U32 }, 801 771 [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, 802 - [IFLA_VF_MAC] = { .type = NLA_BINARY, 803 - .len = sizeof(struct ifla_vf_mac) }, 804 - [IFLA_VF_VLAN] = { .type = NLA_BINARY, 805 - .len = sizeof(struct ifla_vf_vlan) }, 806 - [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, 807 - .len = sizeof(struct ifla_vf_tx_rate) }, 772 + [IFLA_VFINFO_LIST] = { .type = NLA_NESTED }, 808 773 }; 809 774 EXPORT_SYMBOL(ifla_policy); 810 775 811 776 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { 812 777 [IFLA_INFO_KIND] = { .type = NLA_STRING }, 813 778 [IFLA_INFO_DATA] = { .type = NLA_NESTED }, 779 + }; 780 + 781 + static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { 782 + [IFLA_VF_INFO] = { .type = NLA_NESTED }, 783 + }; 784 + 785 + static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { 786 + [IFLA_VF_MAC] = { .type = NLA_BINARY, 787 + .len = sizeof(struct ifla_vf_mac) }, 788 + [IFLA_VF_VLAN] = { .type = NLA_BINARY, 789 + .len = sizeof(struct ifla_vf_vlan) }, 790 + [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, 791 + .len = sizeof(struct ifla_vf_tx_rate) }, 814 792 }; 815 793 816 794 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) ··· 848 810 } 849 811 850 812 return 0; 813 + } 814 + 815 + static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) 816 + { 817 + int rem, err = -EINVAL; 818 + struct nlattr *vf; 819 + const struct net_device_ops *ops = dev->netdev_ops; 820 + 821 + nla_for_each_nested(vf, attr, rem) { 822 + switch (nla_type(vf)) { 823 + case IFLA_VF_MAC: { 824 + struct ifla_vf_mac *ivm; 825 + ivm = nla_data(vf); 826 + err = -EOPNOTSUPP; 827 + if (ops->ndo_set_vf_mac) 828 + err = ops->ndo_set_vf_mac(dev, ivm->vf, 829 + ivm->mac); 830 + break; 831 + } 832 + case IFLA_VF_VLAN: { 833 + struct ifla_vf_vlan *ivv; 834 + ivv = nla_data(vf); 835 + err = -EOPNOTSUPP; 836 + if (ops->ndo_set_vf_vlan) 837 + err = ops->ndo_set_vf_vlan(dev, ivv->vf, 838 + ivv->vlan, 839 + ivv->qos); 840 + break; 841 + } 842 + case IFLA_VF_TX_RATE: { 843 + struct ifla_vf_tx_rate *ivt; 844 + ivt = nla_data(vf); 845 + err = -EOPNOTSUPP; 846 + if (ops->ndo_set_vf_tx_rate) 847 + err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, 848 + ivt->rate); 849 + break; 850 + } 851 + default: 852 + err = -EINVAL; 853 + break; 854 + } 855 + if (err) 856 + break; 857 + } 858 + return err; 851 859 } 852 860 853 861 static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm, ··· 1026 942 write_unlock_bh(&dev_base_lock); 1027 943 } 1028 944 1029 - if (tb[IFLA_VF_MAC]) { 1030 - struct ifla_vf_mac *ivm; 1031 - ivm = nla_data(tb[IFLA_VF_MAC]); 1032 - err = -EOPNOTSUPP; 1033 - if (ops->ndo_set_vf_mac) 1034 - err = ops->ndo_set_vf_mac(dev, ivm->vf, ivm->mac); 1035 - if (err < 0) 1036 - goto errout; 1037 - modified = 1; 1038 - } 1039 - 1040 - if (tb[IFLA_VF_VLAN]) { 1041 - struct ifla_vf_vlan *ivv; 1042 - ivv = nla_data(tb[IFLA_VF_VLAN]); 1043 - err = -EOPNOTSUPP; 1044 - if (ops->ndo_set_vf_vlan) 1045 - err = ops->ndo_set_vf_vlan(dev, ivv->vf, 1046 - ivv->vlan, 1047 - ivv->qos); 1048 - if (err < 0) 1049 - goto errout; 1050 - modified = 1; 1051 - } 1052 - err = 0; 1053 - 1054 - if (tb[IFLA_VF_TX_RATE]) { 1055 - struct ifla_vf_tx_rate *ivt; 1056 - ivt = nla_data(tb[IFLA_VF_TX_RATE]); 1057 - err = -EOPNOTSUPP; 1058 - if (ops->ndo_set_vf_tx_rate) 1059 - err = ops->ndo_set_vf_tx_rate(dev, ivt->vf, ivt->rate); 1060 - if (err < 0) 1061 - goto errout; 1062 - modified = 1; 945 + if (tb[IFLA_VFINFO_LIST]) { 946 + struct nlattr *attr; 947 + int rem; 948 + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { 949 + if (nla_type(attr) != IFLA_VF_INFO) 950 + goto errout; 951 + err = do_setvfinfo(dev, attr); 952 + if (err < 0) 953 + goto errout; 954 + modified = 1; 955 + } 1063 956 } 1064 957 err = 0; 1065 958
+24 -10
net/ipv4/tcp.c
··· 2839 2839 if (p->md5_desc.tfm) 2840 2840 crypto_free_hash(p->md5_desc.tfm); 2841 2841 kfree(p); 2842 - p = NULL; 2843 2842 } 2844 2843 } 2845 2844 free_percpu(pool); ··· 2936 2937 2937 2938 EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 2938 2939 2939 - struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu) 2940 + 2941 + /** 2942 + * tcp_get_md5sig_pool - get md5sig_pool for this user 2943 + * 2944 + * We use percpu structure, so if we succeed, we exit with preemption 2945 + * and BH disabled, to make sure another thread or softirq handling 2946 + * wont try to get same context. 2947 + */ 2948 + struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 2940 2949 { 2941 2950 struct tcp_md5sig_pool * __percpu *p; 2942 - spin_lock_bh(&tcp_md5sig_pool_lock); 2951 + 2952 + local_bh_disable(); 2953 + 2954 + spin_lock(&tcp_md5sig_pool_lock); 2943 2955 p = tcp_md5sig_pool; 2944 2956 if (p) 2945 2957 tcp_md5sig_users++; 2946 - spin_unlock_bh(&tcp_md5sig_pool_lock); 2947 - return (p ? *per_cpu_ptr(p, cpu) : NULL); 2958 + spin_unlock(&tcp_md5sig_pool_lock); 2959 + 2960 + if (p) 2961 + return *per_cpu_ptr(p, smp_processor_id()); 2962 + 2963 + local_bh_enable(); 2964 + return NULL; 2948 2965 } 2966 + EXPORT_SYMBOL(tcp_get_md5sig_pool); 2949 2967 2950 - EXPORT_SYMBOL(__tcp_get_md5sig_pool); 2951 - 2952 - void __tcp_put_md5sig_pool(void) 2968 + void tcp_put_md5sig_pool(void) 2953 2969 { 2970 + local_bh_enable(); 2954 2971 tcp_free_md5sig_pool(); 2955 2972 } 2956 - 2957 - EXPORT_SYMBOL(__tcp_put_md5sig_pool); 2973 + EXPORT_SYMBOL(tcp_put_md5sig_pool); 2958 2974 2959 2975 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, 2960 2976 struct tcphdr *th)
+4
net/sctp/transport.c
··· 173 173 del_timer(&transport->T3_rtx_timer)) 174 174 sctp_transport_put(transport); 175 175 176 + /* Delete the ICMP proto unreachable timer if it's active. */ 177 + if (timer_pending(&transport->proto_unreach_timer) && 178 + del_timer(&transport->proto_unreach_timer)) 179 + sctp_association_put(transport->asoc); 176 180 177 181 sctp_transport_put(transport); 178 182 }