Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

+109 -76
+1
include/linux/sysctl.h
··· 715 715 NET_SCTP_PRSCTP_ENABLE = 14, 716 716 NET_SCTP_SNDBUF_POLICY = 15, 717 717 NET_SCTP_SACK_TIMEOUT = 16, 718 + NET_SCTP_RCVBUF_POLICY = 17, 718 719 }; 719 720 720 721 /* /proc/sys/net/bridge */
+4 -3
include/net/sctp/command.h
··· 120 120 int error; 121 121 sctp_state_t state; 122 122 sctp_event_timeout_t to; 123 + unsigned long zero; 123 124 void *ptr; 124 125 struct sctp_chunk *chunk; 125 126 struct sctp_association *asoc; ··· 149 148 } 150 149 static inline sctp_arg_t SCTP_NOFORCE(void) 151 150 { 152 - sctp_arg_t retval; retval.i32 = 0; return retval; 151 + sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 0; return retval; 153 152 } 154 153 static inline sctp_arg_t SCTP_FORCE(void) 155 154 { 156 - sctp_arg_t retval; retval.i32 = 1; return retval; 155 + sctp_arg_t retval = {.zero = 0UL}; retval.i32 = 1; return retval; 157 156 } 158 157 159 158 #define SCTP_ARG_CONSTRUCTOR(name, type, elt) \ 160 159 static inline sctp_arg_t \ 161 160 SCTP_## name (type arg) \ 162 - { sctp_arg_t retval; retval.elt = arg; return retval; } 161 + { sctp_arg_t retval = {.zero = 0UL}; retval.elt = arg; return retval; } 163 162 164 163 SCTP_ARG_CONSTRUCTOR(I32, __s32, i32) 165 164 SCTP_ARG_CONSTRUCTOR(U32, __u32, u32)
+16 -3
include/net/sctp/structs.h
··· 161 161 */ 162 162 int sndbuf_policy; 163 163 164 + /* 165 + * Policy for performing sctp/socket accounting 166 + * 0 - do socket level accounting, all assocs share sk_rcvbuf 167 + * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes 168 + */ 169 + int rcvbuf_policy; 170 + 164 171 /* Delayed SACK timeout 200ms default*/ 165 172 int sack_timeout; 166 173 ··· 225 218 #define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable) 226 219 #define sctp_max_retrans_association (sctp_globals.max_retrans_association) 227 220 #define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) 221 + #define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy) 228 222 #define sctp_max_retrans_path (sctp_globals.max_retrans_path) 229 223 #define sctp_max_retrans_init (sctp_globals.max_retrans_init) 230 224 #define sctp_sack_timeout (sctp_globals.sack_timeout) ··· 1230 1222 int last_key; 1231 1223 int key_changed_at; 1232 1224 1233 - /* Default timeouts. */ 1234 - int timeouts[SCTP_NUM_TIMEOUT_TYPES]; 1235 - 1236 1225 /* sendbuf acct. policy. */ 1237 1226 __u32 sndbuf_policy; 1227 + 1228 + /* rcvbuf acct. policy. */ 1229 + __u32 rcvbuf_policy; 1238 1230 }; 1239 1231 1240 1232 /* Recover the outer endpoint structure. */ ··· 1560 1552 * as specified in the sk->sndbuf. 1561 1553 */ 1562 1554 int sndbuf_used; 1555 + 1556 + /* This is the amount of memory that this association has allocated 1557 + * in the receive path at any given time. 1558 + */ 1559 + atomic_t rmem_alloc; 1563 1560 1564 1561 /* This is the wait queue head for send requests waiting on 1565 1562 * the association sndbuf space.
+6 -2
net/ipv6/af_inet6.c
··· 699 699 /* Register the family here so that the init calls below will 700 700 * be able to create sockets. (?? is this dangerous ??) 701 701 */ 702 - (void) sock_register(&inet6_family_ops); 702 + err = sock_register(&inet6_family_ops); 703 + if (err) 704 + goto out_unregister_raw_proto; 703 705 704 706 /* Initialise ipv6 mibs */ 705 707 err = init_ipv6_mibs(); 706 708 if (err) 707 - goto out_unregister_raw_proto; 709 + goto out_unregister_sock; 708 710 709 711 /* 710 712 * ipngwg API draft makes clear that the correct semantics ··· 798 796 ipv6_sysctl_unregister(); 799 797 #endif 800 798 cleanup_ipv6_mibs(); 799 + out_unregister_sock: 800 + sock_unregister(PF_INET6); 801 801 out_unregister_raw_proto: 802 802 proto_unregister(&rawv6_prot); 803 803 out_unregister_udp_proto:
+29 -4
net/sctp/associola.c
··· 128 128 */ 129 129 asoc->max_burst = sctp_max_burst; 130 130 131 - /* Copy things from the endpoint. */ 131 + /* initialize association timers */ 132 + asoc->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; 133 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = asoc->rto_initial; 134 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = asoc->rto_initial; 135 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = asoc->rto_initial; 136 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; 137 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0; 138 + 139 + /* sctpimpguide Section 2.12.2 140 + * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the 141 + * recommended value of 5 times 'RTO.Max'. 142 + */ 143 + asoc->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] 144 + = 5 * asoc->rto_max; 145 + 146 + asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 147 + asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = 148 + SCTP_DEFAULT_TIMEOUT_SACK; 149 + asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = 150 + sp->autoclose * HZ; 151 + 152 + /* Initilizes the timers */ 132 153 for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { 133 - asoc->timeouts[i] = ep->timeouts[i]; 134 154 init_timer(&asoc->timers[i]); 135 155 asoc->timers[i].function = sctp_timer_events[i]; 136 156 asoc->timers[i].data = (unsigned long) asoc; ··· 177 157 * RFC 6 - A SCTP receiver MUST be able to receive a minimum of 178 158 * 1500 bytes in one SCTP packet. 179 159 */ 180 - if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW) 160 + if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) 181 161 asoc->rwnd = SCTP_DEFAULT_MINWINDOW; 182 162 else 183 - asoc->rwnd = sk->sk_rcvbuf; 163 + asoc->rwnd = sk->sk_rcvbuf/2; 184 164 185 165 asoc->a_rwnd = asoc->rwnd; 186 166 ··· 191 171 192 172 /* Set the sndbuf size for transmit. 
*/ 193 173 asoc->sndbuf_used = 0; 174 + 175 + /* Initialize the receive memory counter */ 176 + atomic_set(&asoc->rmem_alloc, 0); 194 177 195 178 init_waitqueue_head(&asoc->wait); 196 179 ··· 402 379 idr_remove(&sctp_assocs_id, asoc->assoc_id); 403 380 spin_unlock_bh(&sctp_assocs_id_lock); 404 381 } 382 + 383 + BUG_TRAP(!atomic_read(&asoc->rmem_alloc)); 405 384 406 385 if (asoc->base.malloced) { 407 386 kfree(asoc);
+3 -23
net/sctp/endpointola.c
··· 70 70 struct sock *sk, 71 71 gfp_t gfp) 72 72 { 73 - struct sctp_sock *sp = sctp_sk(sk); 74 73 memset(ep, 0, sizeof(struct sctp_endpoint)); 75 74 76 75 /* Initialize the base structure. */ ··· 99 100 /* Create the lists of associations. */ 100 101 INIT_LIST_HEAD(&ep->asocs); 101 102 102 - /* Set up the base timeout information. */ 103 - ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; 104 - ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = 105 - msecs_to_jiffies(sp->rtoinfo.srto_initial); 106 - ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = 107 - msecs_to_jiffies(sp->rtoinfo.srto_initial); 108 - ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = 109 - msecs_to_jiffies(sp->rtoinfo.srto_initial); 110 - ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; 111 - ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0; 112 - 113 - /* sctpimpguide-05 Section 2.12.2 114 - * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the 115 - * recommended value of 5 times 'RTO.Max'. 116 - */ 117 - ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] 118 - = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max); 119 - 120 - ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0; 121 - ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = sctp_sack_timeout; 122 - ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; 123 - 124 103 /* Use SCTP specific send buffer space queues. */ 125 104 ep->sndbuf_policy = sctp_sndbuf_policy; 126 105 sk->sk_write_space = sctp_write_space; 127 106 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); 107 + 108 + /* Get the receive buffer policy for this endpoint */ 109 + ep->rcvbuf_policy = sctp_rcvbuf_policy; 128 110 129 111 /* Initialize the secret key used with cookie. */ 130 112 get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
-20
net/sctp/input.c
··· 100 100 return 0; 101 101 } 102 102 103 - /* The free routine for skbuffs that sctp receives */ 104 - static void sctp_rfree(struct sk_buff *skb) 105 - { 106 - atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc); 107 - sock_rfree(skb); 108 - } 109 - 110 - /* The ownership wrapper routine to do receive buffer accounting */ 111 - static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) 112 - { 113 - skb_set_owner_r(skb,sk); 114 - skb->destructor = sctp_rfree; 115 - atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); 116 - } 117 - 118 103 struct sctp_input_cb { 119 104 union { 120 105 struct inet_skb_parm h4; ··· 202 217 rcvr = &ep->base; 203 218 } 204 219 205 - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) 206 - goto discard_release; 207 - 208 220 /* 209 221 * RFC 2960, 8.4 - Handle "Out of the blue" Packets. 210 222 * An SCTP packet is called an "out of the blue" (OOTB) ··· 237 255 goto discard_release; 238 256 } 239 257 SCTP_INPUT_CB(skb)->chunk = chunk; 240 - 241 - sctp_rcv_set_owner_r(skb,sk); 242 258 243 259 /* Remember what endpoint is to handle this packet. */ 244 260 chunk->rcvr = rcvr;
+6
net/sctp/protocol.c
··· 530 530 { 531 531 struct rtable *rt = (struct rtable *)dst; 532 532 533 + if (!asoc) 534 + return; 535 + 533 536 if (rt) { 534 537 saddr->v4.sin_family = AF_INET; 535 538 saddr->v4.sin_port = asoc->base.bind_addr.port; ··· 1049 1046 1050 1047 /* Sendbuffer growth - do per-socket accounting */ 1051 1048 sctp_sndbuf_policy = 0; 1049 + 1050 + /* Rcvbuffer growth - do per-socket accounting */ 1051 + sctp_rcvbuf_policy = 0; 1052 1052 1053 1053 /* HB.interval - 30 seconds */ 1054 1054 sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT;
+3 -3
net/sctp/sm_sideeffect.c
··· 385 385 NULL, 386 386 sctp_generate_t4_rto_event, 387 387 sctp_generate_t5_shutdown_guard_event, 388 - sctp_generate_heartbeat_event, 388 + NULL, 389 389 sctp_generate_sack_event, 390 390 sctp_generate_autoclose_event, 391 391 }; ··· 689 689 * increased due to timer expirations. 690 690 */ 691 691 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = 692 - asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT]; 692 + asoc->rto_initial; 693 693 asoc->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = 694 - asoc->ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE]; 694 + asoc->rto_initial; 695 695 } 696 696 697 697 if (sctp_state(asoc, ESTABLISHED) ||
+22
net/sctp/sm_statefuns.c
··· 5160 5160 sctp_verb_t deliver; 5161 5161 int tmp; 5162 5162 __u32 tsn; 5163 + int account_value; 5164 + struct sock *sk = asoc->base.sk; 5163 5165 5164 5166 data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; 5165 5167 skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); ··· 5170 5168 SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); 5171 5169 5172 5170 /* ASSERT: Now skb->data is really the user data. */ 5171 + 5172 + /* 5173 + * if we are established, and we have used up our receive 5174 + * buffer memory, drop the frame 5175 + */ 5176 + if (asoc->state == SCTP_STATE_ESTABLISHED) { 5177 + /* 5178 + * If the receive buffer policy is 1, then each 5179 + * association can allocate up to sk_rcvbuf bytes 5180 + * otherwise, all the associations in aggregate 5181 + * may allocate up to sk_rcvbuf bytes 5182 + */ 5183 + if (asoc->ep->rcvbuf_policy) 5184 + account_value = atomic_read(&asoc->rmem_alloc); 5185 + else 5186 + account_value = atomic_read(&sk->sk_rmem_alloc); 5187 + 5188 + if (account_value > sk->sk_rcvbuf) 5189 + return SCTP_IERROR_IGNORE_TSN; 5190 + } 5173 5191 5174 5192 /* Process ECN based congestion. 5175 5193 *
+4 -1
net/sctp/socket.c
··· 1932 1932 if (copy_from_user(&sp->autoclose, optval, optlen)) 1933 1933 return -EFAULT; 1934 1934 1935 - sp->ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; 1936 1935 return 0; 1937 1936 } 1938 1937 ··· 5114 5115 sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { 5115 5116 event = sctp_skb2event(skb); 5116 5117 if (event->asoc == assoc) { 5118 + sock_rfree(skb); 5117 5119 __skb_unlink(skb, &oldsk->sk_receive_queue); 5118 5120 __skb_queue_tail(&newsk->sk_receive_queue, skb); 5121 + skb_set_owner_r(skb, newsk); 5119 5122 } 5120 5123 } 5121 5124 ··· 5145 5144 sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { 5146 5145 event = sctp_skb2event(skb); 5147 5146 if (event->asoc == assoc) { 5147 + sock_rfree(skb); 5148 5148 __skb_unlink(skb, &oldsp->pd_lobby); 5149 5149 __skb_queue_tail(queue, skb); 5150 + skb_set_owner_r(skb, newsk); 5150 5151 } 5151 5152 } 5152 5153
+8
net/sctp/sysctl.c
··· 121 121 .proc_handler = &proc_dointvec 122 122 }, 123 123 { 124 + .ctl_name = NET_SCTP_RCVBUF_POLICY, 125 + .procname = "rcvbuf_policy", 126 + .data = &sctp_rcvbuf_policy, 127 + .maxlen = sizeof(int), 128 + .mode = 0644, 129 + .proc_handler = &proc_dointvec 130 + }, 131 + { 124 132 .ctl_name = NET_SCTP_PATH_MAX_RETRANS, 125 133 .procname = "path_max_retrans", 126 134 .data = &sctp_max_retrans_path,
+7 -17
net/sctp/ulpevent.c
··· 52 52 struct sctp_association *asoc); 53 53 static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); 54 54 55 - /* Stub skb destructor. */ 56 - static void sctp_stub_rfree(struct sk_buff *skb) 57 - { 58 - /* WARNING: This function is just a warning not to use the 59 - * skb destructor. If the skb is shared, we may get the destructor 60 - * callback on some processor that does not own the sock_lock. This 61 - * was occuring with PACKET socket applications that were monitoring 62 - * our skbs. We can't take the sock_lock, because we can't risk 63 - * recursing if we do really own the sock lock. Instead, do all 64 - * of our rwnd manipulation while we own the sock_lock outright. 65 - */ 66 - } 67 - 68 55 /* Initialize an ULP event from an given skb. */ 69 56 SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) 70 57 { ··· 98 111 */ 99 112 sctp_association_hold((struct sctp_association *)asoc); 100 113 skb = sctp_event2skb(event); 101 - skb->sk = asoc->base.sk; 102 114 event->asoc = (struct sctp_association *)asoc; 103 - skb->destructor = sctp_stub_rfree; 115 + atomic_add(skb->truesize, &event->asoc->rmem_alloc); 116 + skb_set_owner_r(skb, asoc->base.sk); 104 117 } 105 118 106 119 /* A simple destructor to give up the reference to the association. */ 107 120 static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) 108 121 { 109 - sctp_association_put(event->asoc); 122 + struct sctp_association *asoc = event->asoc; 123 + struct sk_buff *skb = sctp_event2skb(event); 124 + 125 + atomic_sub(skb->truesize, &asoc->rmem_alloc); 126 + sctp_association_put(asoc); 110 127 } 111 128 112 129 /* Create and initialize an SCTP_ASSOC_CHANGE event. ··· 913 922 /* Free a ulpevent that has an owner. It includes releasing the reference 914 923 * to the owner, updating the rwnd in case of a DATA event and freeing the 915 924 * skb. 916 - * See comments in sctp_stub_rfree(). 
917 925 */ 918 926 void sctp_ulpevent_free(struct sctp_ulpevent *event) 919 927 {