From: Neil H. <nh...@tu...> - 2005-10-30 00:53:09
|
On Fri, Oct 28, 2005 at 04:45:35PM -0700, Sridhar Samudrala wrote: Had a free minute so I ran my tests against this modified patch, and everything seems to work just fine. Regards Neil Signed-off-by: Neil Horman <nh...@tu...> > > > diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h > index fc8e367..af22fa0 100644 > --- a/include/linux/sysctl.h > +++ b/include/linux/sysctl.h > @@ -676,6 +676,7 @@ enum { > NET_SCTP_PRSCTP_ENABLE = 14, > NET_SCTP_SNDBUF_POLICY = 15, > NET_SCTP_SACK_TIMEOUT = 16, > + NET_SCTP_RCVBUF_POLICY = 17, > }; > > /* /proc/sys/net/bridge */ > diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h > index 9c385b6..57f4d97 100644 > --- a/include/net/sctp/structs.h > +++ b/include/net/sctp/structs.h > @@ -161,6 +161,13 @@ extern struct sctp_globals { > */ > int sndbuf_policy; > > + /* > + * Policy for performing sctp/socket accounting > + * 0 - do socket level accounting, all assocs share sk_rcvbuf > + * 1 - do sctp accounting, each asoc may use sk_rcvbuf bytes > + */ > + int rcvbuf_policy; > + > /* Delayed SACK timeout 200ms default*/ > int sack_timeout; > > @@ -218,6 +225,7 @@ extern struct sctp_globals { > #define sctp_cookie_preserve_enable (sctp_globals.cookie_preserve_enable) > #define sctp_max_retrans_association (sctp_globals.max_retrans_association) > #define sctp_sndbuf_policy (sctp_globals.sndbuf_policy) > +#define sctp_rcvbuf_policy (sctp_globals.rcvbuf_policy) > #define sctp_max_retrans_path (sctp_globals.max_retrans_path) > #define sctp_max_retrans_init (sctp_globals.max_retrans_init) > #define sctp_sack_timeout (sctp_globals.sack_timeout) > @@ -1227,6 +1235,9 @@ struct sctp_endpoint { > > /* sendbuf acct. policy. */ > __u32 sndbuf_policy; > + > + /* rcvbuf acct. policy. */ > + __u32 rcvbuf_policy; > }; > > /* Recover the outter endpoint structure. 
*/ > @@ -1553,6 +1564,11 @@ struct sctp_association { > */ > int sndbuf_used; > > + /* This is the amount of memory that this association has allocated > + * in the receive path at any given time. > + */ > + atomic_t rmem_alloc; > + > /* This is the wait queue head for send requests waiting on > * the association sndbuf space. > */ > diff --git a/net/sctp/associola.c b/net/sctp/associola.c > index 12b0f58..bff6fdb 100644 > --- a/net/sctp/associola.c > +++ b/net/sctp/associola.c > @@ -157,10 +157,10 @@ static struct sctp_association *sctp_ass > * RFC 6 - A SCTP receiver MUST be able to receive a minimum of > * 1500 bytes in one SCTP packet. > */ > - if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW) > + if ((sk->sk_rcvbuf/2) < SCTP_DEFAULT_MINWINDOW) > asoc->rwnd = SCTP_DEFAULT_MINWINDOW; > else > - asoc->rwnd = sk->sk_rcvbuf; > + asoc->rwnd = sk->sk_rcvbuf/2; > > asoc->a_rwnd = asoc->rwnd; > > @@ -172,6 +172,9 @@ static struct sctp_association *sctp_ass > /* Set the sndbuf size for transmit. */ > asoc->sndbuf_used = 0; > > + /* Initialize the receive memory counter */ > + atomic_set(&asoc->rmem_alloc, 0); > + > init_waitqueue_head(&asoc->wait); > > asoc->c.my_vtag = sctp_generate_tag(ep); > diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c > index 96984f7..c60da15 100644 > --- a/net/sctp/endpointola.c > +++ b/net/sctp/endpointola.c > @@ -127,6 +127,9 @@ static struct sctp_endpoint *sctp_endpoi > sk->sk_write_space = sctp_write_space; > sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); > > + /* Get the receive buffer policy for this endpoint */ > + ep->rcvbuf_policy = sctp_rcvbuf_policy; > + > /* Initialize the secret key used with cookie. 
*/ > get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); > ep->last_key = ep->current_key = 0; > diff --git a/net/sctp/input.c b/net/sctp/input.c > index 28f3224..b24ff2c 100644 > --- a/net/sctp/input.c > +++ b/net/sctp/input.c > @@ -100,21 +100,6 @@ static inline int sctp_rcv_checksum(stru > return 0; > } > > -/* The free routine for skbuffs that sctp receives */ > -static void sctp_rfree(struct sk_buff *skb) > -{ > - atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc); > - sock_rfree(skb); > -} > - > -/* The ownership wrapper routine to do receive buffer accounting */ > -static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) > -{ > - skb_set_owner_r(skb,sk); > - skb->destructor = sctp_rfree; > - atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); > -} > - > struct sctp_input_cb { > union { > struct inet_skb_parm h4; > @@ -217,9 +202,6 @@ int sctp_rcv(struct sk_buff *skb) > rcvr = &ep->base; > } > > - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) > - goto discard_release; > - > /* > * RFC 2960, 8.4 - Handle "Out of the blue" Packets. > * An SCTP packet is called an "out of the blue" (OOTB) > @@ -256,8 +238,6 @@ int sctp_rcv(struct sk_buff *skb) > } > SCTP_INPUT_CB(skb)->chunk = chunk; > > - sctp_rcv_set_owner_r(skb,sk); > - > /* Remember what endpoint is to handle this packet. 
*/ > chunk->rcvr = rcvr; > > diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c > index 26de4d3..f2137c3 100644 > --- a/net/sctp/protocol.c > +++ b/net/sctp/protocol.c > @@ -1047,6 +1047,9 @@ SCTP_STATIC __init int sctp_init(void) > /* Sendbuffer growth - do per-socket accounting */ > sctp_sndbuf_policy = 0; > > + /* Rcvbuffer growth - do per-socket accounting */ > + sctp_rcvbuf_policy = 0; > + > /* HB.interval - 30 seconds */ > sctp_hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; > > diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c > index 505c7de..475bfb4 100644 > --- a/net/sctp/sm_statefuns.c > +++ b/net/sctp/sm_statefuns.c > @@ -5160,6 +5160,8 @@ static int sctp_eat_data(const struct sc > sctp_verb_t deliver; > int tmp; > __u32 tsn; > + int account_value; > + struct sock *sk = asoc->base.sk; > > data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; > skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); > @@ -5169,6 +5171,26 @@ static int sctp_eat_data(const struct sc > > /* ASSERT: Now skb->data is really the user data. */ > > + /* > + * if we are established, and we have used up our receive > + * buffer memory, drop the frame > + */ > + if (asoc->state == SCTP_STATE_ESTABLISHED) { > + /* > + * If the receive buffer policy is 1, then each > + * association can allocate up to sk_rcvbuf bytes > + * otherwise, all the associations in aggregate > + * may allocate up to sk_rcvbuf bytes > + */ > + if (asoc->ep->rcvbuf_policy) > + account_value = atomic_read(&asoc->rmem_alloc); > + else > + account_value = atomic_read(&sk->sk_rmem_alloc); > + > + if (account_value > sk->sk_rcvbuf) > + return SCTP_IERROR_IGNORE_TSN; > + } > + > /* Process ECN based congestion. 
> * > * Since the chunk structure is reused for all chunks within > diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c > index 75b28dd..fcd7096 100644 > --- a/net/sctp/sysctl.c > +++ b/net/sctp/sysctl.c > @@ -121,6 +121,14 @@ static ctl_table sctp_table[] = { > .proc_handler = &proc_dointvec > }, > { > + .ctl_name = NET_SCTP_RCVBUF_POLICY, > + .procname = "rcvbuf_policy", > + .data = &sctp_rcvbuf_policy, > + .maxlen = sizeof(int), > + .mode = 0644, > + .proc_handler = &proc_dointvec > + }, > + { > .ctl_name = NET_SCTP_PATH_MAX_RETRANS, > .procname = "path_max_retrans", > .data = &sctp_max_retrans_path, > diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c > index e049f41..add7496 100644 > --- a/net/sctp/ulpevent.c > +++ b/net/sctp/ulpevent.c > @@ -52,19 +52,6 @@ static void sctp_ulpevent_receive_data(s > struct sctp_association *asoc); > static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); > > -/* Stub skb destructor. */ > -static void sctp_stub_rfree(struct sk_buff *skb) > -{ > -/* WARNING: This function is just a warning not to use the > - * skb destructor. If the skb is shared, we may get the destructor > - * callback on some processor that does not own the sock_lock. This > - * was occuring with PACKET socket applications that were monitoring > - * our skbs. We can't take the sock_lock, because we can't risk > - * recursing if we do really own the sock lock. Instead, do all > - * of our rwnd manipulation while we own the sock_lock outright. > - */ > -} > - > /* Initialize an ULP event from an given skb. 
*/ > SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) > { > @@ -98,6 +85,16 @@ int sctp_ulpevent_is_notification(const > return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); > } > > +/* The free routine for skbuffs that sctp receives */ > +static void sctp_ulp_rfree(struct sk_buff *skb) > +{ > + struct sctp_ulpevent *event = sctp_skb2event(skb); > + struct sctp_association *asoc = event->asoc; > + > + atomic_sub(skb->truesize, &asoc->rmem_alloc); > + sock_rfree(skb); > +} > + > /* Hold the association in case the msg_name needs read out of > * the association. > */ > @@ -111,9 +108,10 @@ static inline void sctp_ulpevent_set_own > */ > sctp_association_hold((struct sctp_association *)asoc); > skb = sctp_event2skb(event); > - skb->sk = asoc->base.sk; > event->asoc = (struct sctp_association *)asoc; > - skb->destructor = sctp_stub_rfree; > + skb_set_owner_r(skb,asoc->base.sk); > + skb->destructor = sctp_ulp_rfree; > + atomic_add(skb->truesize, &event->asoc->rmem_alloc); > } > > /* A simple destructor to give up the reference to the association. */ > -- /*************************************************** *Neil Horman *Software Engineer *gpg keyid: 1024D / 0x92A74FA1 - http://pgp.mit.edu ***************************************************/ |