You can subscribe to this list here.
2003 |
Jan
|
Feb
|
Mar
|
Apr
|
May
|
Jun
|
Jul
|
Aug
|
Sep
|
Oct
|
Nov
|
Dec
(6) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
2004 |
Jan
(9) |
Feb
(11) |
Mar
(22) |
Apr
(73) |
May
(78) |
Jun
(146) |
Jul
(80) |
Aug
(27) |
Sep
(5) |
Oct
(14) |
Nov
(18) |
Dec
(27) |
2005 |
Jan
(20) |
Feb
(30) |
Mar
(19) |
Apr
(28) |
May
(50) |
Jun
(31) |
Jul
(32) |
Aug
(14) |
Sep
(36) |
Oct
(43) |
Nov
(74) |
Dec
(63) |
2006 |
Jan
(34) |
Feb
(32) |
Mar
(21) |
Apr
(76) |
May
(106) |
Jun
(72) |
Jul
(70) |
Aug
(175) |
Sep
(130) |
Oct
(39) |
Nov
(81) |
Dec
(43) |
2007 |
Jan
(81) |
Feb
(36) |
Mar
(20) |
Apr
(43) |
May
(54) |
Jun
(34) |
Jul
(44) |
Aug
(55) |
Sep
(44) |
Oct
(54) |
Nov
(43) |
Dec
(41) |
2008 |
Jan
(42) |
Feb
(84) |
Mar
(73) |
Apr
(30) |
May
(119) |
Jun
(54) |
Jul
(54) |
Aug
(93) |
Sep
(173) |
Oct
(130) |
Nov
(145) |
Dec
(153) |
2009 |
Jan
(59) |
Feb
(12) |
Mar
(28) |
Apr
(18) |
May
(56) |
Jun
(9) |
Jul
(28) |
Aug
(62) |
Sep
(16) |
Oct
(19) |
Nov
(15) |
Dec
(17) |
2010 |
Jan
(14) |
Feb
(36) |
Mar
(37) |
Apr
(30) |
May
(33) |
Jun
(53) |
Jul
(42) |
Aug
(50) |
Sep
(67) |
Oct
(66) |
Nov
(69) |
Dec
(36) |
2011 |
Jan
(52) |
Feb
(45) |
Mar
(49) |
Apr
(21) |
May
(34) |
Jun
(13) |
Jul
(19) |
Aug
(37) |
Sep
(43) |
Oct
(10) |
Nov
(23) |
Dec
(30) |
2012 |
Jan
(42) |
Feb
(36) |
Mar
(46) |
Apr
(25) |
May
(96) |
Jun
(146) |
Jul
(40) |
Aug
(28) |
Sep
(61) |
Oct
(45) |
Nov
(100) |
Dec
(53) |
2013 |
Jan
(79) |
Feb
(24) |
Mar
(134) |
Apr
(156) |
May
(118) |
Jun
(75) |
Jul
(278) |
Aug
(145) |
Sep
(136) |
Oct
(168) |
Nov
(137) |
Dec
(439) |
2014 |
Jan
(284) |
Feb
(158) |
Mar
(231) |
Apr
(275) |
May
(259) |
Jun
(91) |
Jul
(222) |
Aug
(215) |
Sep
(165) |
Oct
(166) |
Nov
(211) |
Dec
(150) |
2015 |
Jan
(164) |
Feb
(324) |
Mar
(299) |
Apr
(214) |
May
(111) |
Jun
(109) |
Jul
(105) |
Aug
(36) |
Sep
(58) |
Oct
(131) |
Nov
(68) |
Dec
(30) |
2016 |
Jan
(46) |
Feb
(87) |
Mar
(135) |
Apr
(174) |
May
(132) |
Jun
(135) |
Jul
(149) |
Aug
(125) |
Sep
(79) |
Oct
(49) |
Nov
(95) |
Dec
(102) |
2017 |
Jan
(104) |
Feb
(75) |
Mar
(72) |
Apr
(53) |
May
(18) |
Jun
(5) |
Jul
(14) |
Aug
(19) |
Sep
(2) |
Oct
(13) |
Nov
(21) |
Dec
(67) |
2018 |
Jan
(56) |
Feb
(50) |
Mar
(148) |
Apr
(41) |
May
(37) |
Jun
(34) |
Jul
(34) |
Aug
(11) |
Sep
(52) |
Oct
(48) |
Nov
(28) |
Dec
(46) |
2019 |
Jan
(29) |
Feb
(63) |
Mar
(95) |
Apr
(54) |
May
(14) |
Jun
(71) |
Jul
(60) |
Aug
(49) |
Sep
(3) |
Oct
(64) |
Nov
(115) |
Dec
(57) |
2020 |
Jan
(15) |
Feb
(9) |
Mar
(38) |
Apr
(27) |
May
(60) |
Jun
(53) |
Jul
(35) |
Aug
(46) |
Sep
(37) |
Oct
(64) |
Nov
(20) |
Dec
(25) |
2021 |
Jan
(20) |
Feb
(31) |
Mar
(27) |
Apr
(23) |
May
(21) |
Jun
(30) |
Jul
(30) |
Aug
(7) |
Sep
(18) |
Oct
|
Nov
(15) |
Dec
(4) |
2022 |
Jan
(3) |
Feb
(1) |
Mar
(10) |
Apr
|
May
(2) |
Jun
(26) |
Jul
(5) |
Aug
|
Sep
(1) |
Oct
(2) |
Nov
(9) |
Dec
(2) |
2023 |
Jan
(4) |
Feb
(4) |
Mar
(5) |
Apr
(10) |
May
(29) |
Jun
(17) |
Jul
|
Aug
|
Sep
(1) |
Oct
(1) |
Nov
(2) |
Dec
|
2024 |
Jan
|
Feb
(6) |
Mar
|
Apr
(1) |
May
(6) |
Jun
|
Jul
(5) |
Aug
|
Sep
(3) |
Oct
|
Nov
|
Dec
|
2025 |
Jan
|
Feb
(3) |
Mar
|
Apr
|
May
|
Jun
|
Jul
(6) |
Aug
|
Sep
|
Oct
|
Nov
|
Dec
|
From: Jon M. <jm...@re...> - 2020-06-09 13:20:29
|
On 6/8/20 11:55 PM, Tuong Lien wrote: > syzbot found the following issue: > > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 check_copy_size include/linux/thread_info.h:150 [inline] > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 copy_from_iter include/linux/uio.h:144 [inline] > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 tipc_msg_append+0x49a/0x5e0 net/tipc/msg.c:242 > Kernel panic - not syncing: panic_on_warn set ... > > This happens after commit 5e9eeccc58f3 ("tipc: fix NULL pointer > dereference in streaming") that tried to build at least one buffer even > when the message data length is zero... However, it now exposes another > bug that the 'mss' can be zero and the 'cpy' will be negative, thus the > above kernel WARNING will appear! > The zero value of 'mss' is never expected because it means Nagle is not > enabled for the socket (actually the socket type was 'SOCK_SEQPACKET'), > so the function 'tipc_msg_append()' must not be called at all. But that > was in this particular case since the message data length was zero, and > the 'send <= maxnagle' check became true. > > We resolve the issue by explicitly checking if Nagle is enabled for the > socket, i.e. 'maxnagle != 0' before calling the 'tipc_msg_append()'. In > addition, we put a sanity check in the function to avoid calling the > 'copy_from_iter()' with a negative size and doing an infinite loop. --- Same suggestion as I had to Hoang; add the three dashes above to avoid that the version info by accident becomes part of the commit log. > > v2: use 'size_t' in the 'min_t()' to get a proper value of 'cpy' (after > Jon's comment) > > Reported-by: syz...@sy... 
> Fixes: c0bceb97db9e ("tipc: add smart nagle feature") > Signed-off-by: Tuong Lien <tuo...@de...> > --- > net/tipc/msg.c | 4 ++-- > net/tipc/socket.c | 3 ++- > 2 files changed, 4 insertions(+), 3 deletions(-) > > diff --git a/net/tipc/msg.c b/net/tipc/msg.c > index 046e4cb3acea..01b64869a173 100644 > --- a/net/tipc/msg.c > +++ b/net/tipc/msg.c > @@ -238,14 +238,14 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, > hdr = buf_msg(skb); > curr = msg_blocks(hdr); > mlen = msg_size(hdr); > - cpy = min_t(int, rem, mss - mlen); > + cpy = min_t(size_t, rem, mss - mlen); > if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) > return -EFAULT; > msg_set_size(hdr, mlen + cpy); > skb_put(skb, cpy); > rem -= cpy; > total += msg_blocks(hdr) - curr; > - } while (rem); > + } while (rem > 0); > return total - accounted; > } > > diff --git a/net/tipc/socket.c b/net/tipc/socket.c > index 26123f4177fd..a94f38333698 100644 > --- a/net/tipc/socket.c > +++ b/net/tipc/socket.c > @@ -1574,7 +1574,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) > break; > send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); > blocks = tsk->snd_backlog; > - if (tsk->oneway++ >= tsk->nagle_start && send <= maxnagle) { > + if (tsk->oneway++ >= tsk->nagle_start && maxnagle && > + send <= maxnagle) { > rc = tipc_msg_append(hdr, m, send, maxnagle, txq); > if (unlikely(rc < 0)) > break; Acked-by: Jon Maloy <jm...@re...> |
From: Jon M. <jm...@re...> - 2020-06-09 13:11:32
|
On 6/9/20 5:20 AM, Hoang Huu Le wrote: > > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Monday, June 8, 2020 8:33 PM > To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... > Subject: Re: [next-net v6] tipc: update a binding service via broadcast > > > > On 6/6/20 11:10 PM, Hoang Huu Le wrote: >> -----Original Message----- >> From: Jon Maloy <jm...@re...> >> Sent: Friday, June 5, 2020 8:03 PM >> To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... >> Subject: Re: [next-net v6] tipc: update a binding service via broadcast >> >> >> >> On 6/5/20 3:52 AM, Hoang Huu Le wrote: >>> Currently, updating binding table (add service binding to >>> name table/withdraw a service binding) is being sent over replicast. >>> However, if we are scaling up clusters to > 100 nodes/containers this >>> method is less affection because of looping through nodes in a cluster one >>> by one. > [...] >>> + if (*open && (*rcv_nxt == seqno)) { >>> + (*rcv_nxt)++; >>> + __skb_unlink(skb, namedq); >>> + return skb; >>> + } >>> + >>> + if (less(seqno, *rcv_nxt)) { >>> + __skb_unlink(skb, namedq); >>> + kfree_skb(skb); >>> + continue; >> Still not needed. This queue should be flushed in >> tipc_node_lost_contact(), which I now see we don't do. >> [Hoang] Yes, that's right. I will verify and send it out. >> >> This has to e fixed too. >> ///jon > I hate to admit it, but we might actually need this test after all. > Imagine that somebody has done 'publish' just after the broadcast link > came up (in tipc_bcast_add_peer()) , but before tipc_named_node_up() is > called. The context of those two calls is not atomic, so I think it is > possible that this publication might end up both in the bcast_link > backlog queue and in the bulk distribution. > This publication message will have a named_seqno that is lower than the > agreed synch point, and should be dropped at reception. 
> > Given the crucial role of the binding table for the overall TIPC > functionality I think it is better be safe than sorry here, and keep > this test. > [Hoang] Finally, I'm able to reproduce the problem as same as above scene: > <code> > 357 if (less(seqno, *rcv_nxt)) { > 358 pr_info("DROP[%x->%x]: %s blk %d lblk %d nxt %d legacy %d seqno %u bc %u hsz %u dsz %u qlen %u\n", > 359 msg_orignode(hdr), tipc_own_addr(net), > 360 msg_type(hdr) == PUBLICATION ? "PUBL":"DRAW", > 361 msg_is_bulk(hdr), msg_is_last_bulk(hdr), > 362 *rcv_nxt, msg_is_legacy(hdr), > 363 msg_named_seqno(hdr), msg_non_seq(hdr), > 364 msg_hdr_sz(hdr), msg_data_sz(hdr), > 365 skb_queue_len(namedq)); > 366 > 367 __skb_unlink(skb, namedq); > 368 kfree_skb(skb); > 369 continue; > 370 } > </code> > ----------- > [12528.036895] tipc: Established link <1001024:eth0-1001001:brtipc> on network plane A > [12528.043857] tipc: Established link <1001002:brtipc-1001001:brtipc> on network plane A > [12528.136462] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3878 bc 0 hsz 40 dsz 20 qlen 23 > [12528.140864] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3879 bc 0 hsz 40 dsz 20 qlen 22 > [...] > [12528.210959] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3893 bc 0 hsz 40 dsz 20 qlen 8 > [12528.218903] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3894 bc 0 hsz 40 dsz 20 qlen 7 > [12528.227214] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3878 bc 0 hsz 40 dsz 20 qlen 23 > [12528.231285] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3879 bc 0 hsz 40 dsz 20 qlen 22 > [...] 
> [12528.277445] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3893 bc 0 hsz 40 dsz 20 qlen 8 > [12528.280847] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3894 bc 0 hsz 40 dsz 20 qlen 7 > --- > I will re-post the patch including the test as well. Great. I remember it took us several years to get the broadcast link start synchronization right, so we'd better be careful and build on that experience. But now I think we are ok. ///jon > > ///jon > >>> + } >>> + } >>> + return NULL; >>> +} >>> + >>> /** >>> * tipc_named_rcv - process name table update messages sent by another node >>> */ >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open) >>> { >>> - struct tipc_net *tn = net_generic(net, tipc_net_id); >>> - struct tipc_msg *msg; >>> + struct tipc_net *tn = tipc_net(net); >>> struct distr_item *item; >>> - uint count; >>> - u32 node; >>> + struct tipc_msg *hdr; >>> struct sk_buff *skb; >>> - int mtype; >>> + u32 count, node = 0; >>> >>> spin_lock_bh(&tn->nametbl_lock); >>> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >>> - skb_linearize(skb); >>> - msg = buf_msg(skb); >>> - mtype = msg_type(msg); >>> - item = (struct distr_item *)msg_data(msg); >>> - count = msg_data_sz(msg) / ITEM_SIZE; >>> - node = msg_orignode(msg); >>> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >>> + hdr = buf_msg(skb); >>> + node = msg_orignode(hdr); >>> + item = (struct distr_item *)msg_data(hdr); >>> + count = msg_data_sz(hdr) / ITEM_SIZE; >>> while (count--) { >>> - tipc_update_nametbl(net, item, node, mtype); >>> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >>> item++; >>> } >>> kfree_skb(skb); >>> @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) >>> publ->node = self; >>> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >>> publ->node = self; >>> 
- >>> + nt->rc_dests = 0; >>> spin_unlock_bh(&tn->nametbl_lock); >>> } >>> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >>> index 63fc73e0fa6c..092323158f06 100644 >>> --- a/net/tipc/name_distr.h >>> +++ b/net/tipc/name_distr.h >>> @@ -67,11 +67,14 @@ struct distr_item { >>> __be32 key; >>> }; >>> >>> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >>> struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); >>> struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); >>> -void tipc_named_node_up(struct net *net, u32 dnode); >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >>> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open); >>> void tipc_named_reinit(struct net *net); >>> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); >>> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> + u32 addr, u16 capabilities); >>> >>> #endif >>> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >>> index 359b2bc888cf..2ac33d32edc2 100644 >>> --- a/net/tipc/name_table.c >>> +++ b/net/tipc/name_table.c >>> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >>> struct tipc_net *tn = tipc_net(net); >>> struct publication *p = NULL; >>> struct sk_buff *skb = NULL; >>> + u32 rc_dests; >>> >>> spin_lock_bh(&tn->nametbl_lock); >>> >>> @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >>> nt->local_publ_count++; >>> skb = tipc_named_publish(net, p); >>> } >>> + rc_dests = nt->rc_dests; >>> exit: >>> spin_unlock_bh(&tn->nametbl_lock); >>> >>> if (skb) >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return p; >>> + >>> } >>> >>> /** >>> @@ -762,6 +765,7 @@ int 
tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >>> u32 self = tipc_own_addr(net); >>> struct sk_buff *skb = NULL; >>> struct publication *p; >>> + u32 rc_dests; >>> >>> spin_lock_bh(&tn->nametbl_lock); >>> >>> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >>> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >>> type, lower, upper, key); >>> } >>> + rc_dests = nt->rc_dests; >>> spin_unlock_bh(&tn->nametbl_lock); >>> >>> if (skb) { >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return 1; >>> } >>> return 0; >>> diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h >>> index 728bc7016c38..8064e1986e2c 100644 >>> --- a/net/tipc/name_table.h >>> +++ b/net/tipc/name_table.h >>> @@ -106,6 +106,8 @@ struct name_table { >>> struct list_head cluster_scope; >>> rwlock_t cluster_scope_lock; >>> u32 local_publ_count; >>> + u32 rc_dests; >>> + u32 snd_nxt; >>> }; >>> >>> int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); >>> diff --git a/net/tipc/node.c b/net/tipc/node.c >>> index 803a3a6d0f50..ad8d7bce1f98 100644 >>> --- a/net/tipc/node.c >>> +++ b/net/tipc/node.c >>> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >>> struct sk_buff_head arrvq; >>> struct sk_buff_head inputq2; >>> struct sk_buff_head namedq; >>> + u16 named_rcv_nxt; >>> + bool named_open; >>> }; >>> >>> /** >>> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) >>> write_unlock_bh(&n->lock); >>> >>> if (flags & TIPC_NOTIFY_NODE_DOWN) >>> - tipc_publ_notify(net, publ_list, addr); >>> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >>> >>> if (flags & TIPC_NOTIFY_NODE_UP) >>> - tipc_named_node_up(net, addr); >>> + tipc_named_node_up(net, addr, n->capabilities); >>> >>> if (flags & TIPC_NOTIFY_LINK_UP) { >>> tipc_mon_peer_up(net, addr, bearer_id); >>> @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head 
*xmitq) >>> return 0; >>> } >>> >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) >>> { >>> + struct sk_buff_head xmitq; >>> struct sk_buff *txskb; >>> struct tipc_node *n; >>> + u16 dummy; >>> u32 dst; >>> >>> + /* Use broadcast if all nodes support it */ >>> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >>> + __skb_queue_head_init(&xmitq); >>> + __skb_queue_tail(&xmitq, skb); >>> + tipc_bcast_xmit(net, &xmitq, &dummy); >>> + return; >>> + } >>> + >>> + /* Otherwise use legacy replicast method */ >>> rcu_read_lock(); >>> list_for_each_entry_rcu(n, tipc_nodes(net), list) { >>> dst = n->addr; >>> @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >>> tipc_node_xmit_skb(net, txskb, dst, 0); >>> } >>> rcu_read_unlock(); >>> - >>> kfree_skb(skb); >>> } >>> >>> @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id >>> >>> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >>> if (!skb_queue_empty(&n->bc_entry.namedq)) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + &n->bc_entry.named_open); >>> >>> /* If reassembly or retransmission failure => reset all links to peer */ >>> if (rc & TIPC_LINK_DOWN_EVT) >>> @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) >>> tipc_node_link_down(n, bearer_id, false); >>> >>> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + &n->bc_entry.named_open); >>> >>> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >>> tipc_node_mcast_rcv(n); >>> diff --git a/net/tipc/node.h b/net/tipc/node.h >>> index a6803b449a2c..9f6f13f1604f 100644 >>> --- a/net/tipc/node.h >>> 
+++ b/net/tipc/node.h >>> @@ -55,7 +55,8 @@ enum { >>> TIPC_MCAST_RBCTL = (1 << 7), >>> TIPC_GAP_ACK_BLOCK = (1 << 8), >>> TIPC_TUNNEL_ENHANCED = (1 << 9), >>> - TIPC_NAGLE = (1 << 10) >>> + TIPC_NAGLE = (1 << 10), >>> + TIPC_NAMED_BCAST = (1 << 11) >>> }; >>> >>> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >>> @@ -68,7 +69,8 @@ enum { >>> TIPC_MCAST_RBCTL | \ >>> TIPC_GAP_ACK_BLOCK | \ >>> TIPC_TUNNEL_ENHANCED | \ >>> - TIPC_NAGLE) >>> + TIPC_NAGLE | \ >>> + TIPC_NAMED_BCAST) >>> >>> #define INVALID_BEARER_ID -1 >>> >>> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, >>> u32 selector); >>> void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); >>> void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); >>> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); >>> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >>> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Hoang H. Le <hoa...@de...> - 2020-06-09 09:35:55
|
-----Original Message----- From: Jon Maloy <jm...@re...> Sent: Monday, June 8, 2020 8:33 PM To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... Subject: Re: [next-net v6] tipc: update a binding service via broadcast On 6/6/20 11:10 PM, Hoang Huu Le wrote: > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Friday, June 5, 2020 8:03 PM > To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... > Subject: Re: [next-net v6] tipc: update a binding service via broadcast > > > > On 6/5/20 3:52 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a cluster one >> by one. [...] >> + if (*open && (*rcv_nxt == seqno)) { >> + (*rcv_nxt)++; >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + >> + if (less(seqno, *rcv_nxt)) { >> + __skb_unlink(skb, namedq); >> + kfree_skb(skb); >> + continue; > Still not needed. This queue should be flushed in > tipc_node_lost_contact(), which I now see we don't do. > [Hoang] Yes, that's right. I will verify and send it out. > > This has to e fixed too. > ///jon I hate to admit it, but we might actually need this test after all. Imagine that somebody has done 'publish' just after the broadcast link came up (in tipc_bcast_add_peer()) , but before tipc_named_node_up() is called. The context of those two calls is not atomic, so I think it is possible that this publication might end up both in the bcast_link backlog queue and in the bulk distribution. This publication message will have a named_seqno that is lower than the agreed synch point, and should be dropped at reception. Given the crucial role of the binding table for the overall TIPC functionality I think it is better be safe than sorry here, and keep this test. 
[Hoang] Finally, I'm able to reproduce the problem as same as above scene: <code> 357 if (less(seqno, *rcv_nxt)) { 358 pr_info("DROP[%x->%x]: %s blk %d lblk %d nxt %d legacy %d seqno %u bc %u hsz %u dsz %u qlen %u\n", 359 msg_orignode(hdr), tipc_own_addr(net), 360 msg_type(hdr) == PUBLICATION ? "PUBL":"DRAW", 361 msg_is_bulk(hdr), msg_is_last_bulk(hdr), 362 *rcv_nxt, msg_is_legacy(hdr), 363 msg_named_seqno(hdr), msg_non_seq(hdr), 364 msg_hdr_sz(hdr), msg_data_sz(hdr), 365 skb_queue_len(namedq)); 366 367 __skb_unlink(skb, namedq); 368 kfree_skb(skb); 369 continue; 370 } </code> ----------- [12528.036895] tipc: Established link <1001024:eth0-1001001:brtipc> on network plane A [12528.043857] tipc: Established link <1001002:brtipc-1001001:brtipc> on network plane A [12528.136462] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3878 bc 0 hsz 40 dsz 20 qlen 23 [12528.140864] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3879 bc 0 hsz 40 dsz 20 qlen 22 [...] [12528.210959] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3893 bc 0 hsz 40 dsz 20 qlen 8 [12528.218903] tipc: DROP[1001001->1001002]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3894 bc 0 hsz 40 dsz 20 qlen 7 [12528.227214] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3878 bc 0 hsz 40 dsz 20 qlen 23 [12528.231285] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3879 bc 0 hsz 40 dsz 20 qlen 22 [...] [12528.277445] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3893 bc 0 hsz 40 dsz 20 qlen 8 [12528.280847] tipc: DROP[1001001->1001024]: DRAW blk 0 lblk 0 nxt 3895 legacy 0 seqno 3894 bc 0 hsz 40 dsz 20 qlen 7 --- I will re-post the patch including the test as well. 
///jon >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); >> 
-void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff 
--git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index 803a3a6d0f50..ad8d7bce1f98 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) >> write_unlock_bh(&n->lock); >> >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) >> return 0; >> } >> >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), 
list) { >> dst = n->addr; >> @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> >> @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id >> >> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> /* If reassembly or retransmission failure => reset all links to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> >> #define INVALID_BEARER_ID -1 >> >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); >> void 
tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Tuong L. <tuo...@de...> - 2020-06-09 03:55:53
|
syzbot found the following issue: WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 check_copy_size include/linux/thread_info.h:150 [inline] WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 copy_from_iter include/linux/uio.h:144 [inline] WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 tipc_msg_append+0x49a/0x5e0 net/tipc/msg.c:242 Kernel panic - not syncing: panic_on_warn set ... This happens after commit 5e9eeccc58f3 ("tipc: fix NULL pointer dereference in streaming") that tried to build at least one buffer even when the message data length is zero... However, it now exposes another bug that the 'mss' can be zero and the 'cpy' will be negative, thus the above kernel WARNING will appear! The zero value of 'mss' is never expected because it means Nagle is not enabled for the socket (actually the socket type was 'SOCK_SEQPACKET'), so the function 'tipc_msg_append()' must not be called at all. But that was in this particular case since the message data length was zero, and the 'send <= maxnagle' check became true. We resolve the issue by explicitly checking if Nagle is enabled for the socket, i.e. 'maxnagle != 0' before calling the 'tipc_msg_append()'. In addition, we put a sanity check in the function to avoid calling the 'copy_from_iter()' with a negative size and doing an infinite loop. v2: use 'size_t' in the 'min_t()' to get a proper value of 'cpy' (after Jon's comment) Reported-by: syz...@sy... 
Fixes: c0bceb97db9e ("tipc: add smart nagle feature") Signed-off-by: Tuong Lien <tuo...@de...> --- net/tipc/msg.c | 4 ++-- net/tipc/socket.c | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 046e4cb3acea..01b64869a173 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -238,14 +238,14 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, hdr = buf_msg(skb); curr = msg_blocks(hdr); mlen = msg_size(hdr); - cpy = min_t(int, rem, mss - mlen); + cpy = min_t(size_t, rem, mss - mlen); if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) return -EFAULT; msg_set_size(hdr, mlen + cpy); skb_put(skb, cpy); rem -= cpy; total += msg_blocks(hdr) - curr; - } while (rem); + } while (rem > 0); return total - accounted; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 26123f4177fd..a94f38333698 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1574,7 +1574,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) break; send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); blocks = tsk->snd_backlog; - if (tsk->oneway++ >= tsk->nagle_start && send <= maxnagle) { + if (tsk->oneway++ >= tsk->nagle_start && maxnagle && + send <= maxnagle) { rc = tipc_msg_append(hdr, m, send, maxnagle, txq); if (unlikely(rc < 0)) break; -- 2.13.7 |
From: Tuong T. L. <tuo...@de...> - 2020-06-09 03:28:14
|
> -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Monday, June 8, 2020 8:13 PM > To: Tuong Tong Lien <tuo...@de...>; ma...@do...; yin...@wi...; tipc- > dis...@li... > Cc: tipc-dek <tip...@de...> > Subject: Re: [net] tipc: fix kernel WARNING in tipc_msg_append() > > > > On 6/8/20 8:05 AM, Tuong Lien wrote: > > syzbot found the following issue: > > > > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 check_copy_size include/linux/thread_info.h:150 [inline] > > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 copy_from_iter include/linux/uio.h:144 [inline] > > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 tipc_msg_append+0x49a/0x5e0 net/tipc/msg.c:242 > > Kernel panic - not syncing: panic_on_warn set ... > > > > This happens after commit 5e9eeccc58f3 ("tipc: fix NULL pointer > > dereference in streaming") that tried to build at least one buffer even > > when the message data length is zero... However, it now exposes another > > bug that the 'mss' can be zero and the 'cpy' will be negative, thus the > > above kernel WARNING will appear! > > The zero value of 'mss' is never expected because it means Nagle is not > > enabled for the socket (actually the socket type was 'SOCK_SEQPACKET'), > > so the function 'tipc_msg_append()' must not be called at all. But that > > was in this particular case since the message data length was zero, and > > the 'send <= maxnagle' check became true. > > > > We resolve the issue by explicitly checking if Nagle is enabled for the > > socket, i.e. 'maxnagle != 0' before calling the 'tipc_msg_append()'. In > > addition, we put a sanity check in the function to avoid calling the > > 'copy_from_iter()' with a negative size and doing an infinite loop. > > > > Reported-by: syz...@sy... 
> > Fixes: c0bceb97db9e ("tipc: add smart nagle feature") > > Signed-off-by: Tuong Lien <tuo...@de...> > > --- > > net/tipc/msg.c | 5 +++-- > > net/tipc/socket.c | 3 ++- > > 2 files changed, 5 insertions(+), 3 deletions(-) > > > > diff --git a/net/tipc/msg.c b/net/tipc/msg.c > > index 046e4cb3acea..ea3ebf35e0c2 100644 > > --- a/net/tipc/msg.c > > +++ b/net/tipc/msg.c > > @@ -239,13 +239,14 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, > > curr = msg_blocks(hdr); > > mlen = msg_size(hdr); > > cpy = min_t(int, rem, mss - mlen); > > - if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) > > + if (cpy < 0 || > You can probably just redeclare cpy (and mlen, rem) to u32 here. > ///jon Yes, it should be 'unsigned', but the actual issue here is overflow, so if we use u32, we will still need to check if not > INT_MAX... Instead, I think we can just change the data type at the 'min_t()', such as: cpy = min_t(unsinged int, rem, mss - mlen); Do you agree? > > + cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) > > return -EFAULT; > > msg_set_size(hdr, mlen + cpy); > > skb_put(skb, cpy); > > rem -= cpy; > > total += msg_blocks(hdr) - curr; > > - } while (rem); > > + } while (rem > 0); > > return total - accounted; > > } > > > > diff --git a/net/tipc/socket.c b/net/tipc/socket.c > > index 26123f4177fd..a94f38333698 100644 > > --- a/net/tipc/socket.c > > +++ b/net/tipc/socket.c > > @@ -1574,7 +1574,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) > > break; > > send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); > > blocks = tsk->snd_backlog; > > - if (tsk->oneway++ >= tsk->nagle_start && send <= maxnagle) { > > + if (tsk->oneway++ >= tsk->nagle_start && maxnagle && > > + send <= maxnagle) { How about this? I believe this is a must because we never want to do Nagle stuffs for a non-Nagle socket (like SOCK_SEQPACKET). 
> > rc = tipc_msg_append(hdr, m, send, maxnagle, txq); > > if (unlikely(rc < 0)) > > break; |
From: Jon M. <jm...@re...> - 2020-06-08 13:33:51
|
On 6/6/20 11:10 PM, Hoang Huu Le wrote: > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Friday, June 5, 2020 8:03 PM > To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... > Subject: Re: [next-net v6] tipc: update a binding service via broadcast > > > > On 6/5/20 3:52 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a cluster one >> by one. [...] >> + if (*open && (*rcv_nxt == seqno)) { >> + (*rcv_nxt)++; >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + >> + if (less(seqno, *rcv_nxt)) { >> + __skb_unlink(skb, namedq); >> + kfree_skb(skb); >> + continue; > Still not needed. This queue should be flushed in > tipc_node_lost_contact(), which I now see we don't do. > [Hoang] Yes, that's right. I will verify and send it out. > > This has to e fixed too. > ///jon I hate to admit it, but we might actually need this test after all. Imagine that somebody has done 'publish' just after the broadcast link came up (in tipc_bcast_add_peer()) , but before tipc_named_node_up() is called. The context of those two calls is not atomic, so I think it is possible that this publication might end up both in the bcast_link backlog queue and in the bulk distribution. This publication message will have a named_seqno that is lower than the agreed synch point, and should be dropped at reception. Given the crucial role of the binding table for the overall TIPC functionality I think it is better be safe than sorry here, and keep this test. 
///jon >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); >> 
-void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff 
--git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index 803a3a6d0f50..ad8d7bce1f98 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) >> write_unlock_bh(&n->lock); >> >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) >> return 0; >> } >> >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), 
list) { >> dst = n->addr; >> @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> >> @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id >> >> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> /* If reassembly or retransmission failure => reset all links to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> >> #define INVALID_BEARER_ID -1 >> >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); >> void 
tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Jon M. <jm...@re...> - 2020-06-08 13:13:03
|
On 6/8/20 8:05 AM, Tuong Lien wrote: > syzbot found the following issue: > > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 check_copy_size include/linux/thread_info.h:150 [inline] > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 copy_from_iter include/linux/uio.h:144 [inline] > WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 tipc_msg_append+0x49a/0x5e0 net/tipc/msg.c:242 > Kernel panic - not syncing: panic_on_warn set ... > > This happens after commit 5e9eeccc58f3 ("tipc: fix NULL pointer > dereference in streaming") that tried to build at least one buffer even > when the message data length is zero... However, it now exposes another > bug that the 'mss' can be zero and the 'cpy' will be negative, thus the > above kernel WARNING will appear! > The zero value of 'mss' is never expected because it means Nagle is not > enabled for the socket (actually the socket type was 'SOCK_SEQPACKET'), > so the function 'tipc_msg_append()' must not be called at all. But that > was in this particular case since the message data length was zero, and > the 'send <= maxnagle' check became true. > > We resolve the issue by explicitly checking if Nagle is enabled for the > socket, i.e. 'maxnagle != 0' before calling the 'tipc_msg_append()'. In > addition, we put a sanity check in the function to avoid calling the > 'copy_from_iter()' with a negative size and doing an infinite loop. > > Reported-by: syz...@sy... 
> Fixes: c0bceb97db9e ("tipc: add smart nagle feature") > Signed-off-by: Tuong Lien <tuo...@de...> > --- > net/tipc/msg.c | 5 +++-- > net/tipc/socket.c | 3 ++- > 2 files changed, 5 insertions(+), 3 deletions(-) > > diff --git a/net/tipc/msg.c b/net/tipc/msg.c > index 046e4cb3acea..ea3ebf35e0c2 100644 > --- a/net/tipc/msg.c > +++ b/net/tipc/msg.c > @@ -239,13 +239,14 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, > curr = msg_blocks(hdr); > mlen = msg_size(hdr); > cpy = min_t(int, rem, mss - mlen); > - if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) > + if (cpy < 0 || You can probably just redeclare cpy (and mlen, rem) to u32 here. ///jon > + cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) > return -EFAULT; > msg_set_size(hdr, mlen + cpy); > skb_put(skb, cpy); > rem -= cpy; > total += msg_blocks(hdr) - curr; > - } while (rem); > + } while (rem > 0); > return total - accounted; > } > > diff --git a/net/tipc/socket.c b/net/tipc/socket.c > index 26123f4177fd..a94f38333698 100644 > --- a/net/tipc/socket.c > +++ b/net/tipc/socket.c > @@ -1574,7 +1574,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) > break; > send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); > blocks = tsk->snd_backlog; > - if (tsk->oneway++ >= tsk->nagle_start && send <= maxnagle) { > + if (tsk->oneway++ >= tsk->nagle_start && maxnagle && > + send <= maxnagle) { > rc = tipc_msg_append(hdr, m, send, maxnagle, txq); > if (unlikely(rc < 0)) > break; |
From: Tuong L. <tuo...@de...> - 2020-06-08 12:05:59
|
syzbot found the following issue: WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 check_copy_size include/linux/thread_info.h:150 [inline] WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 copy_from_iter include/linux/uio.h:144 [inline] WARNING: CPU: 0 PID: 6808 at include/linux/thread_info.h:150 tipc_msg_append+0x49a/0x5e0 net/tipc/msg.c:242 Kernel panic - not syncing: panic_on_warn set ... This happens after commit 5e9eeccc58f3 ("tipc: fix NULL pointer dereference in streaming") that tried to build at least one buffer even when the message data length is zero... However, it now exposes another bug that the 'mss' can be zero and the 'cpy' will be negative, thus the above kernel WARNING will appear! The zero value of 'mss' is never expected because it means Nagle is not enabled for the socket (actually the socket type was 'SOCK_SEQPACKET'), so the function 'tipc_msg_append()' must not be called at all. But that was in this particular case since the message data length was zero, and the 'send <= maxnagle' check became true. We resolve the issue by explicitly checking if Nagle is enabled for the socket, i.e. 'maxnagle != 0' before calling the 'tipc_msg_append()'. In addition, we put a sanity check in the function to avoid calling the 'copy_from_iter()' with a negative size and doing an infinite loop. Reported-by: syz...@sy... 
Fixes: c0bceb97db9e ("tipc: add smart nagle feature") Signed-off-by: Tuong Lien <tuo...@de...> --- net/tipc/msg.c | 5 +++-- net/tipc/socket.c | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 046e4cb3acea..ea3ebf35e0c2 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -239,13 +239,14 @@ int tipc_msg_append(struct tipc_msg *_hdr, struct msghdr *m, int dlen, curr = msg_blocks(hdr); mlen = msg_size(hdr); cpy = min_t(int, rem, mss - mlen); - if (cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) + if (cpy < 0 || + cpy != copy_from_iter(skb->data + mlen, cpy, &m->msg_iter)) return -EFAULT; msg_set_size(hdr, mlen + cpy); skb_put(skb, cpy); rem -= cpy; total += msg_blocks(hdr) - curr; - } while (rem); + } while (rem > 0); return total - accounted; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 26123f4177fd..a94f38333698 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1574,7 +1574,8 @@ static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen) break; send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE); blocks = tsk->snd_backlog; - if (tsk->oneway++ >= tsk->nagle_start && send <= maxnagle) { + if (tsk->oneway++ >= tsk->nagle_start && maxnagle && + send <= maxnagle) { rc = tipc_msg_append(hdr, m, send, maxnagle, txq); if (unlikely(rc < 0)) break; -- 2.13.7 |
From: Hoang H. Le <hoa...@de...> - 2020-06-08 01:59:35
|
Thanks Jon! Hoang -----Original Message----- From: Jon Maloy <jm...@re...> Sent: Monday, June 8, 2020 8:57 AM To: Hoang Huu Le <hoa...@de...>; tip...@li...; tipc-dek <tip...@de...> Subject: Re: [tipc-discussion] [net-next] tipc: update a binding service via broadcast In case you are not aware of this, check this link before you send anything to net-next. http://vger.kernel.org/~davem/net-next.html ///jon On 6/7/20 9:50 PM, Hoang Huu Le wrote: > > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Monday, June 8, 2020 2:14 AM > To: tip...@li... > Subject: Re: [tipc-discussion] [net-next] tipc: update a binding service via broadcast > > > > On 6/7/20 3:03 PM, Jon Maloy wrote: >> >> On 6/7/20 12:24 AM, Hoang Huu Le wrote: >>> Currently, updating binding table (add service binding to >>> name table/withdraw a service binding) is being sent over replicast. >>> However, if we are scaling up clusters to > 100 nodes/containers this >>> method is less affection because of looping through nodes in a >>> cluster one >>> by one. >>> >>> It is worth to use broadcast to update a binding service. This way, the >>> binding table can be updated on all peer nodes in one shot. >>> >>> Broadcast is used when all peer nodes, as indicated by a new capability >>> flag TIPC_NAMED_BCAST, support reception of this message type. >>> >>> Four problems need to be considered when introducing this feature. >>> 1) When establishing a link to a new peer node we still update this by a >>> unicast 'bulk' update. This may lead to race conditions, where a later >>> broadcast publication/withdrawal bypass the 'bulk', resulting in >>> disordered publications, or even that a withdrawal may arrive before the >>> corresponding publication. We solve this by adding an 'is_last_bulk' bit >>> in the last bulk messages so that it can be distinguished from all other >>> messages. Only when this message has arrived do we open up for reception >>> of broadcast publications/withdrawals. 
>> Add a line feed between these paragraphs before you send the patch. >> Otherwise, still acked by me. >> >> ///jon > Oh, already posted... Just ignore my comment above. > [Hoang] net-next is closed. I will re-post the patch later with your suggestion. > > ///jon >>> 2) When a first legacy node is added to the cluster all distribution >>> will switch over to use the legacy 'replicast' method, while the >>> opposite happens when the last legacy node leaves the cluster. This >>> entails another risk of message disordering that has to be handled. We >>> solve this by adding a sequence number to the broadcast/replicast >>> messages, so that disordering can be discovered and corrected. Note >>> however that we don't need to consider potential message loss or >>> duplication at this protocol level. >>> 3) Bulk messages don't contain any sequence numbers, and will always >>> arrive in order. Hence we must exempt those from the sequence number >>> control and deliver them unconditionally. We solve this by adding a new >>> 'is_bulk' bit in those messages so that they can be recognized. >>> 4) Legacy messages, which don't contain any new bits or sequence >>> numbers, but neither can arrive out of order, also need to be exempt >>> from the initial synchronization and sequence number check, and >>> delivered unconditionally. Therefore, we add another 'is_not_legacy' bit >>> to all new messages so that those can be distinguished from legacy >>> messages and the latter delivered directly. 
>>> >>> Signed-off-by: Hoang Huu Le <hoa...@de...> >>> Acked-by: Jon Maloy <jm...@re...> >>> --- >>> net/tipc/bcast.c | 6 +-- >>> net/tipc/bcast.h | 4 +- >>> net/tipc/link.c | 2 +- >>> net/tipc/msg.h | 40 ++++++++++++++++ >>> net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- >>> net/tipc/name_distr.h | 9 ++-- >>> net/tipc/name_table.c | 9 +++- >>> net/tipc/name_table.h | 2 + >>> net/tipc/node.c | 29 ++++++++--- >>> net/tipc/node.h | 8 ++-- >>> 10 files changed, 170 insertions(+), 48 deletions(-) >>> >>> diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c >>> index 383f87bc1061..940d176e0e87 100644 >>> --- a/net/tipc/bcast.c >>> +++ b/net/tipc/bcast.c >>> @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct >>> net *net, int dests, >>> * Consumes the buffer chain. >>> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE >>> */ >>> -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> - u16 *cong_link_cnt) >>> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> + u16 *cong_link_cnt) >>> { >>> struct tipc_link *l = tipc_bc_sndlink(net); >>> struct sk_buff_head xmitq; >>> @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) >>> nl->local = false; >>> } >>> -u32 tipc_bcast_get_broadcast_mode(struct net *net) >>> +u32 tipc_bcast_get_mode(struct net *net) >>> { >>> struct tipc_bc_base *bb = tipc_bc_base(net); >>> diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h >>> index 4240c95188b1..2d9352dc7b0e 100644 >>> --- a/net/tipc/bcast.h >>> +++ b/net/tipc/bcast.h >>> @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool >>> supp); >>> int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> struct tipc_mc_method *method, struct tipc_nlist *dests, >>> u16 *cong_link_cnt); >>> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> + u16 *cong_link_cnt); >>> int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct >>> sk_buff *skb); >>> 
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, >>> struct tipc_msg *hdr); >>> @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >>> tipc_nl_msg *msg, >>> int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); >>> int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); >>> -u32 tipc_bcast_get_broadcast_mode(struct net *net); >>> +u32 tipc_bcast_get_mode(struct net *net); >>> u32 tipc_bcast_get_broadcast_ratio(struct net *net); >>> void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head >>> *defq, >>> diff --git a/net/tipc/link.c b/net/tipc/link.c >>> index ee3b8d0576b8..eac89a3e22ce 100644 >>> --- a/net/tipc/link.c >>> +++ b/net/tipc/link.c >>> @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >>> tipc_nl_msg *msg, >>> void *hdr; >>> struct nlattr *attrs; >>> struct nlattr *prop; >>> - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); >>> + u32 bc_mode = tipc_bcast_get_mode(net); >>> u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); >>> if (!bcl) >>> diff --git a/net/tipc/msg.h b/net/tipc/msg.h >>> index 58660d56bc83..65119e81ff0c 100644 >>> --- a/net/tipc/msg.h >>> +++ b/net/tipc/msg.h >>> @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct >>> tipc_msg *m, u32 err) >>> msg_set_bits(m, 1, 25, 0xf, err); >>> } >>> +static inline void msg_set_bulk(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 28, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_bulk(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 1, 28, 0x1); >>> +} >>> + >>> +static inline void msg_set_last_bulk(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 27, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_last_bulk(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 1, 27, 0x1); >>> +} >>> + >>> +static inline void msg_set_non_legacy(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 26, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_legacy(struct tipc_msg *m) >>> +{ >>> + return 
!msg_bits(m, 1, 26, 0x1); >>> +} >>> + >>> static inline u32 msg_reroute_cnt(struct tipc_msg *m) >>> { >>> return msg_bits(m, 1, 21, 0xf); >>> @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct >>> tipc_msg *m, u32 p) >>> msg_set_word(m, 4, p); >>> } >>> +static inline u16 msg_named_seqno(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 4, 0, 0xffff); >>> +} >>> + >>> +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) >>> +{ >>> + msg_set_bits(m, 4, 0, 0xffff, n); >>> +} >>> + >>> static inline u32 msg_destport(struct tipc_msg *m) >>> { >>> return msg_word(m, 5); >>> diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c >>> index 5feaf3b67380..481d480609f0 100644 >>> --- a/net/tipc/name_distr.c >>> +++ b/net/tipc/name_distr.c >>> @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net >>> *net, struct publication *publ) >>> pr_warn("Publication distribution failure\n"); >>> return NULL; >>> } >>> - >>> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >>> + msg_set_non_legacy(buf_msg(skb)); >>> item = (struct distr_item *)msg_data(buf_msg(skb)); >>> publ_to_item(item, publ); >>> return skb; >>> @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net >>> *net, struct publication *publ) >>> struct sk_buff *tipc_named_withdraw(struct net *net, struct >>> publication *publ) >>> { >>> struct name_table *nt = tipc_name_table(net); >>> - struct sk_buff *buf; >>> struct distr_item *item; >>> + struct sk_buff *skb; >>> write_lock_bh(&nt->cluster_scope_lock); >>> list_del(&publ->binding_node); >>> @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net >>> *net, struct publication *publ) >>> if (publ->scope == TIPC_NODE_SCOPE) >>> return NULL; >>> - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >>> - if (!buf) { >>> + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >>> + if (!skb) { >>> pr_warn("Withdrawal distribution failure\n"); >>> return NULL; >>> } >>> - >>> - item = (struct 
distr_item *)msg_data(buf_msg(buf)); >>> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >>> + msg_set_non_legacy(buf_msg(skb)); >>> + item = (struct distr_item *)msg_data(buf_msg(skb)); >>> publ_to_item(item, publ); >>> - return buf; >>> + return skb; >>> } >>> /** >>> @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net >>> *net, struct publication *publ) >>> * @pls: linked list of publication items to be packed into buffer >>> chain >>> */ >>> static void named_distribute(struct net *net, struct sk_buff_head >>> *list, >>> - u32 dnode, struct list_head *pls) >>> + u32 dnode, struct list_head *pls, u16 seqno) >>> { >>> struct publication *publ; >>> struct sk_buff *skb = NULL; >>> @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - >>> INT_H_SIZE) / >>> ITEM_SIZE) * ITEM_SIZE; >>> u32 msg_rem = msg_dsz; >>> + struct tipc_msg *hdr; >>> list_for_each_entry(publ, pls, binding_node) { >>> /* Prepare next buffer: */ >>> @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> pr_warn("Bulk publication failure\n"); >>> return; >>> } >>> - msg_set_bc_ack_invalid(buf_msg(skb), true); >>> - item = (struct distr_item *)msg_data(buf_msg(skb)); >>> + hdr = buf_msg(skb); >>> + msg_set_bc_ack_invalid(hdr, true); >>> + msg_set_bulk(hdr); >>> + msg_set_non_legacy(hdr); >>> + item = (struct distr_item *)msg_data(hdr); >>> } >>> /* Pack publication into message: */ >>> @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> } >>> } >>> if (skb) { >>> - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); >>> + hdr = buf_msg(skb); >>> + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); >>> skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); >>> __skb_queue_tail(list, skb); >>> } >>> + hdr = buf_msg(skb_peek_tail(list)); >>> + msg_set_last_bulk(hdr); >>> + 
msg_set_named_seqno(hdr, seqno); >>> } >>> /** >>> * tipc_named_node_up - tell specified node about all publications >>> by this node >>> */ >>> -void tipc_named_node_up(struct net *net, u32 dnode) >>> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) >>> { >>> struct name_table *nt = tipc_name_table(net); >>> + struct tipc_net *tn = tipc_net(net); >>> struct sk_buff_head head; >>> + u16 seqno; >>> __skb_queue_head_init(&head); >>> + spin_lock_bh(&tn->nametbl_lock); >>> + if (!(capabilities & TIPC_NAMED_BCAST)) >>> + nt->rc_dests++; >>> + seqno = nt->snd_nxt; >>> + spin_unlock_bh(&tn->nametbl_lock); >>> read_lock_bh(&nt->cluster_scope_lock); >>> - named_distribute(net, &head, dnode, &nt->cluster_scope); >>> + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); >>> tipc_node_xmit(net, &head, dnode, 0); >>> read_unlock_bh(&nt->cluster_scope_lock); >>> } >>> @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net >>> *net, u32 addr) >>> spin_unlock_bh(&tn->nametbl_lock); >>> } >>> -void tipc_publ_notify(struct net *net, struct list_head >>> *nsub_list, u32 addr) >>> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> + u32 addr, u16 capabilities) >>> { >>> + struct name_table *nt = tipc_name_table(net); >>> + struct tipc_net *tn = tipc_net(net); >>> + >>> struct publication *publ, *tmp; >>> list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) >>> tipc_publ_purge(net, publ, addr); >>> tipc_dist_queue_purge(net, addr); >>> + spin_lock_bh(&tn->nametbl_lock); >>> + if (!(capabilities & TIPC_NAMED_BCAST)) >>> + nt->rc_dests--; >>> + spin_unlock_bh(&tn->nametbl_lock); >>> } >>> /** >>> @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net >>> *net, struct distr_item *i, >>> return false; >>> } >>> +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open) >>> +{ >>> + struct sk_buff *skb, *tmp; >>> + struct tipc_msg *hdr; >>> + u16 seqno; >>> + >>> + 
skb_queue_walk_safe(namedq, skb, tmp) { >>> + skb_linearize(skb); >>> + hdr = buf_msg(skb); >>> + seqno = msg_named_seqno(hdr); >>> + if (msg_is_last_bulk(hdr)) { >>> + *rcv_nxt = seqno; >>> + *open = true; >>> + } >>> + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { >>> + __skb_unlink(skb, namedq); >>> + return skb; >>> + } >>> + >>> + if (*open && (*rcv_nxt == seqno)) { >>> + (*rcv_nxt)++; >>> + __skb_unlink(skb, namedq); >>> + return skb; >>> + } >>> + } >>> + return NULL; >>> +} >>> + >>> /** >>> * tipc_named_rcv - process name table update messages sent by >>> another node >>> */ >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open) >>> { >>> - struct tipc_net *tn = net_generic(net, tipc_net_id); >>> - struct tipc_msg *msg; >>> + struct tipc_net *tn = tipc_net(net); >>> struct distr_item *item; >>> - uint count; >>> - u32 node; >>> + struct tipc_msg *hdr; >>> struct sk_buff *skb; >>> - int mtype; >>> + u32 count, node = 0; >>> spin_lock_bh(&tn->nametbl_lock); >>> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >>> - skb_linearize(skb); >>> - msg = buf_msg(skb); >>> - mtype = msg_type(msg); >>> - item = (struct distr_item *)msg_data(msg); >>> - count = msg_data_sz(msg) / ITEM_SIZE; >>> - node = msg_orignode(msg); >>> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >>> + hdr = buf_msg(skb); >>> + node = msg_orignode(hdr); >>> + item = (struct distr_item *)msg_data(hdr); >>> + count = msg_data_sz(hdr) / ITEM_SIZE; >>> while (count--) { >>> - tipc_update_nametbl(net, item, node, mtype); >>> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >>> item++; >>> } >>> kfree_skb(skb); >>> @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) >>> publ->node = self; >>> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >>> publ->node = self; >>> - >>> + nt->rc_dests = 0; >>> 
spin_unlock_bh(&tn->nametbl_lock); >>> } >>> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >>> index 63fc73e0fa6c..092323158f06 100644 >>> --- a/net/tipc/name_distr.h >>> +++ b/net/tipc/name_distr.h >>> @@ -67,11 +67,14 @@ struct distr_item { >>> __be32 key; >>> }; >>> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >>> struct sk_buff *tipc_named_publish(struct net *net, struct >>> publication *publ); >>> struct sk_buff *tipc_named_withdraw(struct net *net, struct >>> publication *publ); >>> -void tipc_named_node_up(struct net *net, u32 dnode); >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >>> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open); >>> void tipc_named_reinit(struct net *net); >>> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> u32 addr); >>> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> + u32 addr, u16 capabilities); >>> #endif >>> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >>> index 359b2bc888cf..2ac33d32edc2 100644 >>> --- a/net/tipc/name_table.c >>> +++ b/net/tipc/name_table.c >>> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct >>> net *net, u32 type, u32 lower, >>> struct tipc_net *tn = tipc_net(net); >>> struct publication *p = NULL; >>> struct sk_buff *skb = NULL; >>> + u32 rc_dests; >>> spin_lock_bh(&tn->nametbl_lock); >>> @@ -743,12 +744,14 @@ struct publication >>> *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >>> nt->local_publ_count++; >>> skb = tipc_named_publish(net, p); >>> } >>> + rc_dests = nt->rc_dests; >>> exit: >>> spin_unlock_bh(&tn->nametbl_lock); >>> if (skb) >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return p; >>> + >>> } >>> /** >>> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 >>> type, 
u32 lower, >>> u32 self = tipc_own_addr(net); >>> struct sk_buff *skb = NULL; >>> struct publication *p; >>> + u32 rc_dests; >>> spin_lock_bh(&tn->nametbl_lock); >>> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, >>> u32 type, u32 lower, >>> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >>> type, lower, upper, key); >>> } >>> + rc_dests = nt->rc_dests; >>> spin_unlock_bh(&tn->nametbl_lock); >>> if (skb) { >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return 1; >>> } >>> return 0; >>> diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h >>> index 728bc7016c38..8064e1986e2c 100644 >>> --- a/net/tipc/name_table.h >>> +++ b/net/tipc/name_table.h >>> @@ -106,6 +106,8 @@ struct name_table { >>> struct list_head cluster_scope; >>> rwlock_t cluster_scope_lock; >>> u32 local_publ_count; >>> + u32 rc_dests; >>> + u32 snd_nxt; >>> }; >>> int tipc_nl_name_table_dump(struct sk_buff *skb, struct >>> netlink_callback *cb); >>> diff --git a/net/tipc/node.c b/net/tipc/node.c >>> index a4c2816c3746..030a51c4d1fa 100644 >>> --- a/net/tipc/node.c >>> +++ b/net/tipc/node.c >>> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >>> struct sk_buff_head arrvq; >>> struct sk_buff_head inputq2; >>> struct sk_buff_head namedq; >>> + u16 named_rcv_nxt; >>> + bool named_open; >>> }; >>> /** >>> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct >>> tipc_node *n) >>> write_unlock_bh(&n->lock); >>> if (flags & TIPC_NOTIFY_NODE_DOWN) >>> - tipc_publ_notify(net, publ_list, addr); >>> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >>> if (flags & TIPC_NOTIFY_NODE_UP) >>> - tipc_named_node_up(net, addr); >>> + tipc_named_node_up(net, addr, n->capabilities); >>> if (flags & TIPC_NOTIFY_LINK_UP) { >>> tipc_mon_peer_up(net, addr, bearer_id); >>> @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, >>> /* Clean up broadcast state */ >>> tipc_bcast_remove_peer(n->net, n->bc_entry.link); 
>>> + __skb_queue_purge(&n->bc_entry.namedq); >>> /* Abort any ongoing link failover */ >>> for (i = 0; i < MAX_BEARERS; i++) { >>> @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, >>> struct sk_buff_head *xmitq) >>> return 0; >>> } >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >>> rc_dests) >>> { >>> + struct sk_buff_head xmitq; >>> struct sk_buff *txskb; >>> struct tipc_node *n; >>> + u16 dummy; >>> u32 dst; >>> + /* Use broadcast if all nodes support it */ >>> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >>> + __skb_queue_head_init(&xmitq); >>> + __skb_queue_tail(&xmitq, skb); >>> + tipc_bcast_xmit(net, &xmitq, &dummy); >>> + return; >>> + } >>> + >>> + /* Otherwise use legacy replicast method */ >>> rcu_read_lock(); >>> list_for_each_entry_rcu(n, tipc_nodes(net), list) { >>> dst = n->addr; >>> @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, >>> struct sk_buff *skb) >>> tipc_node_xmit_skb(net, txskb, dst, 0); >>> } >>> rcu_read_unlock(); >>> - >>> kfree_skb(skb); >>> } >>> @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, >>> struct sk_buff *skb, int bearer_id >>> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >>> if (!skb_queue_empty(&n->bc_entry.namedq)) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + &n->bc_entry.named_open); >>> /* If reassembly or retransmission failure => reset all links >>> to peer */ >>> if (rc & TIPC_LINK_DOWN_EVT) >>> @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff >>> *skb, struct tipc_bearer *b) >>> tipc_node_link_down(n, bearer_id, false); >>> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + 
&n->bc_entry.named_open); >>> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >>> tipc_node_mcast_rcv(n); >>> diff --git a/net/tipc/node.h b/net/tipc/node.h >>> index a6803b449a2c..9f6f13f1604f 100644 >>> --- a/net/tipc/node.h >>> +++ b/net/tipc/node.h >>> @@ -55,7 +55,8 @@ enum { >>> TIPC_MCAST_RBCTL = (1 << 7), >>> TIPC_GAP_ACK_BLOCK = (1 << 8), >>> TIPC_TUNNEL_ENHANCED = (1 << 9), >>> - TIPC_NAGLE = (1 << 10) >>> + TIPC_NAGLE = (1 << 10), >>> + TIPC_NAMED_BCAST = (1 << 11) >>> }; >>> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >>> @@ -68,7 +69,8 @@ enum { >>> TIPC_MCAST_RBCTL | \ >>> TIPC_GAP_ACK_BLOCK | \ >>> TIPC_TUNNEL_ENHANCED | \ >>> - TIPC_NAGLE) >>> + TIPC_NAGLE | \ >>> + TIPC_NAMED_BCAST) >>> #define INVALID_BEARER_ID -1 >>> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct >>> sk_buff *skb, u32 dest, >>> u32 selector); >>> void tipc_node_subscribe(struct net *net, struct list_head *subscr, >>> u32 addr); >>> void tipc_node_unsubscribe(struct net *net, struct list_head >>> *subscr, u32 addr); >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >>> rc_dests); >>> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 >>> peer_port); >>> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >>> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool >>> connected); >> >> >> _______________________________________________ >> tipc-discussion mailing list >> tip...@li... >> https://lists.sourceforge.net/lists/listinfo/tipc-discussion >> > > > _______________________________________________ > tipc-discussion mailing list > tip...@li... > https://lists.sourceforge.net/lists/listinfo/tipc-discussion |
From: Jon M. <jm...@re...> - 2020-06-08 01:57:33
|
In case you are not aware of this, check this link before you send anything to net-next. http://vger.kernel.org/~davem/net-next.html ///jon On 6/7/20 9:50 PM, Hoang Huu Le wrote: > > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Monday, June 8, 2020 2:14 AM > To: tip...@li... > Subject: Re: [tipc-discussion] [net-next] tipc: update a binding service via broadcast > > > > On 6/7/20 3:03 PM, Jon Maloy wrote: >> >> On 6/7/20 12:24 AM, Hoang Huu Le wrote: >>> Currently, updating binding table (add service binding to >>> name table/withdraw a service binding) is being sent over replicast. >>> However, if we are scaling up clusters to > 100 nodes/containers this >>> method is less affection because of looping through nodes in a >>> cluster one >>> by one. >>> >>> It is worth to use broadcast to update a binding service. This way, the >>> binding table can be updated on all peer nodes in one shot. >>> >>> Broadcast is used when all peer nodes, as indicated by a new capability >>> flag TIPC_NAMED_BCAST, support reception of this message type. >>> >>> Four problems need to be considered when introducing this feature. >>> 1) When establishing a link to a new peer node we still update this by a >>> unicast 'bulk' update. This may lead to race conditions, where a later >>> broadcast publication/withdrawal bypass the 'bulk', resulting in >>> disordered publications, or even that a withdrawal may arrive before the >>> corresponding publication. We solve this by adding an 'is_last_bulk' bit >>> in the last bulk messages so that it can be distinguished from all other >>> messages. Only when this message has arrived do we open up for reception >>> of broadcast publications/withdrawals. >> Add a line feed between these paragraphs before you send the patch. >> Otherwise, still acked by me. >> >> ///jon > Oh, already posted... Just ignore my comment above. > [Hoang] net-next is closed. I will re-post the patch later with your suggestion. 
> > ///jon >>> 2) When a first legacy node is added to the cluster all distribution >>> will switch over to use the legacy 'replicast' method, while the >>> opposite happens when the last legacy node leaves the cluster. This >>> entails another risk of message disordering that has to be handled. We >>> solve this by adding a sequence number to the broadcast/replicast >>> messages, so that disordering can be discovered and corrected. Note >>> however that we don't need to consider potential message loss or >>> duplication at this protocol level. >>> 3) Bulk messages don't contain any sequence numbers, and will always >>> arrive in order. Hence we must exempt those from the sequence number >>> control and deliver them unconditionally. We solve this by adding a new >>> 'is_bulk' bit in those messages so that they can be recognized. >>> 4) Legacy messages, which don't contain any new bits or sequence >>> numbers, but neither can arrive out of order, also need to be exempt >>> from the initial synchronization and sequence number check, and >>> delivered unconditionally. Therefore, we add another 'is_not_legacy' bit >>> to all new messages so that those can be distinguished from legacy >>> messages and the latter delivered directly. 
>>> >>> Signed-off-by: Hoang Huu Le <hoa...@de...> >>> Acked-by: Jon Maloy <jm...@re...> >>> --- >>> net/tipc/bcast.c | 6 +-- >>> net/tipc/bcast.h | 4 +- >>> net/tipc/link.c | 2 +- >>> net/tipc/msg.h | 40 ++++++++++++++++ >>> net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- >>> net/tipc/name_distr.h | 9 ++-- >>> net/tipc/name_table.c | 9 +++- >>> net/tipc/name_table.h | 2 + >>> net/tipc/node.c | 29 ++++++++--- >>> net/tipc/node.h | 8 ++-- >>> 10 files changed, 170 insertions(+), 48 deletions(-) >>> >>> diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c >>> index 383f87bc1061..940d176e0e87 100644 >>> --- a/net/tipc/bcast.c >>> +++ b/net/tipc/bcast.c >>> @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct >>> net *net, int dests, >>> * Consumes the buffer chain. >>> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE >>> */ >>> -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> - u16 *cong_link_cnt) >>> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> + u16 *cong_link_cnt) >>> { >>> struct tipc_link *l = tipc_bc_sndlink(net); >>> struct sk_buff_head xmitq; >>> @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) >>> nl->local = false; >>> } >>> -u32 tipc_bcast_get_broadcast_mode(struct net *net) >>> +u32 tipc_bcast_get_mode(struct net *net) >>> { >>> struct tipc_bc_base *bb = tipc_bc_base(net); >>> diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h >>> index 4240c95188b1..2d9352dc7b0e 100644 >>> --- a/net/tipc/bcast.h >>> +++ b/net/tipc/bcast.h >>> @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool >>> supp); >>> int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> struct tipc_mc_method *method, struct tipc_nlist *dests, >>> u16 *cong_link_cnt); >>> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >>> + u16 *cong_link_cnt); >>> int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct >>> sk_buff *skb); >>> 
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, >>> struct tipc_msg *hdr); >>> @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >>> tipc_nl_msg *msg, >>> int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); >>> int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); >>> -u32 tipc_bcast_get_broadcast_mode(struct net *net); >>> +u32 tipc_bcast_get_mode(struct net *net); >>> u32 tipc_bcast_get_broadcast_ratio(struct net *net); >>> void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head >>> *defq, >>> diff --git a/net/tipc/link.c b/net/tipc/link.c >>> index ee3b8d0576b8..eac89a3e22ce 100644 >>> --- a/net/tipc/link.c >>> +++ b/net/tipc/link.c >>> @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >>> tipc_nl_msg *msg, >>> void *hdr; >>> struct nlattr *attrs; >>> struct nlattr *prop; >>> - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); >>> + u32 bc_mode = tipc_bcast_get_mode(net); >>> u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); >>> if (!bcl) >>> diff --git a/net/tipc/msg.h b/net/tipc/msg.h >>> index 58660d56bc83..65119e81ff0c 100644 >>> --- a/net/tipc/msg.h >>> +++ b/net/tipc/msg.h >>> @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct >>> tipc_msg *m, u32 err) >>> msg_set_bits(m, 1, 25, 0xf, err); >>> } >>> +static inline void msg_set_bulk(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 28, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_bulk(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 1, 28, 0x1); >>> +} >>> + >>> +static inline void msg_set_last_bulk(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 27, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_last_bulk(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 1, 27, 0x1); >>> +} >>> + >>> +static inline void msg_set_non_legacy(struct tipc_msg *m) >>> +{ >>> + msg_set_bits(m, 1, 26, 0x1, 1); >>> +} >>> + >>> +static inline u32 msg_is_legacy(struct tipc_msg *m) >>> +{ >>> + return 
!msg_bits(m, 1, 26, 0x1); >>> +} >>> + >>> static inline u32 msg_reroute_cnt(struct tipc_msg *m) >>> { >>> return msg_bits(m, 1, 21, 0xf); >>> @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct >>> tipc_msg *m, u32 p) >>> msg_set_word(m, 4, p); >>> } >>> +static inline u16 msg_named_seqno(struct tipc_msg *m) >>> +{ >>> + return msg_bits(m, 4, 0, 0xffff); >>> +} >>> + >>> +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) >>> +{ >>> + msg_set_bits(m, 4, 0, 0xffff, n); >>> +} >>> + >>> static inline u32 msg_destport(struct tipc_msg *m) >>> { >>> return msg_word(m, 5); >>> diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c >>> index 5feaf3b67380..481d480609f0 100644 >>> --- a/net/tipc/name_distr.c >>> +++ b/net/tipc/name_distr.c >>> @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net >>> *net, struct publication *publ) >>> pr_warn("Publication distribution failure\n"); >>> return NULL; >>> } >>> - >>> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >>> + msg_set_non_legacy(buf_msg(skb)); >>> item = (struct distr_item *)msg_data(buf_msg(skb)); >>> publ_to_item(item, publ); >>> return skb; >>> @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net >>> *net, struct publication *publ) >>> struct sk_buff *tipc_named_withdraw(struct net *net, struct >>> publication *publ) >>> { >>> struct name_table *nt = tipc_name_table(net); >>> - struct sk_buff *buf; >>> struct distr_item *item; >>> + struct sk_buff *skb; >>> write_lock_bh(&nt->cluster_scope_lock); >>> list_del(&publ->binding_node); >>> @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net >>> *net, struct publication *publ) >>> if (publ->scope == TIPC_NODE_SCOPE) >>> return NULL; >>> - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >>> - if (!buf) { >>> + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >>> + if (!skb) { >>> pr_warn("Withdrawal distribution failure\n"); >>> return NULL; >>> } >>> - >>> - item = (struct 
distr_item *)msg_data(buf_msg(buf)); >>> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >>> + msg_set_non_legacy(buf_msg(skb)); >>> + item = (struct distr_item *)msg_data(buf_msg(skb)); >>> publ_to_item(item, publ); >>> - return buf; >>> + return skb; >>> } >>> /** >>> @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net >>> *net, struct publication *publ) >>> * @pls: linked list of publication items to be packed into buffer >>> chain >>> */ >>> static void named_distribute(struct net *net, struct sk_buff_head >>> *list, >>> - u32 dnode, struct list_head *pls) >>> + u32 dnode, struct list_head *pls, u16 seqno) >>> { >>> struct publication *publ; >>> struct sk_buff *skb = NULL; >>> @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - >>> INT_H_SIZE) / >>> ITEM_SIZE) * ITEM_SIZE; >>> u32 msg_rem = msg_dsz; >>> + struct tipc_msg *hdr; >>> list_for_each_entry(publ, pls, binding_node) { >>> /* Prepare next buffer: */ >>> @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> pr_warn("Bulk publication failure\n"); >>> return; >>> } >>> - msg_set_bc_ack_invalid(buf_msg(skb), true); >>> - item = (struct distr_item *)msg_data(buf_msg(skb)); >>> + hdr = buf_msg(skb); >>> + msg_set_bc_ack_invalid(hdr, true); >>> + msg_set_bulk(hdr); >>> + msg_set_non_legacy(hdr); >>> + item = (struct distr_item *)msg_data(hdr); >>> } >>> /* Pack publication into message: */ >>> @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, >>> struct sk_buff_head *list, >>> } >>> } >>> if (skb) { >>> - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); >>> + hdr = buf_msg(skb); >>> + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); >>> skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); >>> __skb_queue_tail(list, skb); >>> } >>> + hdr = buf_msg(skb_peek_tail(list)); >>> + msg_set_last_bulk(hdr); >>> + 
msg_set_named_seqno(hdr, seqno); >>> } >>> /** >>> * tipc_named_node_up - tell specified node about all publications >>> by this node >>> */ >>> -void tipc_named_node_up(struct net *net, u32 dnode) >>> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) >>> { >>> struct name_table *nt = tipc_name_table(net); >>> + struct tipc_net *tn = tipc_net(net); >>> struct sk_buff_head head; >>> + u16 seqno; >>> __skb_queue_head_init(&head); >>> + spin_lock_bh(&tn->nametbl_lock); >>> + if (!(capabilities & TIPC_NAMED_BCAST)) >>> + nt->rc_dests++; >>> + seqno = nt->snd_nxt; >>> + spin_unlock_bh(&tn->nametbl_lock); >>> read_lock_bh(&nt->cluster_scope_lock); >>> - named_distribute(net, &head, dnode, &nt->cluster_scope); >>> + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); >>> tipc_node_xmit(net, &head, dnode, 0); >>> read_unlock_bh(&nt->cluster_scope_lock); >>> } >>> @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net >>> *net, u32 addr) >>> spin_unlock_bh(&tn->nametbl_lock); >>> } >>> -void tipc_publ_notify(struct net *net, struct list_head >>> *nsub_list, u32 addr) >>> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> + u32 addr, u16 capabilities) >>> { >>> + struct name_table *nt = tipc_name_table(net); >>> + struct tipc_net *tn = tipc_net(net); >>> + >>> struct publication *publ, *tmp; >>> list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) >>> tipc_publ_purge(net, publ, addr); >>> tipc_dist_queue_purge(net, addr); >>> + spin_lock_bh(&tn->nametbl_lock); >>> + if (!(capabilities & TIPC_NAMED_BCAST)) >>> + nt->rc_dests--; >>> + spin_unlock_bh(&tn->nametbl_lock); >>> } >>> /** >>> @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net >>> *net, struct distr_item *i, >>> return false; >>> } >>> +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open) >>> +{ >>> + struct sk_buff *skb, *tmp; >>> + struct tipc_msg *hdr; >>> + u16 seqno; >>> + >>> + 
skb_queue_walk_safe(namedq, skb, tmp) { >>> + skb_linearize(skb); >>> + hdr = buf_msg(skb); >>> + seqno = msg_named_seqno(hdr); >>> + if (msg_is_last_bulk(hdr)) { >>> + *rcv_nxt = seqno; >>> + *open = true; >>> + } >>> + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { >>> + __skb_unlink(skb, namedq); >>> + return skb; >>> + } >>> + >>> + if (*open && (*rcv_nxt == seqno)) { >>> + (*rcv_nxt)++; >>> + __skb_unlink(skb, namedq); >>> + return skb; >>> + } >>> + } >>> + return NULL; >>> +} >>> + >>> /** >>> * tipc_named_rcv - process name table update messages sent by >>> another node >>> */ >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open) >>> { >>> - struct tipc_net *tn = net_generic(net, tipc_net_id); >>> - struct tipc_msg *msg; >>> + struct tipc_net *tn = tipc_net(net); >>> struct distr_item *item; >>> - uint count; >>> - u32 node; >>> + struct tipc_msg *hdr; >>> struct sk_buff *skb; >>> - int mtype; >>> + u32 count, node = 0; >>> spin_lock_bh(&tn->nametbl_lock); >>> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >>> - skb_linearize(skb); >>> - msg = buf_msg(skb); >>> - mtype = msg_type(msg); >>> - item = (struct distr_item *)msg_data(msg); >>> - count = msg_data_sz(msg) / ITEM_SIZE; >>> - node = msg_orignode(msg); >>> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >>> + hdr = buf_msg(skb); >>> + node = msg_orignode(hdr); >>> + item = (struct distr_item *)msg_data(hdr); >>> + count = msg_data_sz(hdr) / ITEM_SIZE; >>> while (count--) { >>> - tipc_update_nametbl(net, item, node, mtype); >>> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >>> item++; >>> } >>> kfree_skb(skb); >>> @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) >>> publ->node = self; >>> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >>> publ->node = self; >>> - >>> + nt->rc_dests = 0; >>> 
spin_unlock_bh(&tn->nametbl_lock); >>> } >>> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >>> index 63fc73e0fa6c..092323158f06 100644 >>> --- a/net/tipc/name_distr.h >>> +++ b/net/tipc/name_distr.h >>> @@ -67,11 +67,14 @@ struct distr_item { >>> __be32 key; >>> }; >>> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >>> struct sk_buff *tipc_named_publish(struct net *net, struct >>> publication *publ); >>> struct sk_buff *tipc_named_withdraw(struct net *net, struct >>> publication *publ); >>> -void tipc_named_node_up(struct net *net, u32 dnode); >>> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >>> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >>> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >>> + u16 *rcv_nxt, bool *open); >>> void tipc_named_reinit(struct net *net); >>> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> u32 addr); >>> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >>> + u32 addr, u16 capabilities); >>> #endif >>> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >>> index 359b2bc888cf..2ac33d32edc2 100644 >>> --- a/net/tipc/name_table.c >>> +++ b/net/tipc/name_table.c >>> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct >>> net *net, u32 type, u32 lower, >>> struct tipc_net *tn = tipc_net(net); >>> struct publication *p = NULL; >>> struct sk_buff *skb = NULL; >>> + u32 rc_dests; >>> spin_lock_bh(&tn->nametbl_lock); >>> @@ -743,12 +744,14 @@ struct publication >>> *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >>> nt->local_publ_count++; >>> skb = tipc_named_publish(net, p); >>> } >>> + rc_dests = nt->rc_dests; >>> exit: >>> spin_unlock_bh(&tn->nametbl_lock); >>> if (skb) >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return p; >>> + >>> } >>> /** >>> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 >>> type, 
u32 lower, >>> u32 self = tipc_own_addr(net); >>> struct sk_buff *skb = NULL; >>> struct publication *p; >>> + u32 rc_dests; >>> spin_lock_bh(&tn->nametbl_lock); >>> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, >>> u32 type, u32 lower, >>> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >>> type, lower, upper, key); >>> } >>> + rc_dests = nt->rc_dests; >>> spin_unlock_bh(&tn->nametbl_lock); >>> if (skb) { >>> - tipc_node_broadcast(net, skb); >>> + tipc_node_broadcast(net, skb, rc_dests); >>> return 1; >>> } >>> return 0; >>> diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h >>> index 728bc7016c38..8064e1986e2c 100644 >>> --- a/net/tipc/name_table.h >>> +++ b/net/tipc/name_table.h >>> @@ -106,6 +106,8 @@ struct name_table { >>> struct list_head cluster_scope; >>> rwlock_t cluster_scope_lock; >>> u32 local_publ_count; >>> + u32 rc_dests; >>> + u32 snd_nxt; >>> }; >>> int tipc_nl_name_table_dump(struct sk_buff *skb, struct >>> netlink_callback *cb); >>> diff --git a/net/tipc/node.c b/net/tipc/node.c >>> index a4c2816c3746..030a51c4d1fa 100644 >>> --- a/net/tipc/node.c >>> +++ b/net/tipc/node.c >>> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >>> struct sk_buff_head arrvq; >>> struct sk_buff_head inputq2; >>> struct sk_buff_head namedq; >>> + u16 named_rcv_nxt; >>> + bool named_open; >>> }; >>> /** >>> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct >>> tipc_node *n) >>> write_unlock_bh(&n->lock); >>> if (flags & TIPC_NOTIFY_NODE_DOWN) >>> - tipc_publ_notify(net, publ_list, addr); >>> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >>> if (flags & TIPC_NOTIFY_NODE_UP) >>> - tipc_named_node_up(net, addr); >>> + tipc_named_node_up(net, addr, n->capabilities); >>> if (flags & TIPC_NOTIFY_LINK_UP) { >>> tipc_mon_peer_up(net, addr, bearer_id); >>> @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, >>> /* Clean up broadcast state */ >>> tipc_bcast_remove_peer(n->net, n->bc_entry.link); 
>>> + __skb_queue_purge(&n->bc_entry.namedq); >>> /* Abort any ongoing link failover */ >>> for (i = 0; i < MAX_BEARERS; i++) { >>> @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, >>> struct sk_buff_head *xmitq) >>> return 0; >>> } >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >>> rc_dests) >>> { >>> + struct sk_buff_head xmitq; >>> struct sk_buff *txskb; >>> struct tipc_node *n; >>> + u16 dummy; >>> u32 dst; >>> + /* Use broadcast if all nodes support it */ >>> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >>> + __skb_queue_head_init(&xmitq); >>> + __skb_queue_tail(&xmitq, skb); >>> + tipc_bcast_xmit(net, &xmitq, &dummy); >>> + return; >>> + } >>> + >>> + /* Otherwise use legacy replicast method */ >>> rcu_read_lock(); >>> list_for_each_entry_rcu(n, tipc_nodes(net), list) { >>> dst = n->addr; >>> @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, >>> struct sk_buff *skb) >>> tipc_node_xmit_skb(net, txskb, dst, 0); >>> } >>> rcu_read_unlock(); >>> - >>> kfree_skb(skb); >>> } >>> @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, >>> struct sk_buff *skb, int bearer_id >>> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >>> if (!skb_queue_empty(&n->bc_entry.namedq)) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + &n->bc_entry.named_open); >>> /* If reassembly or retransmission failure => reset all links >>> to peer */ >>> if (rc & TIPC_LINK_DOWN_EVT) >>> @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff >>> *skb, struct tipc_bearer *b) >>> tipc_node_link_down(n, bearer_id, false); >>> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >>> - tipc_named_rcv(net, &n->bc_entry.namedq); >>> + tipc_named_rcv(net, &n->bc_entry.namedq, >>> + &n->bc_entry.named_rcv_nxt, >>> + 
&n->bc_entry.named_open); >>> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >>> tipc_node_mcast_rcv(n); >>> diff --git a/net/tipc/node.h b/net/tipc/node.h >>> index a6803b449a2c..9f6f13f1604f 100644 >>> --- a/net/tipc/node.h >>> +++ b/net/tipc/node.h >>> @@ -55,7 +55,8 @@ enum { >>> TIPC_MCAST_RBCTL = (1 << 7), >>> TIPC_GAP_ACK_BLOCK = (1 << 8), >>> TIPC_TUNNEL_ENHANCED = (1 << 9), >>> - TIPC_NAGLE = (1 << 10) >>> + TIPC_NAGLE = (1 << 10), >>> + TIPC_NAMED_BCAST = (1 << 11) >>> }; >>> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >>> @@ -68,7 +69,8 @@ enum { >>> TIPC_MCAST_RBCTL | \ >>> TIPC_GAP_ACK_BLOCK | \ >>> TIPC_TUNNEL_ENHANCED | \ >>> - TIPC_NAGLE) >>> + TIPC_NAGLE | \ >>> + TIPC_NAMED_BCAST) >>> #define INVALID_BEARER_ID -1 >>> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct >>> sk_buff *skb, u32 dest, >>> u32 selector); >>> void tipc_node_subscribe(struct net *net, struct list_head *subscr, >>> u32 addr); >>> void tipc_node_unsubscribe(struct net *net, struct list_head >>> *subscr, u32 addr); >>> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >>> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >>> rc_dests); >>> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 >>> peer_port); >>> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >>> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool >>> connected); >> >> >> _______________________________________________ >> tipc-discussion mailing list >> tip...@li... >> https://lists.sourceforge.net/lists/listinfo/tipc-discussion >> > > > _______________________________________________ > tipc-discussion mailing list > tip...@li... > https://lists.sourceforge.net/lists/listinfo/tipc-discussion |
From: Hoang H. Le <hoa...@de...> - 2020-06-08 01:50:55
|
-----Original Message----- From: Jon Maloy <jm...@re...> Sent: Monday, June 8, 2020 2:14 AM To: tip...@li... Subject: Re: [tipc-discussion] [net-next] tipc: update a binding service via broadcast On 6/7/20 3:03 PM, Jon Maloy wrote: > > > On 6/7/20 12:24 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a >> cluster one >> by one. >> >> It is worth to use broadcast to update a binding service. This way, the >> binding table can be updated on all peer nodes in one shot. >> >> Broadcast is used when all peer nodes, as indicated by a new capability >> flag TIPC_NAMED_BCAST, support reception of this message type. >> >> Four problems need to be considered when introducing this feature. >> 1) When establishing a link to a new peer node we still update this by a >> unicast 'bulk' update. This may lead to race conditions, where a later >> broadcast publication/withdrawal bypass the 'bulk', resulting in >> disordered publications, or even that a withdrawal may arrive before the >> corresponding publication. We solve this by adding an 'is_last_bulk' bit >> in the last bulk messages so that it can be distinguished from all other >> messages. Only when this message has arrived do we open up for reception >> of broadcast publications/withdrawals. > Add a line feed between these paragraphs before you send the patch. > Otherwise, still acked by me. > > ///jon Oh, already posted... Just ignore my comment above. [Hoang] net-next is closed. I will re-post the patch later with your suggestion. ///jon > >> 2) When a first legacy node is added to the cluster all distribution >> will switch over to use the legacy 'replicast' method, while the >> opposite happens when the last legacy node leaves the cluster. 
This >> entails another risk of message disordering that has to be handled. We >> solve this by adding a sequence number to the broadcast/replicast >> messages, so that disordering can be discovered and corrected. Note >> however that we don't need to consider potential message loss or >> duplication at this protocol level. >> 3) Bulk messages don't contain any sequence numbers, and will always >> arrive in order. Hence we must exempt those from the sequence number >> control and deliver them unconditionally. We solve this by adding a new >> 'is_bulk' bit in those messages so that they can be recognized. >> 4) Legacy messages, which don't contain any new bits or sequence >> numbers, but neither can arrive out of order, also need to be exempt >> from the initial synchronization and sequence number check, and >> delivered unconditionally. Therefore, we add another 'is_not_legacy' bit >> to all new messages so that those can be distinguished from legacy >> messages and the latter delivered directly. >> >> Signed-off-by: Hoang Huu Le <hoa...@de...> >> Acked-by: Jon Maloy <jm...@re...> >> --- >> net/tipc/bcast.c | 6 +-- >> net/tipc/bcast.h | 4 +- >> net/tipc/link.c | 2 +- >> net/tipc/msg.h | 40 ++++++++++++++++ >> net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- >> net/tipc/name_distr.h | 9 ++-- >> net/tipc/name_table.c | 9 +++- >> net/tipc/name_table.h | 2 + >> net/tipc/node.c | 29 ++++++++--- >> net/tipc/node.h | 8 ++-- >> 10 files changed, 170 insertions(+), 48 deletions(-) >> >> diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c >> index 383f87bc1061..940d176e0e87 100644 >> --- a/net/tipc/bcast.c >> +++ b/net/tipc/bcast.c >> @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct >> net *net, int dests, >> * Consumes the buffer chain. 
>> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE >> */ >> -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> - u16 *cong_link_cnt) >> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> + u16 *cong_link_cnt) >> { >> struct tipc_link *l = tipc_bc_sndlink(net); >> struct sk_buff_head xmitq; >> @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) >> nl->local = false; >> } >> -u32 tipc_bcast_get_broadcast_mode(struct net *net) >> +u32 tipc_bcast_get_mode(struct net *net) >> { >> struct tipc_bc_base *bb = tipc_bc_base(net); >> diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h >> index 4240c95188b1..2d9352dc7b0e 100644 >> --- a/net/tipc/bcast.h >> +++ b/net/tipc/bcast.h >> @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool >> supp); >> int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, >> struct tipc_mc_method *method, struct tipc_nlist *dests, >> u16 *cong_link_cnt); >> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> + u16 *cong_link_cnt); >> int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct >> sk_buff *skb); >> void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, >> struct tipc_msg *hdr); >> @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >> tipc_nl_msg *msg, >> int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); >> int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); >> -u32 tipc_bcast_get_broadcast_mode(struct net *net); >> +u32 tipc_bcast_get_mode(struct net *net); >> u32 tipc_bcast_get_broadcast_ratio(struct net *net); >> void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head >> *defq, >> diff --git a/net/tipc/link.c b/net/tipc/link.c >> index ee3b8d0576b8..eac89a3e22ce 100644 >> --- a/net/tipc/link.c >> +++ b/net/tipc/link.c >> @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >> tipc_nl_msg *msg, >> void *hdr; >> struct nlattr *attrs; >> struct 
nlattr *prop; >> - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); >> + u32 bc_mode = tipc_bcast_get_mode(net); >> u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); >> if (!bcl) >> diff --git a/net/tipc/msg.h b/net/tipc/msg.h >> index 58660d56bc83..65119e81ff0c 100644 >> --- a/net/tipc/msg.h >> +++ b/net/tipc/msg.h >> @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct >> tipc_msg *m, u32 err) >> msg_set_bits(m, 1, 25, 0xf, err); >> } >> +static inline void msg_set_bulk(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 28, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_bulk(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 1, 28, 0x1); >> +} >> + >> +static inline void msg_set_last_bulk(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 27, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_last_bulk(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 1, 27, 0x1); >> +} >> + >> +static inline void msg_set_non_legacy(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 26, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_legacy(struct tipc_msg *m) >> +{ >> + return !msg_bits(m, 1, 26, 0x1); >> +} >> + >> static inline u32 msg_reroute_cnt(struct tipc_msg *m) >> { >> return msg_bits(m, 1, 21, 0xf); >> @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct >> tipc_msg *m, u32 p) >> msg_set_word(m, 4, p); >> } >> +static inline u16 msg_named_seqno(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 4, 0, 0xffff); >> +} >> + >> +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) >> +{ >> + msg_set_bits(m, 4, 0, 0xffff, n); >> +} >> + >> static inline u32 msg_destport(struct tipc_msg *m) >> { >> return msg_word(m, 5); >> diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c >> index 5feaf3b67380..481d480609f0 100644 >> --- a/net/tipc/name_distr.c >> +++ b/net/tipc/name_distr.c >> @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net >> *net, struct publication *publ) >> pr_warn("Publication distribution failure\n"); >> 
return NULL; >> } >> - >> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >> + msg_set_non_legacy(buf_msg(skb)); >> item = (struct distr_item *)msg_data(buf_msg(skb)); >> publ_to_item(item, publ); >> return skb; >> @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net >> *net, struct publication *publ) >> struct sk_buff *tipc_named_withdraw(struct net *net, struct >> publication *publ) >> { >> struct name_table *nt = tipc_name_table(net); >> - struct sk_buff *buf; >> struct distr_item *item; >> + struct sk_buff *skb; >> write_lock_bh(&nt->cluster_scope_lock); >> list_del(&publ->binding_node); >> @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net >> *net, struct publication *publ) >> if (publ->scope == TIPC_NODE_SCOPE) >> return NULL; >> - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >> - if (!buf) { >> + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >> + if (!skb) { >> pr_warn("Withdrawal distribution failure\n"); >> return NULL; >> } >> - >> - item = (struct distr_item *)msg_data(buf_msg(buf)); >> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >> + msg_set_non_legacy(buf_msg(skb)); >> + item = (struct distr_item *)msg_data(buf_msg(skb)); >> publ_to_item(item, publ); >> - return buf; >> + return skb; >> } >> /** >> @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net >> *net, struct publication *publ) >> * @pls: linked list of publication items to be packed into buffer >> chain >> */ >> static void named_distribute(struct net *net, struct sk_buff_head >> *list, >> - u32 dnode, struct list_head *pls) >> + u32 dnode, struct list_head *pls, u16 seqno) >> { >> struct publication *publ; >> struct sk_buff *skb = NULL; >> @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - >> INT_H_SIZE) / >> ITEM_SIZE) * ITEM_SIZE; >> u32 msg_rem = msg_dsz; >> + struct tipc_msg *hdr; >> list_for_each_entry(publ, 
pls, binding_node) { >> /* Prepare next buffer: */ >> @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> pr_warn("Bulk publication failure\n"); >> return; >> } >> - msg_set_bc_ack_invalid(buf_msg(skb), true); >> - item = (struct distr_item *)msg_data(buf_msg(skb)); >> + hdr = buf_msg(skb); >> + msg_set_bc_ack_invalid(hdr, true); >> + msg_set_bulk(hdr); >> + msg_set_non_legacy(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> } >> /* Pack publication into message: */ >> @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> } >> } >> if (skb) { >> - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); >> + hdr = buf_msg(skb); >> + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); >> skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); >> __skb_queue_tail(list, skb); >> } >> + hdr = buf_msg(skb_peek_tail(list)); >> + msg_set_last_bulk(hdr); >> + msg_set_named_seqno(hdr, seqno); >> } >> /** >> * tipc_named_node_up - tell specified node about all publications >> by this node >> */ >> -void tipc_named_node_up(struct net *net, u32 dnode) >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) >> { >> struct name_table *nt = tipc_name_table(net); >> + struct tipc_net *tn = tipc_net(net); >> struct sk_buff_head head; >> + u16 seqno; >> __skb_queue_head_init(&head); >> + spin_lock_bh(&tn->nametbl_lock); >> + if (!(capabilities & TIPC_NAMED_BCAST)) >> + nt->rc_dests++; >> + seqno = nt->snd_nxt; >> + spin_unlock_bh(&tn->nametbl_lock); >> read_lock_bh(&nt->cluster_scope_lock); >> - named_distribute(net, &head, dnode, &nt->cluster_scope); >> + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); >> tipc_node_xmit(net, &head, dnode, 0); >> read_unlock_bh(&nt->cluster_scope_lock); >> } >> @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net >> *net, u32 addr) >> spin_unlock_bh(&tn->nametbl_lock); >> } >> -void 
tipc_publ_notify(struct net *net, struct list_head >> *nsub_list, u32 addr) >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities) >> { >> + struct name_table *nt = tipc_name_table(net); >> + struct tipc_net *tn = tipc_net(net); >> + >> struct publication *publ, *tmp; >> list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) >> tipc_publ_purge(net, publ, addr); >> tipc_dist_queue_purge(net, addr); >> + spin_lock_bh(&tn->nametbl_lock); >> + if (!(capabilities & TIPC_NAMED_BCAST)) >> + nt->rc_dests--; >> + spin_unlock_bh(&tn->nametbl_lock); >> } >> /** >> @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net >> *net, struct distr_item *i, >> return false; >> } >> +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> +{ >> + struct sk_buff *skb, *tmp; >> + struct tipc_msg *hdr; >> + u16 seqno; >> + >> + skb_queue_walk_safe(namedq, skb, tmp) { >> + skb_linearize(skb); >> + hdr = buf_msg(skb); >> + seqno = msg_named_seqno(hdr); >> + if (msg_is_last_bulk(hdr)) { >> + *rcv_nxt = seqno; >> + *open = true; >> + } >> + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + >> + if (*open && (*rcv_nxt == seqno)) { >> + (*rcv_nxt)++; >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by >> another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = 
skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct >> publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct >> publication *publ); >> -void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 
100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct >> net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> spin_lock_bh(&tn->nametbl_lock); >> @@ -743,12 +744,14 @@ struct publication >> *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 >> type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> spin_lock_bh(&tn->nametbl_lock); >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, >> u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct >> netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index a4c2816c3746..030a51c4d1fa 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct 
sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct >> tipc_node *n) >> write_unlock_bh(&n->lock); >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, >> /* Clean up broadcast state */ >> tipc_bcast_remove_peer(n->net, n->bc_entry.link); >> + __skb_queue_purge(&n->bc_entry.namedq); >> /* Abort any ongoing link failover */ >> for (i = 0; i < MAX_BEARERS; i++) { >> @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, >> struct sk_buff_head *xmitq) >> return 0; >> } >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >> rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), list) { >> dst = n->addr; >> @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, >> struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, >> struct sk_buff *skb, int bearer_id >> /* Handle NAME_DISTRIBUTOR messages sent from 
1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> /* If reassembly or retransmission failure => reset all links >> to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff >> *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> #define INVALID_BEARER_ID -1 >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct >> sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, >> u32 addr); >> void tipc_node_unsubscribe(struct net *net, struct list_head >> *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >> rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 >> peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int 
tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool >> connected); > > > > _______________________________________________ > tipc-discussion mailing list > tip...@li... > https://lists.sourceforge.net/lists/listinfo/tipc-discussion > _______________________________________________ tipc-discussion mailing list tip...@li... https://lists.sourceforge.net/lists/listinfo/tipc-discussion |
From: Hoang H. Le <hoa...@de...> - 2020-06-08 01:47:04
|
-----Original Message----- From: Jon Maloy <jm...@re...> Sent: Monday, June 8, 2020 2:12 AM To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... Subject: Re: [next-net v6] tipc: update a binding service via broadcast On 6/6/20 11:10 PM, Hoang Huu Le wrote: > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Friday, June 5, 2020 8:03 PM > To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... > Subject: Re: [next-net v6] tipc: update a binding service via broadcast > > > > On 6/5/20 3:52 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a cluster one >> by one. >> [...] > Still not needed. This queue should be flushed in > tipc_node_lost_contact(), which I now see we don't do. > [Hoang] Yes, that's right. I will verify and send it out. Actually, this might explain the mysterious "Failed to withdraw" printouts you observed during testing earlier. Those withdraw items might be from a previous session just lingering in the queue. On the other hand, such a bug is so obvious and would have such grave consequences (what if there are old 'publish' items in the queue?) that I find it hard to believe that it can have remain unnoticed all this time. Are you sure we are not cleaning up this queue somewhere else? If it really is so we must also issue a correction patch to 'net' for this issue. [Hoang] Yes, I checked and already do the clean stuff at v2-3 in this feature ... So, we should apply this for 'net' too. ///jon > > This has to e fixed too. 
> ///jon >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); >> 
-void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff 
--git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index 803a3a6d0f50..ad8d7bce1f98 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) >> write_unlock_bh(&n->lock); >> >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) >> return 0; >> } >> >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), 
list) { >> dst = n->addr; >> @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> >> @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id >> >> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> /* If reassembly or retransmission failure => reset all links to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> >> #define INVALID_BEARER_ID -1 >> >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); >> void 
tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: David M. <da...@da...> - 2020-06-07 23:56:44
|
net-next is CLOSED, thank you |
From: Jon M. <jm...@re...> - 2020-06-07 19:13:57
|
On 6/7/20 3:03 PM, Jon Maloy wrote: > > > On 6/7/20 12:24 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a >> cluster one >> by one. >> >> It is worth to use broadcast to update a binding service. This way, the >> binding table can be updated on all peer nodes in one shot. >> >> Broadcast is used when all peer nodes, as indicated by a new capability >> flag TIPC_NAMED_BCAST, support reception of this message type. >> >> Four problems need to be considered when introducing this feature. >> 1) When establishing a link to a new peer node we still update this by a >> unicast 'bulk' update. This may lead to race conditions, where a later >> broadcast publication/withdrawal bypass the 'bulk', resulting in >> disordered publications, or even that a withdrawal may arrive before the >> corresponding publication. We solve this by adding an 'is_last_bulk' bit >> in the last bulk messages so that it can be distinguished from all other >> messages. Only when this message has arrived do we open up for reception >> of broadcast publications/withdrawals. > Add a line feed between these paragraphs before you send the patch. > Otherwise, still acked by me. > > ///jon Oh, already posted... Just ignore my comment above. ///jon > >> 2) When a first legacy node is added to the cluster all distribution >> will switch over to use the legacy 'replicast' method, while the >> opposite happens when the last legacy node leaves the cluster. This >> entails another risk of message disordering that has to be handled. We >> solve this by adding a sequence number to the broadcast/replicast >> messages, so that disordering can be discovered and corrected. 
Note >> however that we don't need to consider potential message loss or >> duplication at this protocol level. >> 3) Bulk messages don't contain any sequence numbers, and will always >> arrive in order. Hence we must exempt those from the sequence number >> control and deliver them unconditionally. We solve this by adding a new >> 'is_bulk' bit in those messages so that they can be recognized. >> 4) Legacy messages, which don't contain any new bits or sequence >> numbers, but neither can arrive out of order, also need to be exempt >> from the initial synchronization and sequence number check, and >> delivered unconditionally. Therefore, we add another 'is_not_legacy' bit >> to all new messages so that those can be distinguished from legacy >> messages and the latter delivered directly. >> >> Signed-off-by: Hoang Huu Le <hoa...@de...> >> Acked-by: Jon Maloy <jm...@re...> >> --- >> net/tipc/bcast.c | 6 +-- >> net/tipc/bcast.h | 4 +- >> net/tipc/link.c | 2 +- >> net/tipc/msg.h | 40 ++++++++++++++++ >> net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- >> net/tipc/name_distr.h | 9 ++-- >> net/tipc/name_table.c | 9 +++- >> net/tipc/name_table.h | 2 + >> net/tipc/node.c | 29 ++++++++--- >> net/tipc/node.h | 8 ++-- >> 10 files changed, 170 insertions(+), 48 deletions(-) >> >> diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c >> index 383f87bc1061..940d176e0e87 100644 >> --- a/net/tipc/bcast.c >> +++ b/net/tipc/bcast.c >> @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct >> net *net, int dests, >> * Consumes the buffer chain. 
>> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE >> */ >> -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> - u16 *cong_link_cnt) >> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> + u16 *cong_link_cnt) >> { >> struct tipc_link *l = tipc_bc_sndlink(net); >> struct sk_buff_head xmitq; >> @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) >> nl->local = false; >> } >> -u32 tipc_bcast_get_broadcast_mode(struct net *net) >> +u32 tipc_bcast_get_mode(struct net *net) >> { >> struct tipc_bc_base *bb = tipc_bc_base(net); >> diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h >> index 4240c95188b1..2d9352dc7b0e 100644 >> --- a/net/tipc/bcast.h >> +++ b/net/tipc/bcast.h >> @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool >> supp); >> int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, >> struct tipc_mc_method *method, struct tipc_nlist *dests, >> u16 *cong_link_cnt); >> +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, >> + u16 *cong_link_cnt); >> int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct >> sk_buff *skb); >> void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, >> struct tipc_msg *hdr); >> @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >> tipc_nl_msg *msg, >> int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); >> int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); >> -u32 tipc_bcast_get_broadcast_mode(struct net *net); >> +u32 tipc_bcast_get_mode(struct net *net); >> u32 tipc_bcast_get_broadcast_ratio(struct net *net); >> void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head >> *defq, >> diff --git a/net/tipc/link.c b/net/tipc/link.c >> index ee3b8d0576b8..eac89a3e22ce 100644 >> --- a/net/tipc/link.c >> +++ b/net/tipc/link.c >> @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct >> tipc_nl_msg *msg, >> void *hdr; >> struct nlattr *attrs; >> struct 
nlattr *prop; >> - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); >> + u32 bc_mode = tipc_bcast_get_mode(net); >> u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); >> if (!bcl) >> diff --git a/net/tipc/msg.h b/net/tipc/msg.h >> index 58660d56bc83..65119e81ff0c 100644 >> --- a/net/tipc/msg.h >> +++ b/net/tipc/msg.h >> @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct >> tipc_msg *m, u32 err) >> msg_set_bits(m, 1, 25, 0xf, err); >> } >> +static inline void msg_set_bulk(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 28, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_bulk(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 1, 28, 0x1); >> +} >> + >> +static inline void msg_set_last_bulk(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 27, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_last_bulk(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 1, 27, 0x1); >> +} >> + >> +static inline void msg_set_non_legacy(struct tipc_msg *m) >> +{ >> + msg_set_bits(m, 1, 26, 0x1, 1); >> +} >> + >> +static inline u32 msg_is_legacy(struct tipc_msg *m) >> +{ >> + return !msg_bits(m, 1, 26, 0x1); >> +} >> + >> static inline u32 msg_reroute_cnt(struct tipc_msg *m) >> { >> return msg_bits(m, 1, 21, 0xf); >> @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct >> tipc_msg *m, u32 p) >> msg_set_word(m, 4, p); >> } >> +static inline u16 msg_named_seqno(struct tipc_msg *m) >> +{ >> + return msg_bits(m, 4, 0, 0xffff); >> +} >> + >> +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) >> +{ >> + msg_set_bits(m, 4, 0, 0xffff, n); >> +} >> + >> static inline u32 msg_destport(struct tipc_msg *m) >> { >> return msg_word(m, 5); >> diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c >> index 5feaf3b67380..481d480609f0 100644 >> --- a/net/tipc/name_distr.c >> +++ b/net/tipc/name_distr.c >> @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net >> *net, struct publication *publ) >> pr_warn("Publication distribution failure\n"); >> 
return NULL; >> } >> - >> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >> + msg_set_non_legacy(buf_msg(skb)); >> item = (struct distr_item *)msg_data(buf_msg(skb)); >> publ_to_item(item, publ); >> return skb; >> @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net >> *net, struct publication *publ) >> struct sk_buff *tipc_named_withdraw(struct net *net, struct >> publication *publ) >> { >> struct name_table *nt = tipc_name_table(net); >> - struct sk_buff *buf; >> struct distr_item *item; >> + struct sk_buff *skb; >> write_lock_bh(&nt->cluster_scope_lock); >> list_del(&publ->binding_node); >> @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net >> *net, struct publication *publ) >> if (publ->scope == TIPC_NODE_SCOPE) >> return NULL; >> - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >> - if (!buf) { >> + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); >> + if (!skb) { >> pr_warn("Withdrawal distribution failure\n"); >> return NULL; >> } >> - >> - item = (struct distr_item *)msg_data(buf_msg(buf)); >> + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); >> + msg_set_non_legacy(buf_msg(skb)); >> + item = (struct distr_item *)msg_data(buf_msg(skb)); >> publ_to_item(item, publ); >> - return buf; >> + return skb; >> } >> /** >> @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net >> *net, struct publication *publ) >> * @pls: linked list of publication items to be packed into buffer >> chain >> */ >> static void named_distribute(struct net *net, struct sk_buff_head >> *list, >> - u32 dnode, struct list_head *pls) >> + u32 dnode, struct list_head *pls, u16 seqno) >> { >> struct publication *publ; >> struct sk_buff *skb = NULL; >> @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - >> INT_H_SIZE) / >> ITEM_SIZE) * ITEM_SIZE; >> u32 msg_rem = msg_dsz; >> + struct tipc_msg *hdr; >> list_for_each_entry(publ, 
pls, binding_node) { >> /* Prepare next buffer: */ >> @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> pr_warn("Bulk publication failure\n"); >> return; >> } >> - msg_set_bc_ack_invalid(buf_msg(skb), true); >> - item = (struct distr_item *)msg_data(buf_msg(skb)); >> + hdr = buf_msg(skb); >> + msg_set_bc_ack_invalid(hdr, true); >> + msg_set_bulk(hdr); >> + msg_set_non_legacy(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> } >> /* Pack publication into message: */ >> @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, >> struct sk_buff_head *list, >> } >> } >> if (skb) { >> - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); >> + hdr = buf_msg(skb); >> + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); >> skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); >> __skb_queue_tail(list, skb); >> } >> + hdr = buf_msg(skb_peek_tail(list)); >> + msg_set_last_bulk(hdr); >> + msg_set_named_seqno(hdr, seqno); >> } >> /** >> * tipc_named_node_up - tell specified node about all publications >> by this node >> */ >> -void tipc_named_node_up(struct net *net, u32 dnode) >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) >> { >> struct name_table *nt = tipc_name_table(net); >> + struct tipc_net *tn = tipc_net(net); >> struct sk_buff_head head; >> + u16 seqno; >> __skb_queue_head_init(&head); >> + spin_lock_bh(&tn->nametbl_lock); >> + if (!(capabilities & TIPC_NAMED_BCAST)) >> + nt->rc_dests++; >> + seqno = nt->snd_nxt; >> + spin_unlock_bh(&tn->nametbl_lock); >> read_lock_bh(&nt->cluster_scope_lock); >> - named_distribute(net, &head, dnode, &nt->cluster_scope); >> + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); >> tipc_node_xmit(net, &head, dnode, 0); >> read_unlock_bh(&nt->cluster_scope_lock); >> } >> @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net >> *net, u32 addr) >> spin_unlock_bh(&tn->nametbl_lock); >> } >> -void 
tipc_publ_notify(struct net *net, struct list_head >> *nsub_list, u32 addr) >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities) >> { >> + struct name_table *nt = tipc_name_table(net); >> + struct tipc_net *tn = tipc_net(net); >> + >> struct publication *publ, *tmp; >> list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) >> tipc_publ_purge(net, publ, addr); >> tipc_dist_queue_purge(net, addr); >> + spin_lock_bh(&tn->nametbl_lock); >> + if (!(capabilities & TIPC_NAMED_BCAST)) >> + nt->rc_dests--; >> + spin_unlock_bh(&tn->nametbl_lock); >> } >> /** >> @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net >> *net, struct distr_item *i, >> return false; >> } >> +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> +{ >> + struct sk_buff *skb, *tmp; >> + struct tipc_msg *hdr; >> + u16 seqno; >> + >> + skb_queue_walk_safe(namedq, skb, tmp) { >> + skb_linearize(skb); >> + hdr = buf_msg(skb); >> + seqno = msg_named_seqno(hdr); >> + if (msg_is_last_bulk(hdr)) { >> + *rcv_nxt = seqno; >> + *open = true; >> + } >> + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + >> + if (*open && (*rcv_nxt == seqno)) { >> + (*rcv_nxt)++; >> + __skb_unlink(skb, namedq); >> + return skb; >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by >> another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = 
skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct >> publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct >> publication *publ); >> -void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 
100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct >> net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> spin_lock_bh(&tn->nametbl_lock); >> @@ -743,12 +744,14 @@ struct publication >> *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 >> type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> spin_lock_bh(&tn->nametbl_lock); >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, >> u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct >> netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index a4c2816c3746..030a51c4d1fa 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct 
sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct >> tipc_node *n) >> write_unlock_bh(&n->lock); >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, >> /* Clean up broadcast state */ >> tipc_bcast_remove_peer(n->net, n->bc_entry.link); >> + __skb_queue_purge(&n->bc_entry.namedq); >> /* Abort any ongoing link failover */ >> for (i = 0; i < MAX_BEARERS; i++) { >> @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, >> struct sk_buff_head *xmitq) >> return 0; >> } >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >> rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), list) { >> dst = n->addr; >> @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, >> struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, >> struct sk_buff *skb, int bearer_id >> /* Handle NAME_DISTRIBUTOR messages sent from 
1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> /* If reassembly or retransmission failure => reset all links >> to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff >> *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> #define INVALID_BEARER_ID -1 >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct >> sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, >> u32 addr); >> void tipc_node_unsubscribe(struct net *net, struct list_head >> *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int >> rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 >> peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int 
tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool >> connected); > > > > _______________________________________________ > tipc-discussion mailing list > tip...@li... > https://lists.sourceforge.net/lists/listinfo/tipc-discussion > |
From: Jon M. <jm...@re...> - 2020-06-07 19:12:02
|
On 6/6/20 11:10 PM, Hoang Huu Le wrote: > -----Original Message----- > From: Jon Maloy <jm...@re...> > Sent: Friday, June 5, 2020 8:03 PM > To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... > Subject: Re: [next-net v6] tipc: update a binding service via broadcast > > > > On 6/5/20 3:52 AM, Hoang Huu Le wrote: >> Currently, updating binding table (add service binding to >> name table/withdraw a service binding) is being sent over replicast. >> However, if we are scaling up clusters to > 100 nodes/containers this >> method is less affection because of looping through nodes in a cluster one >> by one. >> [...] > Still not needed. This queue should be flushed in > tipc_node_lost_contact(), which I now see we don't do. > [Hoang] Yes, that's right. I will verify and send it out. Actually, this might explain the mysterious "Failed to withdraw" printouts you observed during testing earlier. Those withdraw items might be from a previous session just lingering in the queue. On the other hand, such a bug is so obvious and would have such grave consequences (what if there are old 'publish' items in the queue?) that I find it hard to believe that it can have remain unnoticed all this time. Are you sure we are not cleaning up this queue somewhere else? If it really is so we must also issue a correction patch to 'net' for this issue. ///jon > > This has to e fixed too. 
> ///jon >> + } >> + } >> + return NULL; >> +} >> + >> /** >> * tipc_named_rcv - process name table update messages sent by another node >> */ >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open) >> { >> - struct tipc_net *tn = net_generic(net, tipc_net_id); >> - struct tipc_msg *msg; >> + struct tipc_net *tn = tipc_net(net); >> struct distr_item *item; >> - uint count; >> - u32 node; >> + struct tipc_msg *hdr; >> struct sk_buff *skb; >> - int mtype; >> + u32 count, node = 0; >> >> spin_lock_bh(&tn->nametbl_lock); >> - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { >> - skb_linearize(skb); >> - msg = buf_msg(skb); >> - mtype = msg_type(msg); >> - item = (struct distr_item *)msg_data(msg); >> - count = msg_data_sz(msg) / ITEM_SIZE; >> - node = msg_orignode(msg); >> + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { >> + hdr = buf_msg(skb); >> + node = msg_orignode(hdr); >> + item = (struct distr_item *)msg_data(hdr); >> + count = msg_data_sz(hdr) / ITEM_SIZE; >> while (count--) { >> - tipc_update_nametbl(net, item, node, mtype); >> + tipc_update_nametbl(net, item, node, msg_type(hdr)); >> item++; >> } >> kfree_skb(skb); >> @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) >> publ->node = self; >> list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) >> publ->node = self; >> - >> + nt->rc_dests = 0; >> spin_unlock_bh(&tn->nametbl_lock); >> } >> diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h >> index 63fc73e0fa6c..092323158f06 100644 >> --- a/net/tipc/name_distr.h >> +++ b/net/tipc/name_distr.h >> @@ -67,11 +67,14 @@ struct distr_item { >> __be32 key; >> }; >> >> +void tipc_named_bcast(struct net *net, struct sk_buff *skb); >> struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); >> struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); >> 
-void tipc_named_node_up(struct net *net, u32 dnode); >> -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); >> +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); >> +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, >> + u16 *rcv_nxt, bool *open); >> void tipc_named_reinit(struct net *net); >> -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); >> +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, >> + u32 addr, u16 capabilities); >> >> #endif >> diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c >> index 359b2bc888cf..2ac33d32edc2 100644 >> --- a/net/tipc/name_table.c >> +++ b/net/tipc/name_table.c >> @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> struct tipc_net *tn = tipc_net(net); >> struct publication *p = NULL; >> struct sk_buff *skb = NULL; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, >> nt->local_publ_count++; >> skb = tipc_named_publish(net, p); >> } >> + rc_dests = nt->rc_dests; >> exit: >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return p; >> + >> } >> >> /** >> @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> u32 self = tipc_own_addr(net); >> struct sk_buff *skb = NULL; >> struct publication *p; >> + u32 rc_dests; >> >> spin_lock_bh(&tn->nametbl_lock); >> >> @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, >> pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", >> type, lower, upper, key); >> } >> + rc_dests = nt->rc_dests; >> spin_unlock_bh(&tn->nametbl_lock); >> >> if (skb) { >> - tipc_node_broadcast(net, skb); >> + tipc_node_broadcast(net, skb, rc_dests); >> return 1; >> } >> return 0; >> diff 
--git a/net/tipc/name_table.h b/net/tipc/name_table.h >> index 728bc7016c38..8064e1986e2c 100644 >> --- a/net/tipc/name_table.h >> +++ b/net/tipc/name_table.h >> @@ -106,6 +106,8 @@ struct name_table { >> struct list_head cluster_scope; >> rwlock_t cluster_scope_lock; >> u32 local_publ_count; >> + u32 rc_dests; >> + u32 snd_nxt; >> }; >> >> int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); >> diff --git a/net/tipc/node.c b/net/tipc/node.c >> index 803a3a6d0f50..ad8d7bce1f98 100644 >> --- a/net/tipc/node.c >> +++ b/net/tipc/node.c >> @@ -75,6 +75,8 @@ struct tipc_bclink_entry { >> struct sk_buff_head arrvq; >> struct sk_buff_head inputq2; >> struct sk_buff_head namedq; >> + u16 named_rcv_nxt; >> + bool named_open; >> }; >> >> /** >> @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) >> write_unlock_bh(&n->lock); >> >> if (flags & TIPC_NOTIFY_NODE_DOWN) >> - tipc_publ_notify(net, publ_list, addr); >> + tipc_publ_notify(net, publ_list, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_NODE_UP) >> - tipc_named_node_up(net, addr); >> + tipc_named_node_up(net, addr, n->capabilities); >> >> if (flags & TIPC_NOTIFY_LINK_UP) { >> tipc_mon_peer_up(net, addr, bearer_id); >> @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) >> return 0; >> } >> >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) >> { >> + struct sk_buff_head xmitq; >> struct sk_buff *txskb; >> struct tipc_node *n; >> + u16 dummy; >> u32 dst; >> >> + /* Use broadcast if all nodes support it */ >> + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { >> + __skb_queue_head_init(&xmitq); >> + __skb_queue_tail(&xmitq, skb); >> + tipc_bcast_xmit(net, &xmitq, &dummy); >> + return; >> + } >> + >> + /* Otherwise use legacy replicast method */ >> rcu_read_lock(); >> list_for_each_entry_rcu(n, tipc_nodes(net), 
list) { >> dst = n->addr; >> @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) >> tipc_node_xmit_skb(net, txskb, dst, 0); >> } >> rcu_read_unlock(); >> - >> kfree_skb(skb); >> } >> >> @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id >> >> /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ >> if (!skb_queue_empty(&n->bc_entry.namedq)) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> /* If reassembly or retransmission failure => reset all links to peer */ >> if (rc & TIPC_LINK_DOWN_EVT) >> @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) >> tipc_node_link_down(n, bearer_id, false); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) >> - tipc_named_rcv(net, &n->bc_entry.namedq); >> + tipc_named_rcv(net, &n->bc_entry.namedq, >> + &n->bc_entry.named_rcv_nxt, >> + &n->bc_entry.named_open); >> >> if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) >> tipc_node_mcast_rcv(n); >> diff --git a/net/tipc/node.h b/net/tipc/node.h >> index a6803b449a2c..9f6f13f1604f 100644 >> --- a/net/tipc/node.h >> +++ b/net/tipc/node.h >> @@ -55,7 +55,8 @@ enum { >> TIPC_MCAST_RBCTL = (1 << 7), >> TIPC_GAP_ACK_BLOCK = (1 << 8), >> TIPC_TUNNEL_ENHANCED = (1 << 9), >> - TIPC_NAGLE = (1 << 10) >> + TIPC_NAGLE = (1 << 10), >> + TIPC_NAMED_BCAST = (1 << 11) >> }; >> >> #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ >> @@ -68,7 +69,8 @@ enum { >> TIPC_MCAST_RBCTL | \ >> TIPC_GAP_ACK_BLOCK | \ >> TIPC_TUNNEL_ENHANCED | \ >> - TIPC_NAGLE) >> + TIPC_NAGLE | \ >> + TIPC_NAMED_BCAST) >> >> #define INVALID_BEARER_ID -1 >> >> @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, >> u32 selector); >> void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); >> void 
tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); >> -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); >> +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); >> int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); >> void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); >> int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Jon M. <jm...@re...> - 2020-06-07 19:03:42
|
On 6/7/20 12:24 AM, Hoang Huu Le wrote: > Currently, updating binding table (add service binding to > name table/withdraw a service binding) is being sent over replicast. > However, if we are scaling up clusters to > 100 nodes/containers this > method is less affection because of looping through nodes in a cluster one > by one. > > It is worth to use broadcast to update a binding service. This way, the > binding table can be updated on all peer nodes in one shot. > > Broadcast is used when all peer nodes, as indicated by a new capability > flag TIPC_NAMED_BCAST, support reception of this message type. > > Four problems need to be considered when introducing this feature. > 1) When establishing a link to a new peer node we still update this by a > unicast 'bulk' update. This may lead to race conditions, where a later > broadcast publication/withdrawal bypass the 'bulk', resulting in > disordered publications, or even that a withdrawal may arrive before the > corresponding publication. We solve this by adding an 'is_last_bulk' bit > in the last bulk messages so that it can be distinguished from all other > messages. Only when this message has arrived do we open up for reception > of broadcast publications/withdrawals. Add a line feed between these paragraphs before you send the patch. Otherwise, still acked by me. ///join > 2) When a first legacy node is added to the cluster all distribution > will switch over to use the legacy 'replicast' method, while the > opposite happens when the last legacy node leaves the cluster. This > entails another risk of message disordering that has to be handled. We > solve this by adding a sequence number to the broadcast/replicast > messages, so that disordering can be discovered and corrected. Note > however that we don't need to consider potential message loss or > duplication at this protocol level. > 3) Bulk messages don't contain any sequence numbers, and will always > arrive in order. 
Hence we must exempt those from the sequence number > control and deliver them unconditionally. We solve this by adding a new > 'is_bulk' bit in those messages so that they can be recognized. > 4) Legacy messages, which don't contain any new bits or sequence > numbers, but neither can arrive out of order, also need to be exempt > from the initial synchronization and sequence number check, and > delivered unconditionally. Therefore, we add another 'is_not_legacy' bit > to all new messages so that those can be distinguished from legacy > messages and the latter delivered directly. > > Signed-off-by: Hoang Huu Le <hoa...@de...> > Acked-by: Jon Maloy <jm...@re...> > --- > net/tipc/bcast.c | 6 +-- > net/tipc/bcast.h | 4 +- > net/tipc/link.c | 2 +- > net/tipc/msg.h | 40 ++++++++++++++++ > net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- > net/tipc/name_distr.h | 9 ++-- > net/tipc/name_table.c | 9 +++- > net/tipc/name_table.h | 2 + > net/tipc/node.c | 29 ++++++++--- > net/tipc/node.h | 8 ++-- > 10 files changed, 170 insertions(+), 48 deletions(-) > > diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c > index 383f87bc1061..940d176e0e87 100644 > --- a/net/tipc/bcast.c > +++ b/net/tipc/bcast.c > @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, > * Consumes the buffer chain. 
> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE > */ > -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > - u16 *cong_link_cnt) > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt) > { > struct tipc_link *l = tipc_bc_sndlink(net); > struct sk_buff_head xmitq; > @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) > nl->local = false; > } > > -u32 tipc_bcast_get_broadcast_mode(struct net *net) > +u32 tipc_bcast_get_mode(struct net *net) > { > struct tipc_bc_base *bb = tipc_bc_base(net); > > diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h > index 4240c95188b1..2d9352dc7b0e 100644 > --- a/net/tipc/bcast.h > +++ b/net/tipc/bcast.h > @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); > int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, > struct tipc_mc_method *method, struct tipc_nlist *dests, > u16 *cong_link_cnt); > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt); > int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); > void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, > struct tipc_msg *hdr); > @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, > int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); > int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); > > -u32 tipc_bcast_get_broadcast_mode(struct net *net); > +u32 tipc_bcast_get_mode(struct net *net); > u32 tipc_bcast_get_broadcast_ratio(struct net *net); > > void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, > diff --git a/net/tipc/link.c b/net/tipc/link.c > index ee3b8d0576b8..eac89a3e22ce 100644 > --- a/net/tipc/link.c > +++ b/net/tipc/link.c > @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, > void *hdr; > struct nlattr *attrs; > struct nlattr *prop; > - u32 bc_mode = 
tipc_bcast_get_broadcast_mode(net); > + u32 bc_mode = tipc_bcast_get_mode(net); > u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); > > if (!bcl) > diff --git a/net/tipc/msg.h b/net/tipc/msg.h > index 58660d56bc83..65119e81ff0c 100644 > --- a/net/tipc/msg.h > +++ b/net/tipc/msg.h > @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) > msg_set_bits(m, 1, 25, 0xf, err); > } > > +static inline void msg_set_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 28, 0x1, 1); > +} > + > +static inline u32 msg_is_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 28, 0x1); > +} > + > +static inline void msg_set_last_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 27, 0x1, 1); > +} > + > +static inline u32 msg_is_last_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 27, 0x1); > +} > + > +static inline void msg_set_non_legacy(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 26, 0x1, 1); > +} > + > +static inline u32 msg_is_legacy(struct tipc_msg *m) > +{ > + return !msg_bits(m, 1, 26, 0x1); > +} > + > static inline u32 msg_reroute_cnt(struct tipc_msg *m) > { > return msg_bits(m, 1, 21, 0xf); > @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) > msg_set_word(m, 4, p); > } > > +static inline u16 msg_named_seqno(struct tipc_msg *m) > +{ > + return msg_bits(m, 4, 0, 0xffff); > +} > + > +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) > +{ > + msg_set_bits(m, 4, 0, 0xffff, n); > +} > + > static inline u32 msg_destport(struct tipc_msg *m) > { > return msg_word(m, 5); > diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c > index 5feaf3b67380..481d480609f0 100644 > --- a/net/tipc/name_distr.c > +++ b/net/tipc/name_distr.c > @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > pr_warn("Publication distribution failure\n"); > return NULL; > } > - > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + 
msg_set_non_legacy(buf_msg(skb)); > item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > return skb; > @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > { > struct name_table *nt = tipc_name_table(net); > - struct sk_buff *buf; > struct distr_item *item; > + struct sk_buff *skb; > > write_lock_bh(&nt->cluster_scope_lock); > list_del(&publ->binding_node); > @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > if (publ->scope == TIPC_NODE_SCOPE) > return NULL; > > - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > - if (!buf) { > + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > + if (!skb) { > pr_warn("Withdrawal distribution failure\n"); > return NULL; > } > - > - item = (struct distr_item *)msg_data(buf_msg(buf)); > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + msg_set_non_legacy(buf_msg(skb)); > + item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > - return buf; > + return skb; > } > > /** > @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > * @pls: linked list of publication items to be packed into buffer chain > */ > static void named_distribute(struct net *net, struct sk_buff_head *list, > - u32 dnode, struct list_head *pls) > + u32 dnode, struct list_head *pls, u16 seqno) > { > struct publication *publ; > struct sk_buff *skb = NULL; > @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / > ITEM_SIZE) * ITEM_SIZE; > u32 msg_rem = msg_dsz; > + struct tipc_msg *hdr; > > list_for_each_entry(publ, pls, binding_node) { > /* Prepare next buffer: */ > @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, 
> pr_warn("Bulk publication failure\n"); > return; > } > - msg_set_bc_ack_invalid(buf_msg(skb), true); > - item = (struct distr_item *)msg_data(buf_msg(skb)); > + hdr = buf_msg(skb); > + msg_set_bc_ack_invalid(hdr, true); > + msg_set_bulk(hdr); > + msg_set_non_legacy(hdr); > + item = (struct distr_item *)msg_data(hdr); > } > > /* Pack publication into message: */ > @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > } > } > if (skb) { > - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); > + hdr = buf_msg(skb); > + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); > skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); > __skb_queue_tail(list, skb); > } > + hdr = buf_msg(skb_peek_tail(list)); > + msg_set_last_bulk(hdr); > + msg_set_named_seqno(hdr, seqno); > } > > /** > * tipc_named_node_up - tell specified node about all publications by this node > */ > -void tipc_named_node_up(struct net *net, u32 dnode) > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) > { > struct name_table *nt = tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > struct sk_buff_head head; > + u16 seqno; > > __skb_queue_head_init(&head); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests++; > + seqno = nt->snd_nxt; > + spin_unlock_bh(&tn->nametbl_lock); > > read_lock_bh(&nt->cluster_scope_lock); > - named_distribute(net, &head, dnode, &nt->cluster_scope); > + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); > tipc_node_xmit(net, &head, dnode, 0); > read_unlock_bh(&nt->cluster_scope_lock); > } > @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) > spin_unlock_bh(&tn->nametbl_lock); > } > > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities) > { > + struct name_table *nt = 
tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > + > struct publication *publ, *tmp; > > list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) > tipc_publ_purge(net, publ, addr); > tipc_dist_queue_purge(net, addr); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests--; > + spin_unlock_bh(&tn->nametbl_lock); > } > > /** > @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, > return false; > } > > +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > +{ > + struct sk_buff *skb, *tmp; > + struct tipc_msg *hdr; > + u16 seqno; > + > + skb_queue_walk_safe(namedq, skb, tmp) { > + skb_linearize(skb); > + hdr = buf_msg(skb); > + seqno = msg_named_seqno(hdr); > + if (msg_is_last_bulk(hdr)) { > + *rcv_nxt = seqno; > + *open = true; > + } > + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (*open && (*rcv_nxt == seqno)) { > + (*rcv_nxt)++; > + __skb_unlink(skb, namedq); > + return skb; > + } > + } > + return NULL; > +} > + > /** > * tipc_named_rcv - process name table update messages sent by another node > */ > -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > { > - struct tipc_net *tn = net_generic(net, tipc_net_id); > - struct tipc_msg *msg; > + struct tipc_net *tn = tipc_net(net); > struct distr_item *item; > - uint count; > - u32 node; > + struct tipc_msg *hdr; > struct sk_buff *skb; > - int mtype; > + u32 count, node = 0; > > spin_lock_bh(&tn->nametbl_lock); > - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { > - skb_linearize(skb); > - msg = buf_msg(skb); > - mtype = msg_type(msg); > - item = (struct distr_item *)msg_data(msg); > - count = msg_data_sz(msg) / ITEM_SIZE; > - node = msg_orignode(msg); > + while ((skb = 
tipc_named_dequeue(namedq, rcv_nxt, open))) { > + hdr = buf_msg(skb); > + node = msg_orignode(hdr); > + item = (struct distr_item *)msg_data(hdr); > + count = msg_data_sz(hdr) / ITEM_SIZE; > while (count--) { > - tipc_update_nametbl(net, item, node, mtype); > + tipc_update_nametbl(net, item, node, msg_type(hdr)); > item++; > } > kfree_skb(skb); > @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) > publ->node = self; > list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) > publ->node = self; > - > + nt->rc_dests = 0; > spin_unlock_bh(&tn->nametbl_lock); > } > diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h > index 63fc73e0fa6c..092323158f06 100644 > --- a/net/tipc/name_distr.h > +++ b/net/tipc/name_distr.h > @@ -67,11 +67,14 @@ struct distr_item { > __be32 key; > }; > > +void tipc_named_bcast(struct net *net, struct sk_buff *skb); > struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); > -void tipc_named_node_up(struct net *net, u32 dnode); > -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open); > void tipc_named_reinit(struct net *net); > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities); > > #endif > diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c > index 359b2bc888cf..2ac33d32edc2 100644 > --- a/net/tipc/name_table.c > +++ b/net/tipc/name_table.c > @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > struct tipc_net *tn = tipc_net(net); > struct publication *p = NULL; > struct sk_buff *skb = NULL; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); 
> > @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > nt->local_publ_count++; > skb = tipc_named_publish(net, p); > } > + rc_dests = nt->rc_dests; > exit: > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return p; > + > } > > /** > @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > u32 self = tipc_own_addr(net); > struct sk_buff *skb = NULL; > struct publication *p; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", > type, lower, upper, key); > } > + rc_dests = nt->rc_dests; > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) { > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return 1; > } > return 0; > diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h > index 728bc7016c38..8064e1986e2c 100644 > --- a/net/tipc/name_table.h > +++ b/net/tipc/name_table.h > @@ -106,6 +106,8 @@ struct name_table { > struct list_head cluster_scope; > rwlock_t cluster_scope_lock; > u32 local_publ_count; > + u32 rc_dests; > + u32 snd_nxt; > }; > > int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); > diff --git a/net/tipc/node.c b/net/tipc/node.c > index a4c2816c3746..030a51c4d1fa 100644 > --- a/net/tipc/node.c > +++ b/net/tipc/node.c > @@ -75,6 +75,8 @@ struct tipc_bclink_entry { > struct sk_buff_head arrvq; > struct sk_buff_head inputq2; > struct sk_buff_head namedq; > + u16 named_rcv_nxt; > + bool named_open; > }; > > /** > @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) > write_unlock_bh(&n->lock); > > if (flags & TIPC_NOTIFY_NODE_DOWN) > - tipc_publ_notify(net, publ_list, addr); > + tipc_publ_notify(net, publ_list, addr, n->capabilities); > > if (flags & 
TIPC_NOTIFY_NODE_UP) > - tipc_named_node_up(net, addr); > + tipc_named_node_up(net, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_LINK_UP) { > tipc_mon_peer_up(net, addr, bearer_id); > @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, > > /* Clean up broadcast state */ > tipc_bcast_remove_peer(n->net, n->bc_entry.link); > + __skb_queue_purge(&n->bc_entry.namedq); > > /* Abort any ongoing link failover */ > for (i = 0; i < MAX_BEARERS; i++) { > @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) > return 0; > } > > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) > { > + struct sk_buff_head xmitq; > struct sk_buff *txskb; > struct tipc_node *n; > + u16 dummy; > u32 dst; > > + /* Use broadcast if all nodes support it */ > + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { > + __skb_queue_head_init(&xmitq); > + __skb_queue_tail(&xmitq, skb); > + tipc_bcast_xmit(net, &xmitq, &dummy); > + return; > + } > + > + /* Otherwise use legacy replicast method */ > rcu_read_lock(); > list_for_each_entry_rcu(n, tipc_nodes(net), list) { > dst = n->addr; > @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > tipc_node_xmit_skb(net, txskb, dst, 0); > } > rcu_read_unlock(); > - > kfree_skb(skb); > } > > @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id > > /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ > if (!skb_queue_empty(&n->bc_entry.namedq)) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > /* If reassembly or retransmission failure => reset all links to peer */ > if (rc & TIPC_LINK_DOWN_EVT) > @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) > 
tipc_node_link_down(n, bearer_id, false); > > if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) > tipc_node_mcast_rcv(n); > diff --git a/net/tipc/node.h b/net/tipc/node.h > index a6803b449a2c..9f6f13f1604f 100644 > --- a/net/tipc/node.h > +++ b/net/tipc/node.h > @@ -55,7 +55,8 @@ enum { > TIPC_MCAST_RBCTL = (1 << 7), > TIPC_GAP_ACK_BLOCK = (1 << 8), > TIPC_TUNNEL_ENHANCED = (1 << 9), > - TIPC_NAGLE = (1 << 10) > + TIPC_NAGLE = (1 << 10), > + TIPC_NAMED_BCAST = (1 << 11) > }; > > #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ > @@ -68,7 +69,8 @@ enum { > TIPC_MCAST_RBCTL | \ > TIPC_GAP_ACK_BLOCK | \ > TIPC_TUNNEL_ENHANCED | \ > - TIPC_NAGLE) > + TIPC_NAGLE | \ > + TIPC_NAMED_BCAST) > > #define INVALID_BEARER_ID -1 > > @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, > u32 selector); > void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); > void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); > int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); > void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); > int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Hoang H. Le <hoa...@de...> - 2020-06-07 04:25:27
|
Currently, updating binding table (add service binding to name table/withdraw a service binding) is being sent over replicast. However, if we are scaling up clusters to > 100 nodes/containers this method is less effective because of looping through nodes in a cluster one by one. It is worthwhile to use broadcast to update a binding service. This way, the binding table can be updated on all peer nodes in one shot. Broadcast is used when all peer nodes, as indicated by a new capability flag TIPC_NAMED_BCAST, support reception of this message type. Four problems need to be considered when introducing this feature. 1) When establishing a link to a new peer node we still update this by a unicast 'bulk' update. This may lead to race conditions, where a later broadcast publication/withdrawal bypasses the 'bulk', resulting in disordered publications, or even that a withdrawal may arrive before the corresponding publication. We solve this by adding an 'is_last_bulk' bit in the last bulk message so that it can be distinguished from all other messages. Only when this message has arrived do we open up for reception of broadcast publications/withdrawals. 2) When a first legacy node is added to the cluster all distribution will switch over to use the legacy 'replicast' method, while the opposite happens when the last legacy node leaves the cluster. This entails another risk of message disordering that has to be handled. We solve this by adding a sequence number to the broadcast/replicast messages, so that disordering can be discovered and corrected. Note however that we don't need to consider potential message loss or duplication at this protocol level. 3) Bulk messages don't contain any sequence numbers, and will always arrive in order. Hence we must exempt those from the sequence number control and deliver them unconditionally. We solve this by adding a new 'is_bulk' bit in those messages so that they can be recognized. 
4) Legacy messages, which don't contain any new bits or sequence numbers, but neither can arrive out of order, also need to be exempt from the initial synchronization and sequence number check, and delivered unconditionally. Therefore, we add another 'is_not_legacy' bit to all new messages so that those can be distinguished from legacy messages and the latter delivered directly. Signed-off-by: Hoang Huu Le <hoa...@de...> Acked-by: Jon Maloy <jm...@re...> --- net/tipc/bcast.c | 6 +-- net/tipc/bcast.h | 4 +- net/tipc/link.c | 2 +- net/tipc/msg.h | 40 ++++++++++++++++ net/tipc/name_distr.c | 109 +++++++++++++++++++++++++++++++----------- net/tipc/name_distr.h | 9 ++-- net/tipc/name_table.c | 9 +++- net/tipc/name_table.h | 2 + net/tipc/node.c | 29 ++++++++--- net/tipc/node.h | 8 ++-- 10 files changed, 170 insertions(+), 48 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 383f87bc1061..940d176e0e87 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -250,8 +250,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, * Consumes the buffer chain. 
* Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE */ -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, - u16 *cong_link_cnt) +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt) { struct tipc_link *l = tipc_bc_sndlink(net); struct sk_buff_head xmitq; @@ -752,7 +752,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) nl->local = false; } -u32 tipc_bcast_get_broadcast_mode(struct net *net) +u32 tipc_bcast_get_mode(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 4240c95188b1..2d9352dc7b0e 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -90,6 +90,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_mc_method *method, struct tipc_nlist *dests, u16 *cong_link_cnt); +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt); int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr); @@ -101,7 +103,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); int tipc_bclink_reset_stats(struct net *net, struct tipc_link *l); -u32 tipc_bcast_get_broadcast_mode(struct net *net); +u32 tipc_bcast_get_mode(struct net *net); u32 tipc_bcast_get_broadcast_ratio(struct net *net); void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, diff --git a/net/tipc/link.c b/net/tipc/link.c index ee3b8d0576b8..eac89a3e22ce 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2745,7 +2745,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg, void *hdr; struct nlattr *attrs; struct nlattr *prop; - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); + u32 bc_mode = tipc_bcast_get_mode(net); u32 bc_ratio = 
tipc_bcast_get_broadcast_ratio(net); if (!bcl) diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 58660d56bc83..65119e81ff0c 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -438,6 +438,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) msg_set_bits(m, 1, 25, 0xf, err); } +static inline void msg_set_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 28, 0x1, 1); +} + +static inline u32 msg_is_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 28, 0x1); +} + +static inline void msg_set_last_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 27, 0x1, 1); +} + +static inline u32 msg_is_last_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 27, 0x1); +} + +static inline void msg_set_non_legacy(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 26, 0x1, 1); +} + +static inline u32 msg_is_legacy(struct tipc_msg *m) +{ + return !msg_bits(m, 1, 26, 0x1); +} + static inline u32 msg_reroute_cnt(struct tipc_msg *m) { return msg_bits(m, 1, 21, 0xf); @@ -567,6 +597,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) msg_set_word(m, 4, p); } +static inline u16 msg_named_seqno(struct tipc_msg *m) +{ + return msg_bits(m, 4, 0, 0xffff); +} + +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) +{ + msg_set_bits(m, 4, 0, 0xffff, n); +} + static inline u32 msg_destport(struct tipc_msg *m) { return msg_word(m, 5); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 5feaf3b67380..481d480609f0 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) pr_warn("Publication distribution failure\n"); return NULL; } - + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); return skb; @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) struct sk_buff *tipc_named_withdraw(struct 
net *net, struct publication *publ) { struct name_table *nt = tipc_name_table(net); - struct sk_buff *buf; struct distr_item *item; + struct sk_buff *skb; write_lock_bh(&nt->cluster_scope_lock); list_del(&publ->binding_node); @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) if (publ->scope == TIPC_NODE_SCOPE) return NULL; - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); - if (!buf) { + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); + if (!skb) { pr_warn("Withdrawal distribution failure\n"); return NULL; } - - item = (struct distr_item *)msg_data(buf_msg(buf)); + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); + item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); - return buf; + return skb; } /** @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) * @pls: linked list of publication items to be packed into buffer chain */ static void named_distribute(struct net *net, struct sk_buff_head *list, - u32 dnode, struct list_head *pls) + u32 dnode, struct list_head *pls, u16 seqno) { struct publication *publ; struct sk_buff *skb = NULL; @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE; u32 msg_rem = msg_dsz; + struct tipc_msg *hdr; list_for_each_entry(publ, pls, binding_node) { /* Prepare next buffer: */ @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, pr_warn("Bulk publication failure\n"); return; } - msg_set_bc_ack_invalid(buf_msg(skb), true); - item = (struct distr_item *)msg_data(buf_msg(skb)); + hdr = buf_msg(skb); + msg_set_bc_ack_invalid(hdr, true); + msg_set_bulk(hdr); + msg_set_non_legacy(hdr); + item = (struct distr_item *)msg_data(hdr); } /* Pack publication into message: */ @@ -176,24 +182,35 @@ static 
void named_distribute(struct net *net, struct sk_buff_head *list, } } if (skb) { - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); + hdr = buf_msg(skb); + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); __skb_queue_tail(list, skb); } + hdr = buf_msg(skb_peek_tail(list)); + msg_set_last_bulk(hdr); + msg_set_named_seqno(hdr, seqno); } /** * tipc_named_node_up - tell specified node about all publications by this node */ -void tipc_named_node_up(struct net *net, u32 dnode) +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) { struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); struct sk_buff_head head; + u16 seqno; __skb_queue_head_init(&head); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests++; + seqno = nt->snd_nxt; + spin_unlock_bh(&tn->nametbl_lock); read_lock_bh(&nt->cluster_scope_lock); - named_distribute(net, &head, dnode, &nt->cluster_scope); + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); tipc_node_xmit(net, &head, dnode, 0); read_unlock_bh(&nt->cluster_scope_lock); } @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) spin_unlock_bh(&tn->nametbl_lock); } -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities) { + struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); + struct publication *publ, *tmp; list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) tipc_publ_purge(net, publ, addr); tipc_dist_queue_purge(net, addr); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests--; + spin_unlock_bh(&tn->nametbl_lock); } /** @@ -295,29 +320,55 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, return false; } +struct sk_buff 
*tipc_named_dequeue(struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) +{ + struct sk_buff *skb, *tmp; + struct tipc_msg *hdr; + u16 seqno; + + skb_queue_walk_safe(namedq, skb, tmp) { + skb_linearize(skb); + hdr = buf_msg(skb); + seqno = msg_named_seqno(hdr); + if (msg_is_last_bulk(hdr)) { + *rcv_nxt = seqno; + *open = true; + } + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { + __skb_unlink(skb, namedq); + return skb; + } + + if (*open && (*rcv_nxt == seqno)) { + (*rcv_nxt)++; + __skb_unlink(skb, namedq); + return skb; + } + } + return NULL; +} + /** * tipc_named_rcv - process name table update messages sent by another node */ -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) { - struct tipc_net *tn = net_generic(net, tipc_net_id); - struct tipc_msg *msg; + struct tipc_net *tn = tipc_net(net); struct distr_item *item; - uint count; - u32 node; + struct tipc_msg *hdr; struct sk_buff *skb; - int mtype; + u32 count, node = 0; spin_lock_bh(&tn->nametbl_lock); - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { - skb_linearize(skb); - msg = buf_msg(skb); - mtype = msg_type(msg); - item = (struct distr_item *)msg_data(msg); - count = msg_data_sz(msg) / ITEM_SIZE; - node = msg_orignode(msg); + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { + hdr = buf_msg(skb); + node = msg_orignode(hdr); + item = (struct distr_item *)msg_data(hdr); + count = msg_data_sz(hdr) / ITEM_SIZE; while (count--) { - tipc_update_nametbl(net, item, node, mtype); + tipc_update_nametbl(net, item, node, msg_type(hdr)); item++; } kfree_skb(skb); @@ -345,6 +396,6 @@ void tipc_named_reinit(struct net *net) publ->node = self; list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) publ->node = self; - + nt->rc_dests = 0; spin_unlock_bh(&tn->nametbl_lock); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index 63fc73e0fa6c..092323158f06 
100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -67,11 +67,14 @@ struct distr_item { __be32 key; }; +void tipc_named_bcast(struct net *net, struct sk_buff *skb); struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); -void tipc_named_node_up(struct net *net, u32 dnode); -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open); void tipc_named_reinit(struct net *net); -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 359b2bc888cf..2ac33d32edc2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, struct tipc_net *tn = tipc_net(net); struct publication *p = NULL; struct sk_buff *skb = NULL; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, nt->local_publ_count++; skb = tipc_named_publish(net, p); } + rc_dests = nt->rc_dests; exit: spin_unlock_bh(&tn->nametbl_lock); if (skb) - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return p; + } /** @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 self = tipc_own_addr(net); struct sk_buff *skb = NULL; struct publication *p; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", type, lower, upper, key); } + rc_dests = 
nt->rc_dests; spin_unlock_bh(&tn->nametbl_lock); if (skb) { - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return 1; } return 0; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 728bc7016c38..8064e1986e2c 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -106,6 +106,8 @@ struct name_table { struct list_head cluster_scope; rwlock_t cluster_scope_lock; u32 local_publ_count; + u32 rc_dests; + u32 snd_nxt; }; int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/tipc/node.c b/net/tipc/node.c index a4c2816c3746..030a51c4d1fa 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -75,6 +75,8 @@ struct tipc_bclink_entry { struct sk_buff_head arrvq; struct sk_buff_head inputq2; struct sk_buff_head namedq; + u16 named_rcv_nxt; + bool named_open; }; /** @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) write_unlock_bh(&n->lock); if (flags & TIPC_NOTIFY_NODE_DOWN) - tipc_publ_notify(net, publ_list, addr); + tipc_publ_notify(net, publ_list, addr, n->capabilities); if (flags & TIPC_NOTIFY_NODE_UP) - tipc_named_node_up(net, addr); + tipc_named_node_up(net, addr, n->capabilities); if (flags & TIPC_NOTIFY_LINK_UP) { tipc_mon_peer_up(net, addr, bearer_id); @@ -1483,6 +1485,7 @@ static void node_lost_contact(struct tipc_node *n, /* Clean up broadcast state */ tipc_bcast_remove_peer(n->net, n->bc_entry.link); + __skb_queue_purge(&n->bc_entry.namedq); /* Abort any ongoing link failover */ for (i = 0; i < MAX_BEARERS; i++) { @@ -1729,12 +1732,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) return 0; } -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) { + struct sk_buff_head xmitq; struct sk_buff *txskb; struct tipc_node *n; + u16 dummy; u32 dst; + /* Use broadcast if all nodes support it */ + if (!rc_dests && tipc_bcast_get_mode(net) != 
BCLINK_MODE_RCAST) { + __skb_queue_head_init(&xmitq); + __skb_queue_tail(&xmitq, skb); + tipc_bcast_xmit(net, &xmitq, &dummy); + return; + } + + /* Otherwise use legacy replicast method */ rcu_read_lock(); list_for_each_entry_rcu(n, tipc_nodes(net), list) { dst = n->addr; @@ -1749,7 +1763,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) tipc_node_xmit_skb(net, txskb, dst, 0); } rcu_read_unlock(); - kfree_skb(skb); } @@ -1844,7 +1857,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ if (!skb_queue_empty(&n->bc_entry.namedq)) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); /* If reassembly or retransmission failure => reset all links to peer */ if (rc & TIPC_LINK_DOWN_EVT) @@ -2114,7 +2129,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) tipc_node_link_down(n, bearer_id, false); if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) tipc_node_mcast_rcv(n); diff --git a/net/tipc/node.h b/net/tipc/node.h index a6803b449a2c..9f6f13f1604f 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -55,7 +55,8 @@ enum { TIPC_MCAST_RBCTL = (1 << 7), TIPC_GAP_ACK_BLOCK = (1 << 8), TIPC_TUNNEL_ENHANCED = (1 << 9), - TIPC_NAGLE = (1 << 10) + TIPC_NAGLE = (1 << 10), + TIPC_NAMED_BCAST = (1 << 11) }; #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ @@ -68,7 +69,8 @@ enum { TIPC_MCAST_RBCTL | \ TIPC_GAP_ACK_BLOCK | \ TIPC_TUNNEL_ENHANCED | \ - TIPC_NAGLE) + TIPC_NAGLE | \ + TIPC_NAMED_BCAST) #define INVALID_BEARER_ID -1 @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, u32 selector); void 
tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); -- 2.25.1 |
From: Hoang H. Le <hoa...@de...> - 2020-06-07 03:10:24
|
-----Original Message----- From: Jon Maloy <jm...@re...> Sent: Friday, June 5, 2020 8:03 PM To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... Subject: Re: [next-net v6] tipc: update a binding service via broadcast On 6/5/20 3:52 AM, Hoang Huu Le wrote: > Currently, updating binding table (add service binding to > name table/withdraw a service binding) is being sent over replicast. > However, if we are scaling up clusters to > 100 nodes/containers this > method is less affection because of looping through nodes in a cluster one > by one. > > It is worth to use broadcast to update a binding service. This way, the > binding table can be updated on all peer nodes in one shot. > > Broadcast is used when all peer nodes, as indicated by a new capability > flag TIPC_NAMED_BCAST, support reception of this message type. > > Four problems need to be considered when introducing this feature. > 1) When establishing a link to a new peer node we still update this by a > unicast 'bulk' update. This may lead to race conditions, where a later > broadcast publication/withdrawal bypass the 'bulk', resulting in > disordered publications, or even that a withdrawal may arrive before the > corresponding publication. We solve this by adding an 'is_last_bulk' bit > in the last bulk messages so that it can be distinguished from all other > messages. Only when this message has arrived do we open up for reception > of broadcast publications/withdrawals. > 2) When a first legacy node is added to the cluster all distribution > will switch over to use the legacy 'replicast' method, while the > opposite happens when the last legacy node leaves the cluster. This > entails another risk of message disordering that has to be handled. We > solve this by adding a sequence number to the broadcast/replicast > messages, so that disordering can be discovered and corrected. Note > however that we don't need to consider potential message loss or > duplication at this protocol level. 
> 3) Bulk messages don't contain any sequence numbers, and will always > arrive in order. Hence we must exempt those from the sequence number > control and deliver them unconditionally. We solve this by adding a new > 'is_bulk' bit in those messages so that they can be recognized. > 4) Legacy messages, which don't contain any new bits or sequence > numbers, but neither can arrive out of order, also need to be exempt > from the initial synchronization and sequence number check, and > delivered unconditionally. Therefore, we add another 'is_not_legacy' bit > to all new messages so that those can be distinguished from legacy > messages and the latter delivered directly. --- If you add the above three dashes before the version info that will not by accident be considered part of the commit log. But of course our internal versioning is irrelevant to David M, so you should just remove this info before posting. [Hoang] Thanks Jon. Frankly, I didn't know that. > v2: resolve synchronization problem when switching from unicast to > broadcast > > v5: - never use broadcast if there is a single node not understanding it > - always use broadcast otherwise > - add sequence numbering to non-bulk messages > > v6: update Jon's comment > > Signed-off-by: Hoang Huu Le <hoa...@de...> > Acked-by: Jon Maloy <jm...@re...> > --- > net/tipc/bcast.c | 6 +-- > net/tipc/bcast.h | 4 +- > net/tipc/link.c | 2 +- > net/tipc/msg.h | 40 +++++++++++++++ > net/tipc/name_distr.c | 115 +++++++++++++++++++++++++++++++----------- > net/tipc/name_distr.h | 9 ++-- > net/tipc/name_table.c | 9 +++- > net/tipc/name_table.h | 2 + > net/tipc/node.c | 28 +++++++--- > net/tipc/node.h | 8 +-- > 10 files changed, 175 insertions(+), 48 deletions(-) > > diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c > index 4c20be08b9c4..9d085ad6f0cf 100644 > --- a/net/tipc/bcast.c > +++ b/net/tipc/bcast.c > @@ -249,8 +249,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, > * Consumes the buffer chain. 
> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE > */ > -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > - u16 *cong_link_cnt) > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt) > { > struct tipc_link *l = tipc_bc_sndlink(net); > struct sk_buff_head xmitq; > @@ -746,7 +746,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) > nl->local = false; > } > > -u32 tipc_bcast_get_broadcast_mode(struct net *net) > +u32 tipc_bcast_get_mode(struct net *net) > { > struct tipc_bc_base *bb = tipc_bc_base(net); > > diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h > index 9e847d9617d3..b3b883e2a823 100644 > --- a/net/tipc/bcast.h > +++ b/net/tipc/bcast.h > @@ -89,6 +89,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); > int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, > struct tipc_mc_method *method, struct tipc_nlist *dests, > u16 *cong_link_cnt); > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt); > int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); > void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, > struct tipc_msg *hdr); > @@ -98,7 +100,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); > int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); > int tipc_bclink_reset_stats(struct net *net); > > -u32 tipc_bcast_get_broadcast_mode(struct net *net); > +u32 tipc_bcast_get_mode(struct net *net); > u32 tipc_bcast_get_broadcast_ratio(struct net *net); > > void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, > diff --git a/net/tipc/link.c b/net/tipc/link.c > index d4675e922a8f..da0b30733549 100644 > --- a/net/tipc/link.c > +++ b/net/tipc/link.c > @@ -2646,7 +2646,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) > struct nlattr *attrs; > struct nlattr *prop; > struct tipc_net *tn = net_generic(net, tipc_net_id); > - u32 bc_mode 
= tipc_bcast_get_broadcast_mode(net); > + u32 bc_mode = tipc_bcast_get_mode(net); > u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); > struct tipc_link *bcl = tn->bcl; > > diff --git a/net/tipc/msg.h b/net/tipc/msg.h > index 871feadbbc19..d53914316684 100644 > --- a/net/tipc/msg.h > +++ b/net/tipc/msg.h > @@ -409,6 +409,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) > msg_set_bits(m, 1, 25, 0xf, err); > } > > +static inline void msg_set_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 28, 0x1, 1); > +} > + > +static inline u32 msg_is_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 28, 0x1); > +} > + > +static inline void msg_set_last_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 27, 0x1, 1); > +} > + > +static inline u32 msg_is_last_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 27, 0x1); > +} > + > +static inline void msg_set_non_legacy(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 26, 0x1, 1); > +} > + > +static inline u32 msg_is_legacy(struct tipc_msg *m) > +{ > + return !msg_bits(m, 1, 26, 0x1); > +} > + > static inline u32 msg_reroute_cnt(struct tipc_msg *m) > { > return msg_bits(m, 1, 21, 0xf); > @@ -538,6 +568,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) > msg_set_word(m, 4, p); > } > > +static inline u16 msg_named_seqno(struct tipc_msg *m) > +{ > + return msg_bits(m, 4, 0, 0xffff); > +} > + > +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) > +{ > + msg_set_bits(m, 4, 0, 0xffff, n); > +} > + > static inline u32 msg_destport(struct tipc_msg *m) > { > return msg_word(m, 5); > diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c > index 5feaf3b67380..6cbd4b271768 100644 > --- a/net/tipc/name_distr.c > +++ b/net/tipc/name_distr.c > @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > pr_warn("Publication distribution failure\n"); > return NULL; > } > - > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + 
msg_set_non_legacy(buf_msg(skb)); > item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > return skb; > @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > { > struct name_table *nt = tipc_name_table(net); > - struct sk_buff *buf; > struct distr_item *item; > + struct sk_buff *skb; > > write_lock_bh(&nt->cluster_scope_lock); > list_del(&publ->binding_node); > @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > if (publ->scope == TIPC_NODE_SCOPE) > return NULL; > > - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > - if (!buf) { > + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > + if (!skb) { > pr_warn("Withdrawal distribution failure\n"); > return NULL; > } > - > - item = (struct distr_item *)msg_data(buf_msg(buf)); > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + msg_set_non_legacy(buf_msg(skb)); > + item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > - return buf; > + return skb; > } > > /** > @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > * @pls: linked list of publication items to be packed into buffer chain > */ > static void named_distribute(struct net *net, struct sk_buff_head *list, > - u32 dnode, struct list_head *pls) > + u32 dnode, struct list_head *pls, u16 seqno) > { > struct publication *publ; > struct sk_buff *skb = NULL; > @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / > ITEM_SIZE) * ITEM_SIZE; > u32 msg_rem = msg_dsz; > + struct tipc_msg *hdr; > > list_for_each_entry(publ, pls, binding_node) { > /* Prepare next buffer: */ > @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, 
> pr_warn("Bulk publication failure\n"); > return; > } > - msg_set_bc_ack_invalid(buf_msg(skb), true); > - item = (struct distr_item *)msg_data(buf_msg(skb)); > + hdr = buf_msg(skb); > + msg_set_bc_ack_invalid(hdr, true); > + msg_set_bulk(hdr); > + msg_set_non_legacy(hdr); > + item = (struct distr_item *)msg_data(hdr); > } > > /* Pack publication into message: */ > @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > } > } > if (skb) { > - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); > + hdr = buf_msg(skb); > + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); > skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); > __skb_queue_tail(list, skb); > } > + hdr = buf_msg(skb_peek_tail(list)); > + msg_set_last_bulk(hdr); > + msg_set_named_seqno(hdr, seqno); > } > > /** > * tipc_named_node_up - tell specified node about all publications by this node > */ > -void tipc_named_node_up(struct net *net, u32 dnode) > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) > { > struct name_table *nt = tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > struct sk_buff_head head; > + u16 seqno; > > __skb_queue_head_init(&head); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests++; > + seqno = nt->snd_nxt; > + spin_unlock_bh(&tn->nametbl_lock); > > read_lock_bh(&nt->cluster_scope_lock); > - named_distribute(net, &head, dnode, &nt->cluster_scope); > + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); > tipc_node_xmit(net, &head, dnode, 0); > read_unlock_bh(&nt->cluster_scope_lock); > } > @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) > spin_unlock_bh(&tn->nametbl_lock); > } > > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities) > { > + struct name_table *nt = 
tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > + > struct publication *publ, *tmp; > > list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) > tipc_publ_purge(net, publ, addr); > tipc_dist_queue_purge(net, addr); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests--; > + spin_unlock_bh(&tn->nametbl_lock); > } > > /** > @@ -295,29 +320,61 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, > return false; > } > > +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > +{ > + struct sk_buff *skb, *tmp; > + struct tipc_msg *hdr; > + u16 seqno; > + > + skb_queue_walk_safe(namedq, skb, tmp) { > + skb_linearize(skb); > + hdr = buf_msg(skb); > + seqno = msg_named_seqno(hdr); > + if (msg_is_last_bulk(hdr)) { > + *rcv_nxt = seqno; > + *open = true; > + } > + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (*open && (*rcv_nxt == seqno)) { > + (*rcv_nxt)++; > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (less(seqno, *rcv_nxt)) { > + __skb_unlink(skb, namedq); > + kfree_skb(skb); > + continue; Still not needed. This queue should be flushed in tipc_node_lost_contact(), which I now see we don't do. [Hoang] Yes, that's right. I will verify and send it out. This has to be fixed too. 
///jon > + } > + } > + return NULL; > +} > + > /** > * tipc_named_rcv - process name table update messages sent by another node > */ > -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > { > - struct tipc_net *tn = net_generic(net, tipc_net_id); > - struct tipc_msg *msg; > + struct tipc_net *tn = tipc_net(net); > struct distr_item *item; > - uint count; > - u32 node; > + struct tipc_msg *hdr; > struct sk_buff *skb; > - int mtype; > + u32 count, node = 0; > > spin_lock_bh(&tn->nametbl_lock); > - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { > - skb_linearize(skb); > - msg = buf_msg(skb); > - mtype = msg_type(msg); > - item = (struct distr_item *)msg_data(msg); > - count = msg_data_sz(msg) / ITEM_SIZE; > - node = msg_orignode(msg); > + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { > + hdr = buf_msg(skb); > + node = msg_orignode(hdr); > + item = (struct distr_item *)msg_data(hdr); > + count = msg_data_sz(hdr) / ITEM_SIZE; > while (count--) { > - tipc_update_nametbl(net, item, node, mtype); > + tipc_update_nametbl(net, item, node, msg_type(hdr)); > item++; > } > kfree_skb(skb); > @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) > publ->node = self; > list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) > publ->node = self; > - > + nt->rc_dests = 0; > spin_unlock_bh(&tn->nametbl_lock); > } > diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h > index 63fc73e0fa6c..092323158f06 100644 > --- a/net/tipc/name_distr.h > +++ b/net/tipc/name_distr.h > @@ -67,11 +67,14 @@ struct distr_item { > __be32 key; > }; > > +void tipc_named_bcast(struct net *net, struct sk_buff *skb); > struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); > -void tipc_named_node_up(struct net *net, u32 dnode); > -void 
tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open); > void tipc_named_reinit(struct net *net); > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities); > > #endif > diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c > index 359b2bc888cf..2ac33d32edc2 100644 > --- a/net/tipc/name_table.c > +++ b/net/tipc/name_table.c > @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > struct tipc_net *tn = tipc_net(net); > struct publication *p = NULL; > struct sk_buff *skb = NULL; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > nt->local_publ_count++; > skb = tipc_named_publish(net, p); > } > + rc_dests = nt->rc_dests; > exit: > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return p; > + > } > > /** > @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > u32 self = tipc_own_addr(net); > struct sk_buff *skb = NULL; > struct publication *p; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", > type, lower, upper, key); > } > + rc_dests = nt->rc_dests; > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) { > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return 1; > } > return 0; > diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h > index 728bc7016c38..8064e1986e2c 100644 > --- 
a/net/tipc/name_table.h > +++ b/net/tipc/name_table.h > @@ -106,6 +106,8 @@ struct name_table { > struct list_head cluster_scope; > rwlock_t cluster_scope_lock; > u32 local_publ_count; > + u32 rc_dests; > + u32 snd_nxt; > }; > > int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); > diff --git a/net/tipc/node.c b/net/tipc/node.c > index 803a3a6d0f50..ad8d7bce1f98 100644 > --- a/net/tipc/node.c > +++ b/net/tipc/node.c > @@ -75,6 +75,8 @@ struct tipc_bclink_entry { > struct sk_buff_head arrvq; > struct sk_buff_head inputq2; > struct sk_buff_head namedq; > + u16 named_rcv_nxt; > + bool named_open; > }; > > /** > @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) > write_unlock_bh(&n->lock); > > if (flags & TIPC_NOTIFY_NODE_DOWN) > - tipc_publ_notify(net, publ_list, addr); > + tipc_publ_notify(net, publ_list, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_NODE_UP) > - tipc_named_node_up(net, addr); > + tipc_named_node_up(net, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_LINK_UP) { > tipc_mon_peer_up(net, addr, bearer_id); > @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) > return 0; > } > > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) > { > + struct sk_buff_head xmitq; > struct sk_buff *txskb; > struct tipc_node *n; > + u16 dummy; > u32 dst; > > + /* Use broadcast if all nodes support it */ > + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { > + __skb_queue_head_init(&xmitq); > + __skb_queue_tail(&xmitq, skb); > + tipc_bcast_xmit(net, &xmitq, &dummy); > + return; > + } > + > + /* Otherwise use legacy replicast method */ > rcu_read_lock(); > list_for_each_entry_rcu(n, tipc_nodes(net), list) { > dst = n->addr; > @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > tipc_node_xmit_skb(net, txskb, dst, 0); > } > 
rcu_read_unlock(); > - > kfree_skb(skb); > } > > @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id > > /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ > if (!skb_queue_empty(&n->bc_entry.namedq)) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > /* If reassembly or retransmission failure => reset all links to peer */ > if (rc & TIPC_LINK_DOWN_EVT) > @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) > tipc_node_link_down(n, bearer_id, false); > > if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) > tipc_node_mcast_rcv(n); > diff --git a/net/tipc/node.h b/net/tipc/node.h > index a6803b449a2c..9f6f13f1604f 100644 > --- a/net/tipc/node.h > +++ b/net/tipc/node.h > @@ -55,7 +55,8 @@ enum { > TIPC_MCAST_RBCTL = (1 << 7), > TIPC_GAP_ACK_BLOCK = (1 << 8), > TIPC_TUNNEL_ENHANCED = (1 << 9), > - TIPC_NAGLE = (1 << 10) > + TIPC_NAGLE = (1 << 10), > + TIPC_NAMED_BCAST = (1 << 11) > }; > > #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ > @@ -68,7 +69,8 @@ enum { > TIPC_MCAST_RBCTL | \ > TIPC_GAP_ACK_BLOCK | \ > TIPC_TUNNEL_ENHANCED | \ > - TIPC_NAGLE) > + TIPC_NAGLE | \ > + TIPC_NAMED_BCAST) > > #define INVALID_BEARER_ID -1 > > @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, > u32 selector); > void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); > void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); > int 
tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); > void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); > int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Jon M. <jm...@re...> - 2020-06-05 13:03:06
|
On 6/5/20 3:52 AM, Hoang Huu Le wrote: > Currently, updating binding table (add service binding to > name table/withdraw a service binding) is being sent over replicast. > However, if we are scaling up clusters to > 100 nodes/containers this > method is less affection because of looping through nodes in a cluster one > by one. > > It is worth to use broadcast to update a binding service. This way, the > binding table can be updated on all peer nodes in one shot. > > Broadcast is used when all peer nodes, as indicated by a new capability > flag TIPC_NAMED_BCAST, support reception of this message type. > > Four problems need to be considered when introducing this feature. > 1) When establishing a link to a new peer node we still update this by a > unicast 'bulk' update. This may lead to race conditions, where a later > broadcast publication/withdrawal bypass the 'bulk', resulting in > disordered publications, or even that a withdrawal may arrive before the > corresponding publication. We solve this by adding an 'is_last_bulk' bit > in the last bulk messages so that it can be distinguished from all other > messages. Only when this message has arrived do we open up for reception > of broadcast publications/withdrawals. > 2) When a first legacy node is added to the cluster all distribution > will switch over to use the legacy 'replicast' method, while the > opposite happens when the last legacy node leaves the cluster. This > entails another risk of message disordering that has to be handled. We > solve this by adding a sequence number to the broadcast/replicast > messages, so that disordering can be discovered and corrected. Note > however that we don't need to consider potential message loss or > duplication at this protocol level. > 3) Bulk messages don't contain any sequence numbers, and will always > arrive in order. Hence we must exempt those from the sequence number > control and deliver them unconditionally. 
We solve this by adding a new > 'is_bulk' bit in those messages so that they can be recognized. > 4) Legacy messages, which don't contain any new bits or sequence > numbers, but neither can arrive out of order, also need to be exempt > from the initial synchronization and sequence number check, and > delivered unconditionally. Therefore, we add another 'is_not_legacy' bit > to all new messages so that those can be distinguished from legacy > messages and the latter delivered directly. --- If you add the above three dashes before the version info that will not by accident be considered part of the commit log. But of course our internal versioning is irrelevant to David M, so you should just remove this info before posting. > v2: resolve synchronization problem when switching from unicast to > broadcast > > v5: - never use broadcast if there is a single node not understanding it > - always use broadcast otherwise > - add sequence numbering to non-bulk messages > > v6: update Jon's comment > > Signed-off-by: Hoang Huu Le <hoa...@de...> > Acked-by: Jon Maloy <jm...@re...> > --- > net/tipc/bcast.c | 6 +-- > net/tipc/bcast.h | 4 +- > net/tipc/link.c | 2 +- > net/tipc/msg.h | 40 +++++++++++++++ > net/tipc/name_distr.c | 115 +++++++++++++++++++++++++++++++----------- > net/tipc/name_distr.h | 9 ++-- > net/tipc/name_table.c | 9 +++- > net/tipc/name_table.h | 2 + > net/tipc/node.c | 28 +++++++--- > net/tipc/node.h | 8 +-- > 10 files changed, 175 insertions(+), 48 deletions(-) > > diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c > index 4c20be08b9c4..9d085ad6f0cf 100644 > --- a/net/tipc/bcast.c > +++ b/net/tipc/bcast.c > @@ -249,8 +249,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, > * Consumes the buffer chain. 
> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE > */ > -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > - u16 *cong_link_cnt) > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt) > { > struct tipc_link *l = tipc_bc_sndlink(net); > struct sk_buff_head xmitq; > @@ -746,7 +746,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) > nl->local = false; > } > > -u32 tipc_bcast_get_broadcast_mode(struct net *net) > +u32 tipc_bcast_get_mode(struct net *net) > { > struct tipc_bc_base *bb = tipc_bc_base(net); > > diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h > index 9e847d9617d3..b3b883e2a823 100644 > --- a/net/tipc/bcast.h > +++ b/net/tipc/bcast.h > @@ -89,6 +89,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); > int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, > struct tipc_mc_method *method, struct tipc_nlist *dests, > u16 *cong_link_cnt); > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt); > int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); > void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, > struct tipc_msg *hdr); > @@ -98,7 +100,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); > int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); > int tipc_bclink_reset_stats(struct net *net); > > -u32 tipc_bcast_get_broadcast_mode(struct net *net); > +u32 tipc_bcast_get_mode(struct net *net); > u32 tipc_bcast_get_broadcast_ratio(struct net *net); > > void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, > diff --git a/net/tipc/link.c b/net/tipc/link.c > index d4675e922a8f..da0b30733549 100644 > --- a/net/tipc/link.c > +++ b/net/tipc/link.c > @@ -2646,7 +2646,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) > struct nlattr *attrs; > struct nlattr *prop; > struct tipc_net *tn = net_generic(net, tipc_net_id); > - u32 bc_mode 
= tipc_bcast_get_broadcast_mode(net); > + u32 bc_mode = tipc_bcast_get_mode(net); > u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); > struct tipc_link *bcl = tn->bcl; > > diff --git a/net/tipc/msg.h b/net/tipc/msg.h > index 871feadbbc19..d53914316684 100644 > --- a/net/tipc/msg.h > +++ b/net/tipc/msg.h > @@ -409,6 +409,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) > msg_set_bits(m, 1, 25, 0xf, err); > } > > +static inline void msg_set_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 28, 0x1, 1); > +} > + > +static inline u32 msg_is_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 28, 0x1); > +} > + > +static inline void msg_set_last_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 27, 0x1, 1); > +} > + > +static inline u32 msg_is_last_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 27, 0x1); > +} > + > +static inline void msg_set_non_legacy(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 26, 0x1, 1); > +} > + > +static inline u32 msg_is_legacy(struct tipc_msg *m) > +{ > + return !msg_bits(m, 1, 26, 0x1); > +} > + > static inline u32 msg_reroute_cnt(struct tipc_msg *m) > { > return msg_bits(m, 1, 21, 0xf); > @@ -538,6 +568,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) > msg_set_word(m, 4, p); > } > > +static inline u16 msg_named_seqno(struct tipc_msg *m) > +{ > + return msg_bits(m, 4, 0, 0xffff); > +} > + > +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) > +{ > + msg_set_bits(m, 4, 0, 0xffff, n); > +} > + > static inline u32 msg_destport(struct tipc_msg *m) > { > return msg_word(m, 5); > diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c > index 5feaf3b67380..6cbd4b271768 100644 > --- a/net/tipc/name_distr.c > +++ b/net/tipc/name_distr.c > @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > pr_warn("Publication distribution failure\n"); > return NULL; > } > - > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + 
msg_set_non_legacy(buf_msg(skb)); > item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > return skb; > @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > { > struct name_table *nt = tipc_name_table(net); > - struct sk_buff *buf; > struct distr_item *item; > + struct sk_buff *skb; > > write_lock_bh(&nt->cluster_scope_lock); > list_del(&publ->binding_node); > @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > if (publ->scope == TIPC_NODE_SCOPE) > return NULL; > > - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > - if (!buf) { > + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > + if (!skb) { > pr_warn("Withdrawal distribution failure\n"); > return NULL; > } > - > - item = (struct distr_item *)msg_data(buf_msg(buf)); > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + msg_set_non_legacy(buf_msg(skb)); > + item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > - return buf; > + return skb; > } > > /** > @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > * @pls: linked list of publication items to be packed into buffer chain > */ > static void named_distribute(struct net *net, struct sk_buff_head *list, > - u32 dnode, struct list_head *pls) > + u32 dnode, struct list_head *pls, u16 seqno) > { > struct publication *publ; > struct sk_buff *skb = NULL; > @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / > ITEM_SIZE) * ITEM_SIZE; > u32 msg_rem = msg_dsz; > + struct tipc_msg *hdr; > > list_for_each_entry(publ, pls, binding_node) { > /* Prepare next buffer: */ > @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, 
> pr_warn("Bulk publication failure\n"); > return; > } > - msg_set_bc_ack_invalid(buf_msg(skb), true); > - item = (struct distr_item *)msg_data(buf_msg(skb)); > + hdr = buf_msg(skb); > + msg_set_bc_ack_invalid(hdr, true); > + msg_set_bulk(hdr); > + msg_set_non_legacy(hdr); > + item = (struct distr_item *)msg_data(hdr); > } > > /* Pack publication into message: */ > @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > } > } > if (skb) { > - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); > + hdr = buf_msg(skb); > + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); > skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); > __skb_queue_tail(list, skb); > } > + hdr = buf_msg(skb_peek_tail(list)); > + msg_set_last_bulk(hdr); > + msg_set_named_seqno(hdr, seqno); > } > > /** > * tipc_named_node_up - tell specified node about all publications by this node > */ > -void tipc_named_node_up(struct net *net, u32 dnode) > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) > { > struct name_table *nt = tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > struct sk_buff_head head; > + u16 seqno; > > __skb_queue_head_init(&head); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests++; > + seqno = nt->snd_nxt; > + spin_unlock_bh(&tn->nametbl_lock); > > read_lock_bh(&nt->cluster_scope_lock); > - named_distribute(net, &head, dnode, &nt->cluster_scope); > + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); > tipc_node_xmit(net, &head, dnode, 0); > read_unlock_bh(&nt->cluster_scope_lock); > } > @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) > spin_unlock_bh(&tn->nametbl_lock); > } > > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities) > { > + struct name_table *nt = 
tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > + > struct publication *publ, *tmp; > > list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) > tipc_publ_purge(net, publ, addr); > tipc_dist_queue_purge(net, addr); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests--; > + spin_unlock_bh(&tn->nametbl_lock); > } > > /** > @@ -295,29 +320,61 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, > return false; > } > > +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > +{ > + struct sk_buff *skb, *tmp; > + struct tipc_msg *hdr; > + u16 seqno; > + > + skb_queue_walk_safe(namedq, skb, tmp) { > + skb_linearize(skb); > + hdr = buf_msg(skb); > + seqno = msg_named_seqno(hdr); > + if (msg_is_last_bulk(hdr)) { > + *rcv_nxt = seqno; > + *open = true; > + } > + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (*open && (*rcv_nxt == seqno)) { > + (*rcv_nxt)++; > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (less(seqno, *rcv_nxt)) { > + __skb_unlink(skb, namedq); > + kfree_skb(skb); > + continue; Still not needed. This queue should be flushed in tipc_node_lost_contact(), which I now see we don't do. This has to be fixed too. 
///jon > + } > + } > + return NULL; > +} > + > /** > * tipc_named_rcv - process name table update messages sent by another node > */ > -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > { > - struct tipc_net *tn = net_generic(net, tipc_net_id); > - struct tipc_msg *msg; > + struct tipc_net *tn = tipc_net(net); > struct distr_item *item; > - uint count; > - u32 node; > + struct tipc_msg *hdr; > struct sk_buff *skb; > - int mtype; > + u32 count, node = 0; > > spin_lock_bh(&tn->nametbl_lock); > - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { > - skb_linearize(skb); > - msg = buf_msg(skb); > - mtype = msg_type(msg); > - item = (struct distr_item *)msg_data(msg); > - count = msg_data_sz(msg) / ITEM_SIZE; > - node = msg_orignode(msg); > + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { > + hdr = buf_msg(skb); > + node = msg_orignode(hdr); > + item = (struct distr_item *)msg_data(hdr); > + count = msg_data_sz(hdr) / ITEM_SIZE; > while (count--) { > - tipc_update_nametbl(net, item, node, mtype); > + tipc_update_nametbl(net, item, node, msg_type(hdr)); > item++; > } > kfree_skb(skb); > @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) > publ->node = self; > list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) > publ->node = self; > - > + nt->rc_dests = 0; > spin_unlock_bh(&tn->nametbl_lock); > } > diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h > index 63fc73e0fa6c..092323158f06 100644 > --- a/net/tipc/name_distr.h > +++ b/net/tipc/name_distr.h > @@ -67,11 +67,14 @@ struct distr_item { > __be32 key; > }; > > +void tipc_named_bcast(struct net *net, struct sk_buff *skb); > struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); > -void tipc_named_node_up(struct net *net, u32 dnode); > -void 
tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open); > void tipc_named_reinit(struct net *net); > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities); > > #endif > diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c > index 359b2bc888cf..2ac33d32edc2 100644 > --- a/net/tipc/name_table.c > +++ b/net/tipc/name_table.c > @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > struct tipc_net *tn = tipc_net(net); > struct publication *p = NULL; > struct sk_buff *skb = NULL; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > nt->local_publ_count++; > skb = tipc_named_publish(net, p); > } > + rc_dests = nt->rc_dests; > exit: > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return p; > + > } > > /** > @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > u32 self = tipc_own_addr(net); > struct sk_buff *skb = NULL; > struct publication *p; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", > type, lower, upper, key); > } > + rc_dests = nt->rc_dests; > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) { > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return 1; > } > return 0; > diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h > index 728bc7016c38..8064e1986e2c 100644 > --- 
a/net/tipc/name_table.h > +++ b/net/tipc/name_table.h > @@ -106,6 +106,8 @@ struct name_table { > struct list_head cluster_scope; > rwlock_t cluster_scope_lock; > u32 local_publ_count; > + u32 rc_dests; > + u32 snd_nxt; > }; > > int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); > diff --git a/net/tipc/node.c b/net/tipc/node.c > index 803a3a6d0f50..ad8d7bce1f98 100644 > --- a/net/tipc/node.c > +++ b/net/tipc/node.c > @@ -75,6 +75,8 @@ struct tipc_bclink_entry { > struct sk_buff_head arrvq; > struct sk_buff_head inputq2; > struct sk_buff_head namedq; > + u16 named_rcv_nxt; > + bool named_open; > }; > > /** > @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) > write_unlock_bh(&n->lock); > > if (flags & TIPC_NOTIFY_NODE_DOWN) > - tipc_publ_notify(net, publ_list, addr); > + tipc_publ_notify(net, publ_list, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_NODE_UP) > - tipc_named_node_up(net, addr); > + tipc_named_node_up(net, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_LINK_UP) { > tipc_mon_peer_up(net, addr, bearer_id); > @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) > return 0; > } > > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) > { > + struct sk_buff_head xmitq; > struct sk_buff *txskb; > struct tipc_node *n; > + u16 dummy; > u32 dst; > > + /* Use broadcast if all nodes support it */ > + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { > + __skb_queue_head_init(&xmitq); > + __skb_queue_tail(&xmitq, skb); > + tipc_bcast_xmit(net, &xmitq, &dummy); > + return; > + } > + > + /* Otherwise use legacy replicast method */ > rcu_read_lock(); > list_for_each_entry_rcu(n, tipc_nodes(net), list) { > dst = n->addr; > @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > tipc_node_xmit_skb(net, txskb, dst, 0); > } > 
rcu_read_unlock(); > - > kfree_skb(skb); > } > > @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id > > /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ > if (!skb_queue_empty(&n->bc_entry.namedq)) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > /* If reassembly or retransmission failure => reset all links to peer */ > if (rc & TIPC_LINK_DOWN_EVT) > @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) > tipc_node_link_down(n, bearer_id, false); > > if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) > tipc_node_mcast_rcv(n); > diff --git a/net/tipc/node.h b/net/tipc/node.h > index a6803b449a2c..9f6f13f1604f 100644 > --- a/net/tipc/node.h > +++ b/net/tipc/node.h > @@ -55,7 +55,8 @@ enum { > TIPC_MCAST_RBCTL = (1 << 7), > TIPC_GAP_ACK_BLOCK = (1 << 8), > TIPC_TUNNEL_ENHANCED = (1 << 9), > - TIPC_NAGLE = (1 << 10) > + TIPC_NAGLE = (1 << 10), > + TIPC_NAMED_BCAST = (1 << 11) > }; > > #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ > @@ -68,7 +69,8 @@ enum { > TIPC_MCAST_RBCTL | \ > TIPC_GAP_ACK_BLOCK | \ > TIPC_TUNNEL_ENHANCED | \ > - TIPC_NAGLE) > + TIPC_NAGLE | \ > + TIPC_NAMED_BCAST) > > #define INVALID_BEARER_ID -1 > > @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, > u32 selector); > void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); > void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); > int 
tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); > void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); > int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); |
From: Tuong L. <tuo...@de...> - 2020-06-05 10:35:47
|
When a bearer is enabled, we create a 'tipc_discoverer' object to store the bearer related data along with a timer and a preformatted discovery message buffer for later probing... However, this is only carried after the bearer was set 'up', that left a race condition resulting in kernel panic. It occurs when a discovery message from a peer node is received and processed in bottom half (since the bearer is 'up' already) just before the discoverer object is created but is now accessed in order to update the preformatted buffer (with a new trial address, ...) so leads to the NULL pointer dereference. We solve the problem by simply moving the bearer 'up' setting to later, so make sure everything is ready prior to any message receiving. Signed-off-by: Tuong Lien <tuo...@de...> --- net/tipc/bearer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index 34ca7b789eba..e366ec9a7e4d 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c @@ -316,7 +316,6 @@ static int tipc_enable_bearer(struct net *net, const char *name, b->domain = disc_domain; b->net_plane = bearer_id + 'A'; b->priority = prio; - test_and_set_bit_lock(0, &b->up); refcount_set(&b->refcnt, 1); res = tipc_disc_create(net, b, &b->bcast_addr, &skb); @@ -326,6 +325,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, goto rejected; } + test_and_set_bit_lock(0, &b->up); rcu_assign_pointer(tn->bearer_list[bearer_id], b); if (skb) tipc_bearer_xmit_skb(net, bearer_id, skb, &b->bcast_addr); -- 2.13.7 |
From: Hoang H. Le <hoa...@de...> - 2020-06-05 07:53:05
|
Currently, updating binding table (add service binding to name table/withdraw a service binding) is being sent over replicast. However, if we are scaling up clusters to > 100 nodes/containers this method is less affection because of looping through nodes in a cluster one by one. It is worth to use broadcast to update a binding service. This way, the binding table can be updated on all peer nodes in one shot. Broadcast is used when all peer nodes, as indicated by a new capability flag TIPC_NAMED_BCAST, support reception of this message type. Four problems need to be considered when introducing this feature. 1) When establishing a link to a new peer node we still update this by a unicast 'bulk' update. This may lead to race conditions, where a later broadcast publication/withdrawal bypass the 'bulk', resulting in disordered publications, or even that a withdrawal may arrive before the corresponding publication. We solve this by adding an 'is_last_bulk' bit in the last bulk messages so that it can be distinguished from all other messages. Only when this message has arrived do we open up for reception of broadcast publications/withdrawals. 2) When a first legacy node is added to the cluster all distribution will switch over to use the legacy 'replicast' method, while the opposite happens when the last legacy node leaves the cluster. This entails another risk of message disordering that has to be handled. We solve this by adding a sequence number to the broadcast/replicast messages, so that disordering can be discovered and corrected. Note however that we don't need to consider potential message loss or duplication at this protocol level. 3) Bulk messages don't contain any sequence numbers, and will always arrive in order. Hence we must exempt those from the sequence number control and deliver them unconditionally. We solve this by adding a new 'is_bulk' bit in those messages so that they can be recognized. 
4) Legacy messages, which don't contain any new bits or sequence numbers, but neither can arrive out of order, also need to be exempt from the initial synchronization and sequence number check, and delivered unconditionally. Therefore, we add another 'is_not_legacy' bit to all new messages so that those can be distinguished from legacy messages and the latter delivered directly. v2: resolve synchronization problem when switching from unicast to broadcast v5: - never use broadcast if there is a single node not understanding it - always use broadcast otherwise - add sequence numbering to non-bulk messages v6: update Jon's comment Signed-off-by: Hoang Huu Le <hoa...@de...> Acked-by: Jon Maloy <jm...@re...> --- net/tipc/bcast.c | 6 +-- net/tipc/bcast.h | 4 +- net/tipc/link.c | 2 +- net/tipc/msg.h | 40 +++++++++++++++ net/tipc/name_distr.c | 115 +++++++++++++++++++++++++++++++----------- net/tipc/name_distr.h | 9 ++-- net/tipc/name_table.c | 9 +++- net/tipc/name_table.h | 2 + net/tipc/node.c | 28 +++++++--- net/tipc/node.h | 8 +-- 10 files changed, 175 insertions(+), 48 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 4c20be08b9c4..9d085ad6f0cf 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -249,8 +249,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, * Consumes the buffer chain. 
* Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE */ -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, - u16 *cong_link_cnt) +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt) { struct tipc_link *l = tipc_bc_sndlink(net); struct sk_buff_head xmitq; @@ -746,7 +746,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) nl->local = false; } -u32 tipc_bcast_get_broadcast_mode(struct net *net) +u32 tipc_bcast_get_mode(struct net *net) { struct tipc_bc_base *bb = tipc_bc_base(net); diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h index 9e847d9617d3..b3b883e2a823 100644 --- a/net/tipc/bcast.h +++ b/net/tipc/bcast.h @@ -89,6 +89,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, struct tipc_mc_method *method, struct tipc_nlist *dests, u16 *cong_link_cnt); +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, + u16 *cong_link_cnt); int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, struct tipc_msg *hdr); @@ -98,7 +100,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); int tipc_bclink_reset_stats(struct net *net); -u32 tipc_bcast_get_broadcast_mode(struct net *net); +u32 tipc_bcast_get_mode(struct net *net); u32 tipc_bcast_get_broadcast_ratio(struct net *net); void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, diff --git a/net/tipc/link.c b/net/tipc/link.c index d4675e922a8f..da0b30733549 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -2646,7 +2646,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) struct nlattr *attrs; struct nlattr *prop; struct tipc_net *tn = net_generic(net, tipc_net_id); - u32 bc_mode = tipc_bcast_get_broadcast_mode(net); + u32 bc_mode = tipc_bcast_get_mode(net); u32 bc_ratio = 
tipc_bcast_get_broadcast_ratio(net); struct tipc_link *bcl = tn->bcl; diff --git a/net/tipc/msg.h b/net/tipc/msg.h index 871feadbbc19..d53914316684 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h @@ -409,6 +409,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) msg_set_bits(m, 1, 25, 0xf, err); } +static inline void msg_set_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 28, 0x1, 1); +} + +static inline u32 msg_is_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 28, 0x1); +} + +static inline void msg_set_last_bulk(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 27, 0x1, 1); +} + +static inline u32 msg_is_last_bulk(struct tipc_msg *m) +{ + return msg_bits(m, 1, 27, 0x1); +} + +static inline void msg_set_non_legacy(struct tipc_msg *m) +{ + msg_set_bits(m, 1, 26, 0x1, 1); +} + +static inline u32 msg_is_legacy(struct tipc_msg *m) +{ + return !msg_bits(m, 1, 26, 0x1); +} + static inline u32 msg_reroute_cnt(struct tipc_msg *m) { return msg_bits(m, 1, 21, 0xf); @@ -538,6 +568,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) msg_set_word(m, 4, p); } +static inline u16 msg_named_seqno(struct tipc_msg *m) +{ + return msg_bits(m, 4, 0, 0xffff); +} + +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) +{ + msg_set_bits(m, 4, 0, 0xffff, n); +} + static inline u32 msg_destport(struct tipc_msg *m) { return msg_word(m, 5); diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 5feaf3b67380..6cbd4b271768 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) pr_warn("Publication distribution failure\n"); return NULL; } - + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); return skb; @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) struct sk_buff 
*tipc_named_withdraw(struct net *net, struct publication *publ) { struct name_table *nt = tipc_name_table(net); - struct sk_buff *buf; struct distr_item *item; + struct sk_buff *skb; write_lock_bh(&nt->cluster_scope_lock); list_del(&publ->binding_node); @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) if (publ->scope == TIPC_NODE_SCOPE) return NULL; - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); - if (!buf) { + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); + if (!skb) { pr_warn("Withdrawal distribution failure\n"); return NULL; } - - item = (struct distr_item *)msg_data(buf_msg(buf)); + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); + msg_set_non_legacy(buf_msg(skb)); + item = (struct distr_item *)msg_data(buf_msg(skb)); publ_to_item(item, publ); - return buf; + return skb; } /** @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) * @pls: linked list of publication items to be packed into buffer chain */ static void named_distribute(struct net *net, struct sk_buff_head *list, - u32 dnode, struct list_head *pls) + u32 dnode, struct list_head *pls, u16 seqno) { struct publication *publ; struct sk_buff *skb = NULL; @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE; u32 msg_rem = msg_dsz; + struct tipc_msg *hdr; list_for_each_entry(publ, pls, binding_node) { /* Prepare next buffer: */ @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, pr_warn("Bulk publication failure\n"); return; } - msg_set_bc_ack_invalid(buf_msg(skb), true); - item = (struct distr_item *)msg_data(buf_msg(skb)); + hdr = buf_msg(skb); + msg_set_bc_ack_invalid(hdr, true); + msg_set_bulk(hdr); + msg_set_non_legacy(hdr); + item = (struct distr_item *)msg_data(hdr); } /* Pack publication into message: */ @@ 
-176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, } } if (skb) { - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); + hdr = buf_msg(skb); + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); __skb_queue_tail(list, skb); } + hdr = buf_msg(skb_peek_tail(list)); + msg_set_last_bulk(hdr); + msg_set_named_seqno(hdr, seqno); } /** * tipc_named_node_up - tell specified node about all publications by this node */ -void tipc_named_node_up(struct net *net, u32 dnode) +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) { struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); struct sk_buff_head head; + u16 seqno; __skb_queue_head_init(&head); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests++; + seqno = nt->snd_nxt; + spin_unlock_bh(&tn->nametbl_lock); read_lock_bh(&nt->cluster_scope_lock); - named_distribute(net, &head, dnode, &nt->cluster_scope); + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); tipc_node_xmit(net, &head, dnode, 0); read_unlock_bh(&nt->cluster_scope_lock); } @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) spin_unlock_bh(&tn->nametbl_lock); } -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities) { + struct name_table *nt = tipc_name_table(net); + struct tipc_net *tn = tipc_net(net); + struct publication *publ, *tmp; list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) tipc_publ_purge(net, publ, addr); tipc_dist_queue_purge(net, addr); + spin_lock_bh(&tn->nametbl_lock); + if (!(capabilities & TIPC_NAMED_BCAST)) + nt->rc_dests--; + spin_unlock_bh(&tn->nametbl_lock); } /** @@ -295,29 +320,61 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, return false; } +struct 
sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) +{ + struct sk_buff *skb, *tmp; + struct tipc_msg *hdr; + u16 seqno; + + skb_queue_walk_safe(namedq, skb, tmp) { + skb_linearize(skb); + hdr = buf_msg(skb); + seqno = msg_named_seqno(hdr); + if (msg_is_last_bulk(hdr)) { + *rcv_nxt = seqno; + *open = true; + } + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { + __skb_unlink(skb, namedq); + return skb; + } + + if (*open && (*rcv_nxt == seqno)) { + (*rcv_nxt)++; + __skb_unlink(skb, namedq); + return skb; + } + + if (less(seqno, *rcv_nxt)) { + __skb_unlink(skb, namedq); + kfree_skb(skb); + continue; + } + } + return NULL; +} + /** * tipc_named_rcv - process name table update messages sent by another node */ -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open) { - struct tipc_net *tn = net_generic(net, tipc_net_id); - struct tipc_msg *msg; + struct tipc_net *tn = tipc_net(net); struct distr_item *item; - uint count; - u32 node; + struct tipc_msg *hdr; struct sk_buff *skb; - int mtype; + u32 count, node = 0; spin_lock_bh(&tn->nametbl_lock); - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { - skb_linearize(skb); - msg = buf_msg(skb); - mtype = msg_type(msg); - item = (struct distr_item *)msg_data(msg); - count = msg_data_sz(msg) / ITEM_SIZE; - node = msg_orignode(msg); + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { + hdr = buf_msg(skb); + node = msg_orignode(hdr); + item = (struct distr_item *)msg_data(hdr); + count = msg_data_sz(hdr) / ITEM_SIZE; while (count--) { - tipc_update_nametbl(net, item, node, mtype); + tipc_update_nametbl(net, item, node, msg_type(hdr)); item++; } kfree_skb(skb); @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) publ->node = self; list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) publ->node = self; - + nt->rc_dests = 0; 
spin_unlock_bh(&tn->nametbl_lock); } diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h index 63fc73e0fa6c..092323158f06 100644 --- a/net/tipc/name_distr.h +++ b/net/tipc/name_distr.h @@ -67,11 +67,14 @@ struct distr_item { __be32 key; }; +void tipc_named_bcast(struct net *net, struct sk_buff *skb); struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); -void tipc_named_node_up(struct net *net, u32 dnode); -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, + u16 *rcv_nxt, bool *open); void tipc_named_reinit(struct net *net); -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, + u32 addr, u16 capabilities); #endif diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 359b2bc888cf..2ac33d32edc2 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, struct tipc_net *tn = tipc_net(net); struct publication *p = NULL; struct sk_buff *skb = NULL; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, nt->local_publ_count++; skb = tipc_named_publish(net, p); } + rc_dests = nt->rc_dests; exit: spin_unlock_bh(&tn->nametbl_lock); if (skb) - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return p; + } /** @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 self = tipc_own_addr(net); struct sk_buff *skb = NULL; struct publication *p; + u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, 
u32 type, u32 lower, pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", type, lower, upper, key); } + rc_dests = nt->rc_dests; spin_unlock_bh(&tn->nametbl_lock); if (skb) { - tipc_node_broadcast(net, skb); + tipc_node_broadcast(net, skb, rc_dests); return 1; } return 0; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 728bc7016c38..8064e1986e2c 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -106,6 +106,8 @@ struct name_table { struct list_head cluster_scope; rwlock_t cluster_scope_lock; u32 local_publ_count; + u32 rc_dests; + u32 snd_nxt; }; int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/tipc/node.c b/net/tipc/node.c index 803a3a6d0f50..ad8d7bce1f98 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -75,6 +75,8 @@ struct tipc_bclink_entry { struct sk_buff_head arrvq; struct sk_buff_head inputq2; struct sk_buff_head namedq; + u16 named_rcv_nxt; + bool named_open; }; /** @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) write_unlock_bh(&n->lock); if (flags & TIPC_NOTIFY_NODE_DOWN) - tipc_publ_notify(net, publ_list, addr); + tipc_publ_notify(net, publ_list, addr, n->capabilities); if (flags & TIPC_NOTIFY_NODE_UP) - tipc_named_node_up(net, addr); + tipc_named_node_up(net, addr, n->capabilities); if (flags & TIPC_NOTIFY_LINK_UP) { tipc_mon_peer_up(net, addr, bearer_id); @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) return 0; } -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) { + struct sk_buff_head xmitq; struct sk_buff *txskb; struct tipc_node *n; + u16 dummy; u32 dst; + /* Use broadcast if all nodes support it */ + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { + __skb_queue_head_init(&xmitq); + __skb_queue_tail(&xmitq, skb); + tipc_bcast_xmit(net, &xmitq, &dummy); + return; + } + + /* 
Otherwise use legacy replicast method */ rcu_read_lock(); list_for_each_entry_rcu(n, tipc_nodes(net), list) { dst = n->addr; @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) tipc_node_xmit_skb(net, txskb, dst, 0); } rcu_read_unlock(); - kfree_skb(skb); } @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ if (!skb_queue_empty(&n->bc_entry.namedq)) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); /* If reassembly or retransmission failure => reset all links to peer */ if (rc & TIPC_LINK_DOWN_EVT) @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) tipc_node_link_down(n, bearer_id, false); if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) - tipc_named_rcv(net, &n->bc_entry.namedq); + tipc_named_rcv(net, &n->bc_entry.namedq, + &n->bc_entry.named_rcv_nxt, + &n->bc_entry.named_open); if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) tipc_node_mcast_rcv(n); diff --git a/net/tipc/node.h b/net/tipc/node.h index a6803b449a2c..9f6f13f1604f 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -55,7 +55,8 @@ enum { TIPC_MCAST_RBCTL = (1 << 7), TIPC_GAP_ACK_BLOCK = (1 << 8), TIPC_TUNNEL_ENHANCED = (1 << 9), - TIPC_NAGLE = (1 << 10) + TIPC_NAGLE = (1 << 10), + TIPC_NAMED_BCAST = (1 << 11) }; #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ @@ -68,7 +69,8 @@ enum { TIPC_MCAST_RBCTL | \ TIPC_GAP_ACK_BLOCK | \ TIPC_TUNNEL_ENHANCED | \ - TIPC_NAGLE) + TIPC_NAGLE | \ + TIPC_NAMED_BCAST) #define INVALID_BEARER_ID -1 @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, u32 selector); void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); -void 
tipc_node_broadcast(struct net *net, struct sk_buff *skb); +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests); int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); -- 2.25.1 |
From: Hoang H. Le <hoa...@de...> - 2020-06-05 04:51:00
|
Hi Jon, See my inline comment. Regards, Hoang -----Original Message----- From: Jon Maloy <jm...@re...> Sent: Thursday, June 4, 2020 8:42 PM To: Hoang Huu Le <hoa...@de...>; ma...@do...; yin...@wi...; tip...@li... Subject: Re: [net-next] tipc: update a binding service via broadcast On 6/4/20 5:21 AM, Hoang Huu Le wrote: > Currently, updating binding table (add service binding to > name table/withdraw a service binding) is being sent over replicast. > However, if we are scaling up clusters to > 100 nodes/containers this > method is less affection because of looping through nodes in a cluster one > by one. > > It is worth to use broadcast to update a binding service. s/Then binding table updates in/all nodes in one shoy. This way, the binding table can be updated on all peer nodes in one shot./ Broadcast is used when all peer nodes, as indicated by a new capability flag TIPC_NAMED_BCAST, support reception of this message type. Four problems need to be considered when introducing this feature. 1) When establishing a link to a new peer node we still update this by a unicast 'bulk' update. This may lead to race conditions, where a later broadcast publication/withdrawal bypass the 'bulk', resulting in disordered publications, or even that a withdrawal may arrive before the corresponding publication. We solve this by adding an 'is_last_bulk' bit in the last bulk messages so that it can be distinguished from all other messages. Only when this message has arrived do we open up for reception of broadcast publications/withdrawals. 2) When a first legacy node is added to the cluster all distribution will switch over to use the legacy 'replicast' method, while the opposite happens when the last legacy node leaves the cluster. This entails another risk of message disordering that has to be handled. We solve this by adding a sequence number to the broadcast/replicast messages, so that disordering can be discovered and corrected. 
Note however that we don't need to consider potential message loss or duplication at this protocol level. 3) Bulk messages don't contain any sequence numbers, and will always arrive in order. Hence we must exempt those from the sequence number control and deliver them unconditionally. We solve this by adding a new 'is_bulk' bit in those messages so that they can be recognized. 4) Legacy messages, which don't contain any new bits or sequence numbers, but neither can arrive out of order, also need to be exempt from the initial synchronization and sequence number check, and delivered unconditionally. Therefore, we add another 'is_not_legacy' bit to all new messages so that those can be distinguished from legacy messages and the latter delivered directly. > Signed-off-by: Hoang Huu Le <hoa...@de...> > --- > net/tipc/bcast.c | 6 +-- > net/tipc/bcast.h | 4 +- > net/tipc/link.c | 2 +- > net/tipc/msg.h | 40 +++++++++++++++ > net/tipc/name_distr.c | 115 +++++++++++++++++++++++++++++++----------- > net/tipc/name_distr.h | 9 ++-- > net/tipc/name_table.c | 9 +++- > net/tipc/name_table.h | 2 + > net/tipc/node.c | 28 +++++++--- > net/tipc/node.h | 8 +-- > 10 files changed, 175 insertions(+), 48 deletions(-) > > diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c > index 4c20be08b9c4..9d085ad6f0cf 100644 > --- a/net/tipc/bcast.c > +++ b/net/tipc/bcast.c > @@ -249,8 +249,8 @@ static void tipc_bcast_select_xmit_method(struct net *net, int dests, > * Consumes the buffer chain. 
> * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE > */ > -static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > - u16 *cong_link_cnt) > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt) > { > struct tipc_link *l = tipc_bc_sndlink(net); > struct sk_buff_head xmitq; > @@ -746,7 +746,7 @@ void tipc_nlist_purge(struct tipc_nlist *nl) > nl->local = false; > } > > -u32 tipc_bcast_get_broadcast_mode(struct net *net) > +u32 tipc_bcast_get_mode(struct net *net) > { > struct tipc_bc_base *bb = tipc_bc_base(net); > > diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h > index 9e847d9617d3..b3b883e2a823 100644 > --- a/net/tipc/bcast.h > +++ b/net/tipc/bcast.h > @@ -89,6 +89,8 @@ void tipc_bcast_toggle_rcast(struct net *net, bool supp); > int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts, > struct tipc_mc_method *method, struct tipc_nlist *dests, > u16 *cong_link_cnt); > +int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, > + u16 *cong_link_cnt); > int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb); > void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l, > struct tipc_msg *hdr); > @@ -98,7 +100,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg); > int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[]); > int tipc_bclink_reset_stats(struct net *net); > > -u32 tipc_bcast_get_broadcast_mode(struct net *net); > +u32 tipc_bcast_get_mode(struct net *net); > u32 tipc_bcast_get_broadcast_ratio(struct net *net); > > void tipc_mcast_filter_msg(struct net *net, struct sk_buff_head *defq, > diff --git a/net/tipc/link.c b/net/tipc/link.c > index d4675e922a8f..da0b30733549 100644 > --- a/net/tipc/link.c > +++ b/net/tipc/link.c > @@ -2646,7 +2646,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg) > struct nlattr *attrs; > struct nlattr *prop; > struct tipc_net *tn = net_generic(net, tipc_net_id); > - u32 bc_mode 
= tipc_bcast_get_broadcast_mode(net); > + u32 bc_mode = tipc_bcast_get_mode(net); > u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net); > struct tipc_link *bcl = tn->bcl; > > diff --git a/net/tipc/msg.h b/net/tipc/msg.h > index 871feadbbc19..d53914316684 100644 > --- a/net/tipc/msg.h > +++ b/net/tipc/msg.h > @@ -409,6 +409,36 @@ static inline void msg_set_errcode(struct tipc_msg *m, u32 err) > msg_set_bits(m, 1, 25, 0xf, err); > } > > +static inline void msg_set_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 28, 0x1, 1); > +} > + > +static inline u32 msg_is_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 28, 0x1); > +} > + > +static inline void msg_set_last_bulk(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 27, 0x1, 1); > +} > + > +static inline u32 msg_is_last_bulk(struct tipc_msg *m) > +{ > + return msg_bits(m, 1, 27, 0x1); > +} > + > +static inline void msg_set_non_legacy(struct tipc_msg *m) > +{ > + msg_set_bits(m, 1, 26, 0x1, 1); > +} > + > +static inline u32 msg_is_legacy(struct tipc_msg *m) > +{ > + return !msg_bits(m, 1, 26, 0x1); > +} > + > static inline u32 msg_reroute_cnt(struct tipc_msg *m) > { > return msg_bits(m, 1, 21, 0xf); > @@ -538,6 +568,16 @@ static inline void msg_set_origport(struct tipc_msg *m, u32 p) > msg_set_word(m, 4, p); > } > > +static inline u16 msg_named_seqno(struct tipc_msg *m) > +{ > + return msg_bits(m, 4, 0, 0xffff); > +} > + > +static inline void msg_set_named_seqno(struct tipc_msg *m, u16 n) > +{ > + msg_set_bits(m, 4, 0, 0xffff, n); > +} > + > static inline u32 msg_destport(struct tipc_msg *m) > { > return msg_word(m, 5); > diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c > index 5feaf3b67380..f4bbb8e792c9 100644 > --- a/net/tipc/name_distr.c > +++ b/net/tipc/name_distr.c > @@ -102,7 +102,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > pr_warn("Publication distribution failure\n"); > return NULL; > } > - > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + 
msg_set_non_legacy(buf_msg(skb)); > item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > return skb; > @@ -114,8 +115,8 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ) > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > { > struct name_table *nt = tipc_name_table(net); > - struct sk_buff *buf; > struct distr_item *item; > + struct sk_buff *skb; > > write_lock_bh(&nt->cluster_scope_lock); > list_del(&publ->binding_node); > @@ -123,15 +124,16 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > if (publ->scope == TIPC_NODE_SCOPE) > return NULL; > > - buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > - if (!buf) { > + skb = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0); > + if (!skb) { > pr_warn("Withdrawal distribution failure\n"); > return NULL; > } > - > - item = (struct distr_item *)msg_data(buf_msg(buf)); > + msg_set_named_seqno(buf_msg(skb), nt->snd_nxt++); > + msg_set_non_legacy(buf_msg(skb)); > + item = (struct distr_item *)msg_data(buf_msg(skb)); > publ_to_item(item, publ); > - return buf; > + return skb; > } > > /** > @@ -141,7 +143,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) > * @pls: linked list of publication items to be packed into buffer chain > */ > static void named_distribute(struct net *net, struct sk_buff_head *list, > - u32 dnode, struct list_head *pls) > + u32 dnode, struct list_head *pls, u16 seqno) > { > struct publication *publ; > struct sk_buff *skb = NULL; > @@ -149,6 +151,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0, false) - INT_H_SIZE) / > ITEM_SIZE) * ITEM_SIZE; > u32 msg_rem = msg_dsz; > + struct tipc_msg *hdr; > > list_for_each_entry(publ, pls, binding_node) { > /* Prepare next buffer: */ > @@ -159,8 +162,11 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, 
> pr_warn("Bulk publication failure\n"); > return; > } > - msg_set_bc_ack_invalid(buf_msg(skb), true); > - item = (struct distr_item *)msg_data(buf_msg(skb)); > + hdr = buf_msg(skb); > + msg_set_bc_ack_invalid(hdr, true); > + msg_set_bulk(hdr); > + msg_set_non_legacy(hdr); > + item = (struct distr_item *)msg_data(hdr); > } > > /* Pack publication into message: */ > @@ -176,24 +182,35 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, > } > } > if (skb) { > - msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem)); > + hdr = buf_msg(skb); > + msg_set_size(hdr, INT_H_SIZE + (msg_dsz - msg_rem)); > skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem)); > __skb_queue_tail(list, skb); > } > + hdr = buf_msg(skb_peek_tail(list)); > + msg_set_last_bulk(hdr); > + msg_set_named_seqno(hdr, seqno); > } > > /** > * tipc_named_node_up - tell specified node about all publications by this node > */ > -void tipc_named_node_up(struct net *net, u32 dnode) > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities) > { > struct name_table *nt = tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > struct sk_buff_head head; > + u16 seqno; > > __skb_queue_head_init(&head); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests++; > + seqno = nt->snd_nxt; > + spin_unlock_bh(&tn->nametbl_lock); > > read_lock_bh(&nt->cluster_scope_lock); > - named_distribute(net, &head, dnode, &nt->cluster_scope); > + named_distribute(net, &head, dnode, &nt->cluster_scope, seqno); > tipc_node_xmit(net, &head, dnode, 0); > read_unlock_bh(&nt->cluster_scope_lock); > } > @@ -245,13 +262,21 @@ static void tipc_dist_queue_purge(struct net *net, u32 addr) > spin_unlock_bh(&tn->nametbl_lock); > } > > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities) > { > + struct name_table *nt = 
tipc_name_table(net); > + struct tipc_net *tn = tipc_net(net); > + > struct publication *publ, *tmp; > > list_for_each_entry_safe(publ, tmp, nsub_list, binding_node) > tipc_publ_purge(net, publ, addr); > tipc_dist_queue_purge(net, addr); > + spin_lock_bh(&tn->nametbl_lock); > + if (!(capabilities & TIPC_NAMED_BCAST)) > + nt->rc_dests--; > + spin_unlock_bh(&tn->nametbl_lock); > } > > /** > @@ -295,29 +320,61 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, > return false; > } > > +struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > +{ > + struct tipc_msg *hdr; > + struct sk_buff *skb, *tmp; > + u16 seqno; > + > + skb_queue_walk_safe(namedq, skb, tmp) { > + skb_linearize(skb); > + hdr = buf_msg(skb); > + seqno = msg_named_seqno(hdr); > + if (msg_is_last_bulk(hdr)) { > + *rcv_nxt = seqno; > + *open = true; > + } > + if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) { > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (*open && (*rcv_nxt == seqno)) { > + (*rcv_nxt)++; > + __skb_unlink(skb, namedq); > + return skb; > + } > + > + if (less(seqno, *rcv_nxt)) { > + __skb_unlink(skb, namedq); > + kfree_skb(skb); > + continue; > + } This test is not needed, since we will never receive duplicate messages. [Hoang] No, it could happen as below scenario (not dup): - rcv_next: 40 - replicast: 40..49 -> in transmit queue - broadcast: 50..60 -> in transmit queue Node lost, re-established, sync rcv_nxt: 60 (via bulk and last bulk), *open = false Where as in namedq: [40..60] <- those should be dropped. 
Sum-up, these publications in namedq become obsoleted when received 'last_bulk' > + } > + return NULL; > +} > + > /** > * tipc_named_rcv - process name table update messages sent by another node > */ > -void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq) > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open) > { > - struct tipc_net *tn = net_generic(net, tipc_net_id); > - struct tipc_msg *msg; > + struct tipc_net *tn = tipc_net(net); > struct distr_item *item; > - uint count; > - u32 node; > + struct tipc_msg *hdr; > struct sk_buff *skb; > - int mtype; > + u32 count, node = 0; > > spin_lock_bh(&tn->nametbl_lock); > - for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) { > - skb_linearize(skb); > - msg = buf_msg(skb); > - mtype = msg_type(msg); > - item = (struct distr_item *)msg_data(msg); > - count = msg_data_sz(msg) / ITEM_SIZE; > - node = msg_orignode(msg); > + while ((skb = tipc_named_dequeue(namedq, rcv_nxt, open))) { > + hdr = buf_msg(skb); > + node = msg_orignode(hdr); > + item = (struct distr_item *)msg_data(hdr); > + count = msg_data_sz(hdr) / ITEM_SIZE; > while (count--) { > - tipc_update_nametbl(net, item, node, mtype); > + tipc_update_nametbl(net, item, node, msg_type(hdr)); > item++; > } > kfree_skb(skb); > @@ -345,6 +402,6 @@ void tipc_named_reinit(struct net *net) > publ->node = self; > list_for_each_entry_rcu(publ, &nt->cluster_scope, binding_node) > publ->node = self; > - > + nt->rc_dests = 0; > spin_unlock_bh(&tn->nametbl_lock); > } > diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h > index 63fc73e0fa6c..092323158f06 100644 > --- a/net/tipc/name_distr.h > +++ b/net/tipc/name_distr.h > @@ -67,11 +67,14 @@ struct distr_item { > __be32 key; > }; > > +void tipc_named_bcast(struct net *net, struct sk_buff *skb); > struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ); > struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ); > 
-void tipc_named_node_up(struct net *net, u32 dnode); > -void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue); > +void tipc_named_node_up(struct net *net, u32 dnode, u16 capabilities); > +void tipc_named_rcv(struct net *net, struct sk_buff_head *namedq, > + u16 *rcv_nxt, bool *open); > void tipc_named_reinit(struct net *net); > -void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); > +void tipc_publ_notify(struct net *net, struct list_head *nsub_list, > + u32 addr, u16 capabilities); > > #endif > diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c > index 359b2bc888cf..2ac33d32edc2 100644 > --- a/net/tipc/name_table.c > +++ b/net/tipc/name_table.c > @@ -729,6 +729,7 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > struct tipc_net *tn = tipc_net(net); > struct publication *p = NULL; > struct sk_buff *skb = NULL; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -743,12 +744,14 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > nt->local_publ_count++; > skb = tipc_named_publish(net, p); > } > + rc_dests = nt->rc_dests; > exit: > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return p; > + > } > > /** > @@ -762,6 +765,7 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > u32 self = tipc_own_addr(net); > struct sk_buff *skb = NULL; > struct publication *p; > + u32 rc_dests; > > spin_lock_bh(&tn->nametbl_lock); > > @@ -775,10 +779,11 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, > pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", > type, lower, upper, key); > } > + rc_dests = nt->rc_dests; > spin_unlock_bh(&tn->nametbl_lock); > > if (skb) { > - tipc_node_broadcast(net, skb); > + tipc_node_broadcast(net, skb, rc_dests); > return 1; > } > return 0; > diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h > index 
728bc7016c38..8064e1986e2c 100644 > --- a/net/tipc/name_table.h > +++ b/net/tipc/name_table.h > @@ -106,6 +106,8 @@ struct name_table { > struct list_head cluster_scope; > rwlock_t cluster_scope_lock; > u32 local_publ_count; > + u32 rc_dests; > + u32 snd_nxt; > }; > > int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); > diff --git a/net/tipc/node.c b/net/tipc/node.c > index 803a3a6d0f50..ad8d7bce1f98 100644 > --- a/net/tipc/node.c > +++ b/net/tipc/node.c > @@ -75,6 +75,8 @@ struct tipc_bclink_entry { > struct sk_buff_head arrvq; > struct sk_buff_head inputq2; > struct sk_buff_head namedq; > + u16 named_rcv_nxt; > + bool named_open; > }; > > /** > @@ -396,10 +398,10 @@ static void tipc_node_write_unlock(struct tipc_node *n) > write_unlock_bh(&n->lock); > > if (flags & TIPC_NOTIFY_NODE_DOWN) > - tipc_publ_notify(net, publ_list, addr); > + tipc_publ_notify(net, publ_list, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_NODE_UP) > - tipc_named_node_up(net, addr); > + tipc_named_node_up(net, addr, n->capabilities); > > if (flags & TIPC_NOTIFY_LINK_UP) { > tipc_mon_peer_up(net, addr, bearer_id); > @@ -1729,12 +1731,23 @@ int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq) > return 0; > } > > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > +void tipc_node_broadcast(struct net *net, struct sk_buff *skb, int rc_dests) > { > + struct sk_buff_head xmitq; > struct sk_buff *txskb; > struct tipc_node *n; > + u16 dummy; > u32 dst; > > + /* Use broadcast if all nodes support it */ > + if (!rc_dests && tipc_bcast_get_mode(net) != BCLINK_MODE_RCAST) { > + __skb_queue_head_init(&xmitq); > + __skb_queue_tail(&xmitq, skb); > + tipc_bcast_xmit(net, &xmitq, &dummy); > + return; > + } > + > + /* Otherwise use legacy replicast method */ > rcu_read_lock(); > list_for_each_entry_rcu(n, tipc_nodes(net), list) { > dst = n->addr; > @@ -1749,7 +1762,6 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb) > 
tipc_node_xmit_skb(net, txskb, dst, 0); > } > rcu_read_unlock(); > - > kfree_skb(skb); > } > > @@ -1844,7 +1856,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id > > /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */ > if (!skb_queue_empty(&n->bc_entry.namedq)) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > /* If reassembly or retransmission failure => reset all links to peer */ > if (rc & TIPC_LINK_DOWN_EVT) > @@ -2109,7 +2123,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b) > tipc_node_link_down(n, bearer_id, false); > > if (unlikely(!skb_queue_empty(&n->bc_entry.namedq))) > - tipc_named_rcv(net, &n->bc_entry.namedq); > + tipc_named_rcv(net, &n->bc_entry.namedq, > + &n->bc_entry.named_rcv_nxt, > + &n->bc_entry.named_open); > > if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1))) > tipc_node_mcast_rcv(n); > diff --git a/net/tipc/node.h b/net/tipc/node.h > index a6803b449a2c..9f6f13f1604f 100644 > --- a/net/tipc/node.h > +++ b/net/tipc/node.h > @@ -55,7 +55,8 @@ enum { > TIPC_MCAST_RBCTL = (1 << 7), > TIPC_GAP_ACK_BLOCK = (1 << 8), > TIPC_TUNNEL_ENHANCED = (1 << 9), > - TIPC_NAGLE = (1 << 10) > + TIPC_NAGLE = (1 << 10), > + TIPC_NAMED_BCAST = (1 << 11) > }; > > #define TIPC_NODE_CAPABILITIES (TIPC_SYN_BIT | \ > @@ -68,7 +69,8 @@ enum { > TIPC_MCAST_RBCTL | \ > TIPC_GAP_ACK_BLOCK | \ > TIPC_TUNNEL_ENHANCED | \ > - TIPC_NAGLE) > + TIPC_NAGLE | \ > + TIPC_NAMED_BCAST) > > #define INVALID_BEARER_ID -1 > > @@ -101,7 +103,7 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, > u32 selector); > void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr); > void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr); > -void tipc_node_broadcast(struct net *net, struct sk_buff *skb); > +void tipc_node_broadcast(struct net *net, 
struct sk_buff *skb, int rc_dests); > int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); > void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); > int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected); Provided the testing didn't reveal any new problems; Acked-by: Jon Maloy <jm...@re...> |
From: Hoang H. Le <hoa...@de...> - 2020-06-05 02:40:26
|
From: Jon Maloy <jm...@re...> Sent: Thursday, June 4, 2020 8:59 PM To: Hoang Huu Le <hoa...@de...>; tip...@li...; tipc-dek <tip...@de...>; Tuong Tong Lien <tuo...@de...>; Xin Long <luc...@gm...>; Tung Quang Nguyen <tun...@de...>; Ying Xue <yin...@wi...> Subject: Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast On 6/4/20 5:14 AM, Hoang Huu Le wrote: Hi Jon, Please see my inline comment Regards, Hoang From: Jon Maloy <jm...@re...><mailto:jm...@re...> Sent: Friday, May 29, 2020 11:11 PM To: tip...@li...<mailto:tip...@li...>; tipc-dek <tip...@de...><mailto:tip...@de...>; Tuong Tong Lien <tuo...@de...><mailto:tuo...@de...>; Xin Long <luc...@gm...><mailto:luc...@gm...>; Tung Quang Nguyen <tun...@de...><mailto:tun...@de...>; Ying Xue <yin...@wi...><mailto:yin...@wi...> Subject: Fwd: Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast Added more recipients. -------- Forwarded Message -------- Subject: Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast Date: Fri, 29 May 2020 12:08:02 -0400 From: Jon Maloy <jm...@re...><mailto:jm...@re...> To: Hoang Huu Le <hoa...@de...><mailto:hoa...@de...>, ma...@do...<mailto:ma...@do...> <ma...@do...><mailto:ma...@do...> Hi Hoang, See below. On 5/27/20 6:49 AM, Hoang Huu Le wrote: Hi Jon, I got DRAFT version base on your idea (attachment file). But from my point, this version introduce too much code implementation at sending side. I don't think this is bright idea to keep rcast_list and bcast_list in the name table. I think we should find out a new way or just ignore the feature. Yes, you are right. I came up with a new idea, to just add a sequence number to the broadcast/replicast messages and re-order them at reception. This even handles the case if the first broadcast message arrives before the first bulk message, something we have not anticipated before. I couldn't resist the temptation trying to code it, as you can see i the patch I just sent out. 
It is totally untested, I just added the code as I thought it should be and made sure it compiled. There is still a little too much new code to my taste, but this might be a way forward. Please give your feedback on this. I also noticed a couple of things while working with this: 1) There is still an 'expires' field in the tipc_mcast_method, and it seems to even be considered when switching bcast/rcast. The whole point of adding the mcast synchronization mechanism was to get rid of this delay. Have you tested that synchronization really works without the 'expires' ? [Hoang] Yes, I did. I issue the command below to force rcast/bcast regardless ‘expires’. $ tipc link set bro REPLICAST or $tipc link set bro BROADCAST Yes, but what happens when the protocol selects by itself, based on number of destinations, and this number has just passed a threshold? [Hoang] It is really hard to find a strategy making this happen. So, I haven’t covered it in the past but I will try. 2) There are some remnants of old code for the name table dist_queue. This functionality was made redundant by me at least two years ago, so this should be cleaned up. [Hoang] I will check and clean that stuff up in a separate commit. 3) We might have a potential race condition when new nodes come up, so that publications are distributed twice. a) A publication is added to the name table, and the name table lock is released. b) A new node comes up, and the new publication is delivered in the bulk message. c) The broadcast of the publication goes ahead and sends it out to all nodes, even the one that just came up. d) We end up with a double publication on one of the nodes. e) One of those will linger in the name table after the publication is withdrawn. I have never seen this happen, and my analysis might be wrong, but to me this looks like a possible scenario. 
Note that my patch doesn't fix this, but we could possibly arrange it by adding a 'distributed' flag i the publication item on the sending side, so that the bulk distribution will ignore it. [Hoang] I try to simulate as your scenario description on 8 nodes with publication >100 services at the same time and bring interface down/up. Sometimes I got below error logs: [ 537.322414] tipc: Failed to remove binding 1000,2 from 1001001 […] [ 537.358957] tipc: Failed to remove binding 1000,11 from 1001001 I’m not sure above counting as bug whether or not. If yes, we also fix this in another commit too. This is not what I expected, but might be another manifestation of the same problem. We are probably observing a replicast withdraw arriving before the corresponding bulk publication. If you see a binding <1000,2> in the table after this printout that would be a confirmation. [Hoang] No, I don’t see this in my testing. For my scenario: Do you see duplicate publication instances before you do withdraw? Do you see lingering publication after a withdraw? Luckily, all of this will be fixed with the new broadcast distribution protocol. [Hoang] I think so, because of none of them happen in my testing. Regards ///jon Regards ///jon Regards, Hoang -----Original Message----- From: Jon Maloy <jm...@re...><mailto:jm...@re...> Sent: Thursday, May 21, 2020 9:44 PM To: Hoang Huu Le <hoa...@de...><mailto:hoa...@de...>; ma...@do...<mailto:ma...@do...> Subject: Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast Hi, I have one more comment below. Looking forward to your feedback. ///jon On 5/20/20 9:03 PM, Hoang Huu Le wrote: Yeah, thanks Jon. I will investigate more on your idea when I finish the issue with lab infrastructure. 
Hoang -----Original Message----- From: Jon Maloy <jm...@re...><mailto:jm...@re...> Sent: Thursday, May 21, 2020 7:13 AM To: Hoang Huu Le <hoa...@de...><mailto:hoa...@de...>; ma...@do...<mailto:ma...@do...> Subject: Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast Hi Hoang, Below I try to summarize my newest proposal in relation to v2 of your patch. 1) The bulk can be sent just as is done now, with the addition that we add the NOT_LAST bit to the header. 2) We need a new capability bit identifying nodes which support broadcast NAME_DISTR. I don't see we can just reuse TIPC_MCAST_RBCTL, because the recipients need to have code for handling concurrent bulk/broadcast receptions. This bit is not added to the cluster capability word. 3) We need to keep two structs of type tipc_nlist in the name table. One contains tipc_dest structures for all nodes NOT supporting TIPC_MCAST_NAMEDISTR (rcast_list), and the other those which do (bcast_list). For more comments see below. On 5/12/20 6:22 AM, Hoang Huu Le wrote: Just forward the patch I mentioned. -----Original Message----- From: Hoang Le <hoa...@de...><mailto:hoa...@de...> Sent: Tuesday, November 19, 2019 5:01 PM To: jon...@er...<mailto:jon...@er...>; ma...@do...<mailto:ma...@do...>; tip...@de...<mailto:tip...@de...> Subject: [PATCH 2/2] tipc: update a binding service via broadcast Currently, updating binding table (add service binding to name table/withdraw a service binding) is being sent over replicast. However, if we are scaling up clusters to > 100 nodes/containers this method is less efficient because of looping through nodes in a cluster one by one. It is worth to use broadcast to update a binding service. Then binding table updates in all nodes in one shot. The mechanism is backward compatible as sync messages are silently dropped. 
v2: resolve synchronization problem when switching from unicast to broadcast Signed-off-by: Hoang Le <hoa...@de...><mailto:hoa...@de...> --- net/tipc/bcast.c | 3 ++- net/tipc/link.c | 6 ++++++ net/tipc/name_table.c | 33 ++++++++++++++++++++++++++++++--- net/tipc/name_table.h | 4 ++++ net/tipc/node.c | 32 ++++++++++++++++++++++++++++++++ net/tipc/node.h | 2 ++ 6 files changed, 76 insertions(+), 4 deletions(-) diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index e06f05d55534..44ed481fec47 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -324,7 +324,8 @@ static int tipc_mcast_send_sync(struct net *net, struct sk_buff *skb, hdr = buf_msg(skb); if (msg_user(hdr) == MSG_FRAGMENTER) hdr = msg_inner_hdr(hdr); - if (msg_type(hdr) != TIPC_MCAST_MSG) + if (msg_user(hdr) != NAME_DISTRIBUTOR && + msg_type(hdr) != TIPC_MCAST_MSG) return 0; /* Allocate dummy message */ diff --git a/net/tipc/link.c b/net/tipc/link.c index fb72031228c9..a2e9a64d5a0f 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -1190,6 +1190,8 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *inputq) { struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq; + struct name_table *nt = tipc_name_table(l->net); + struct sk_buff_head *defnq = &nt->defer_namedq; struct tipc_msg *hdr = buf_msg(skb); switch (msg_user(hdr)) { @@ -1211,6 +1213,10 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, case NAME_DISTRIBUTOR: l->bc_rcvlink->state = LINK_ESTABLISHED; skb_queue_tail(l->namedq, skb); + + spin_lock_bh(&defnq->lock); + tipc_mcast_filter_msg(l->net, defnq, l->namedq); Should not be needed here. You can instead do this in tipc_named_rcv(), using l->namedq as deferred queue and creating an temporary namedq queue on the stack for the messages ready to be delivered. 
We sort the messages in two steps: 1) If there are any chains of bulk messages, we sort those per source node into the temporary namedq and deliver them first, when a chain is complete. 2) If we find that a chain is incomplete we push it back to the head of n->namedq and return without further action. 3) When there are no bulk messages left in n->namedq we call tipc_mcast_filter_msgs() to sort the remaining messages into the temporary namedq, as far as possible, and deliver those which are ready to be delivered. + spin_unlock_bh(&defnq->lock); return true; case MSG_BUNDLER: case TUNNEL_PROTOCOL: diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 66a65c2cdb23..593dcd11357f 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -615,9 +615,11 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, struct tipc_net *tn = tipc_net(net); struct publication *p = NULL; struct sk_buff *skb = NULL; + bool rcast; spin_lock_bh(&tn->nametbl_lock); + rcast = nt->rcast; if (nt->local_publ_count >= TIPC_MAX_PUBL) { pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL); goto exit; @@ -632,8 +634,18 @@ struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, exit: spin_unlock_bh(&tn->nametbl_lock); - if (skb) - tipc_node_broadcast(net, skb); Here we make two calls to tipc_mcast_xmit(), one with method->rcast/mandatory and rcast_list,if not empty, and one with method->bcast/mandatory and bcast_list, if not empty. Actually, the latter should not be mandatory. We can easily imagine a situation where we start out with only legacy nodes, and then upgrade to bcast nodes, one by one. In the beginning we want tipc_mcast_xmit() to select rcast even for the bcast nodes, and then, as their proportion of the cluster grows, it should switch to bcast. This does mean that the method struct for bcast must be kept between the calls. I.e., another member of struct name_table. 
///jon skb must of course be cloned if necessary. + if (skb) { + /* Use broadcast if all nodes support broadcast NAME_DISTR */ + if (tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) { + tipc_node_broadcast_named_publish(net, skb, &rcast); + spin_lock_bh(&tn->nametbl_lock); + nt->rcast = rcast; + spin_unlock_bh(&tn->nametbl_lock); + } else { + /* Otherwise, be backwards compatible */ + tipc_node_broadcast(net, skb); + } Not needed according to above. return p; } @@ -648,8 +660,10 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 self = tipc_own_addr(net); struct sk_buff *skb = NULL; struct publication *p; + bool rcast; spin_lock_bh(&tn->nametbl_lock); + rcast = nt->rcast; p = tipc_nametbl_remove_publ(net, type, lower, upper, self, key); if (p) { @@ -664,7 +678,16 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, spin_unlock_bh(&tn->nametbl_lock); if (skb) { - tipc_node_broadcast(net, skb); + /* Use broadcast if all nodes support broadcast NAME_DISTR */ + if (tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) { + tipc_node_broadcast_named_publish(net, skb, &rcast); + spin_lock_bh(&tn->nametbl_lock); + nt->rcast = rcast; + spin_unlock_bh(&tn->nametbl_lock); + } else { + /* Otherwise, be backwards compatible */ + tipc_node_broadcast(net, skb); + } One or two calls to tipc_mcast_xmit(), just as above. return 1; } return 0; @@ -746,6 +769,9 @@ int tipc_nametbl_init(struct net *net) INIT_LIST_HEAD(&nt->cluster_scope); rwlock_init(&nt->cluster_scope_lock); tn->nametbl = nt; + /* 'bulk' updated messages via unicast */ + nt->rcast = true; + skb_queue_head_init(&nt->defer_namedq); Not needed. Now, node->namedq *is* the deferred queue. 
spin_lock_init(&tn->nametbl_lock); return 0; } @@ -784,6 +810,7 @@ void tipc_nametbl_stop(struct net *net) * publications, then release the name table */ spin_lock_bh(&tn->nametbl_lock); + skb_queue_purge(&nt->defer_namedq); for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { if (hlist_empty(&nt->services[i])) continue; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index f79066334cc8..b8cdf2a29d48 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -95,6 +95,8 @@ struct publication { * - used by name_distr to send bulk updates to new nodes * - used by name_distr during re-init of name table * @local_publ_count: number of publications issued by this node + * @defer_namedq: temporarily queue for 'synching' update + * @rcast: previous method used to publish/withdraw a service */ struct name_table { struct hlist_head services[TIPC_NAMETBL_SIZE]; @@ -102,6 +104,8 @@ struct name_table { struct list_head cluster_scope; rwlock_t cluster_scope_lock; u32 local_publ_count; + struct sk_buff_head defer_namedq; + bool rcast; }; int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); diff --git a/net/tipc/node.c b/net/tipc/node.c index aaf595613e6e..b058647fa78b 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -2981,3 +2981,35 @@ void tipc_node_pre_cleanup_net(struct net *exit_net) } rcu_read_unlock(); } + +int tipc_node_broadcast_named_publish(struct net *net, struct sk_buff *skb, + bool *rcast) +{ + struct tipc_mc_method method = {.rcast = *rcast}; + struct sk_buff_head xmitq; + struct tipc_nlist dests; + struct tipc_node *n; + u16 cong_link_cnt; + int rc = 0; + + __skb_queue_head_init(&xmitq); + __skb_queue_tail(&xmitq, skb); + + tipc_nlist_init(&dests, tipc_own_addr(net)); + rcu_read_lock(); + list_for_each_entry_rcu(n, tipc_nodes(net), list) { + if (in_own_node(net, n->addr)) + continue; + if (!node_is_up(n)) + continue; + tipc_nlist_add(&dests, n->addr); + } + rcu_read_unlock(); + + rc = tipc_mcast_xmit(net, &xmitq, &method, 
&dests, &cong_link_cnt); + *rcast = method.rcast; + + tipc_nlist_purge(&dests); + __skb_queue_purge(&xmitq); + return rc; +} Not needed any more. diff --git a/net/tipc/node.h b/net/tipc/node.h index a6803b449a2c..d7d19f9932b1 100644 --- a/net/tipc/node.h +++ b/net/tipc/node.h @@ -124,4 +124,6 @@ int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info); int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info); #endif void tipc_node_pre_cleanup_net(struct net *exit_net); +int tipc_node_broadcast_named_publish(struct net *net, struct sk_buff *skb, + bool *rcast); #endif There may of course be flaws or potential for improvements to this, but to me this should solve our problem without too much new complexity and code. ///jon |
From: David M. <da...@da...> - 2020-06-04 22:39:13
|
From: Tuong Lien <tuo...@de...> Date: Wed, 3 Jun 2020 12:06:01 +0700 > syzbot found the following crash: ... > Call Trace: > tipc_sendstream+0x4c/0x70 net/tipc/socket.c:1533 > sock_sendmsg_nosec net/socket.c:652 [inline] > sock_sendmsg+0xcf/0x120 net/socket.c:672 > ____sys_sendmsg+0x32f/0x810 net/socket.c:2352 > ___sys_sendmsg+0x100/0x170 net/socket.c:2406 > __sys_sendmmsg+0x195/0x480 net/socket.c:2496 > __do_sys_sendmmsg net/socket.c:2525 [inline] > __se_sys_sendmmsg net/socket.c:2522 [inline] > __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2522 > do_syscall_64+0xf6/0x7d0 arch/x86/entry/common.c:295 > entry_SYSCALL_64_after_hwframe+0x49/0xb3 > RIP: 0033:0x440199 > ... > > This bug was bisected to commit 0a3e060f340d ("tipc: add test for Nagle > algorithm effectiveness"). However, it is not the case, the trouble was > from the base in the case of zero data length message sending, we would > unexpectedly make an empty 'txq' queue after the 'tipc_msg_append()' in > Nagle mode. > > A similar crash can be generated even without the bisected patch but at > the link layer when it accesses the empty queue. > > We solve the issues by building at least one buffer to go with socket's > header and an optional data section that may be empty like what we had > with the 'tipc_msg_build()'. > > Note: the previous commit 4c21daae3dbc ("tipc: Fix NULL pointer > dereference in __tipc_sendstream()") is obsoleted by this one since the > 'txq' will be never empty and the check of 'skb != NULL' is unnecessary > but it is safe anyway. > > Reported-by: syz...@sy... > Fixes: c0bceb97db9e ("tipc: add smart nagle feature") > Acked-by: Jon Maloy <jm...@re...> > Signed-off-by: Tuong Lien <tuo...@de...> Applied and queued up for -stable, thanks. |
From: Jon M. <jm...@re...> - 2020-06-04 13:59:33
|
On 6/4/20 5:14 AM, Hoang Huu Le wrote: > > Hi Jon, > > Please see my inline comment > > Regards, > > Hoang > > *From:* Jon Maloy <jm...@re...> > *Sent:* Friday, May 29, 2020 11:11 PM > *To:* tip...@li...; tipc-dek > <tip...@de...>; Tuong Tong Lien > <tuo...@de...>; Xin Long <luc...@gm...>; Tung > Quang Nguyen <tun...@de...>; Ying Xue > <yin...@wi...> > *Subject:* Fwd: Re: FW: [PATCH 2/2] tipc: update a binding service via > broadcast > > Added more recipients. > > > > -------- Forwarded Message -------- > > *Subject: * > > > > Re: FW: [PATCH 2/2] tipc: update a binding service via broadcast > > *Date: * > > > > Fri, 29 May 2020 12:08:02 -0400 > > *From: * > > > > Jon Maloy <jm...@re...> <mailto:jm...@re...> > > *To: * > > > > Hoang Huu Le <hoa...@de...> > <mailto:hoa...@de...>, ma...@do... > <mailto:ma...@do...> <ma...@do...> <mailto:ma...@do...> > > > > Hi Hoang, > See below. > > On 5/27/20 6:49 AM, Hoang Huu Le wrote: > > Hi Jon, > > I got DRAFT version base on your idea (attachment file). > But from my point, this version introduce too much code > implementation at sending side. > I don't think this is bright idea to keep rcast_list and > bcast_list in the name table. > I think we should find out a new way or just ignore the feature. > > Yes, you are right. > I came up with a new idea, to just add a sequence number to the > broadcast/replicast messages and re-order them at reception. This even > handles the case if the first broadcast message arrives before the > first bulk message, something we have not anticipated before. > I couldn't resist the temptation trying to code it, as you can see i > the patch I just sent out. > It is totally untested, I just added the code as I thought it should > be and made sure it compiled. > There is still a little too much new code to my taste, but this might > be a way forward. > Please give your feedback on this. 
> > > I also noticed a couple of things while working with this: > > 1) There is still an 'expires' field in the tipc_mcast_method, and it > seems to even be considered when switching bcast/rcast. The whole > point of adding the mcast synchronization mechanism was to get rid of > this delay. Have you tested that syncronization really works without > the 'expires' ? > [Hoang] Yes, I did. > > I issue the command below to force rcast/bcast regardless ‘expires’. > > $ tipc link set bro REPLICAST > > or > > $tipc link set bro BROADCAST > Yes, but what happens when the protocol selects by itself, based on number of destinations, and this number has just passed a threshold? > > 2) There are some remnants of old code for the name table dist_queue. > This functionality was made redundant by me at least two years ago, so > this should be cleaned up. > [Hoang] I will check and clean those stuff in separate commit. > > > 3) We might have a potential race condition when new nodes come up, so > that publications are distributed twice. > a) A publication is added to the name table, and the name table > lock is released. > b) A new node comes up, and the new publication is delivered in > the bulk message. > c) The broadcast of the publication goes ahead and sends it out to > all nodes, even the one that just came up. > d) We end up with a double publication on one of the nodes. > e) One of those will linger in the name table after the > publication is withdrawn. > I have never seen this happen, and my analysis might be wrong, but > to me this looks like a possible scenario. > Note that my patch doesn't fix this, but we could possibly arrange > it by adding a 'distributed' flag i the publication item on the > sending side, so that the bulk distribution will ignore it. > > [Hoang] I try to simulate as your scenario description on 8 nodes with > publication >100 services at the same time and bring interface > down/up. 
Sometimes I got below error logs: > > [ 537.322414] tipc: Failed to remove binding 1000,2 from 1001001 > > […] > > [ 537.358957] tipc: Failed to remove binding 1000,11 from 1001001 > > I’m not sure above counting as bug whether or not. If yes, we also fix > this in another commit too. > This is not what I expected, but might be another manifestation of the same problem. We are probably observing a replicast withdraw arriving before the corresponding bulk publication. If you see a binding <1000,2> in the table after this printout that would be a confirmation. For my scenario: Do you see duplicate publication instances before you do withdraw? Do you see lingering publication after a withdraw? Luckily, all of this will be fixed with the new broadcast distribution protocol. Regards ///jon > > Regards > ///jon > > > > > Regards, > Hoang > -----Original Message----- > From: Jon Maloy <jm...@re...> <mailto:jm...@re...> > Sent: Thursday, May 21, 2020 9:44 PM > To: Hoang Huu Le <hoa...@de...> > <mailto:hoa...@de...>; ma...@do... > <mailto:ma...@do...> > Subject: Re: FW: [PATCH 2/2] tipc: update a binding service via > broadcast > > Hi, > I have one more comment below. Looking forward to your feedback. > ///jon > > > On 5/20/20 9:03 PM, Hoang Huu Le wrote: > > Yeah, thanks Jon. > I will investigate more on your idea when I finish the issue > with lab infrastructure. > > Hoang > -----Original Message----- > From: Jon Maloy <jm...@re...> <mailto:jm...@re...> > Sent: Thursday, May 21, 2020 7:13 AM > To: Hoang Huu Le <hoa...@de...> > <mailto:hoa...@de...>; ma...@do... > <mailto:ma...@do...> > Subject: Re: FW: [PATCH 2/2] tipc: update a binding service > via broadcast > > Hi Hoang, > Below I try to summarize my newest proposal in relation to v2 > of your patch. > > 1) The bulk can be sent just as is done now, with the addition > that we > add the NOT_LAST bit to the header. > 2) We need a new capability bit identifying nodes which support > broadcast NAME_DISTR. 
I don't see we can just > reuse TIPC_MCAST_RBCTL, because the recipients need to > have code > for handling concurrent > bulk/broadcast receptions. This bit is not added to the > cluster > capability word. > 3) We need to keep two structs of type tipc_nlist in the name > table. One > contains tipc_dest structures for > all nodes NOT supporting TIPC_MCAST_NAMEDISTR > (rcast_list), and the > other those which do (bcast_list). > > > For more comments see below. > > > > On 5/12/20 6:22 AM, Hoang Huu Le wrote: > > Just forward the patch I mentioned. > > -----Original Message----- > From: Hoang Le <hoa...@de...> > <mailto:hoa...@de...> > Sent: Tuesday, November 19, 2019 5:01 PM > To: jon...@er... > <mailto:jon...@er...>; ma...@do... > <mailto:ma...@do...>; tip...@de... > <mailto:tip...@de...> > Subject: [PATCH 2/2] tipc: update a binding service via > broadcast > > Currently, updating binding table (add service binding to > name table/withdraw a service binding) is being sent over > replicast. > However, if we are scaling up clusters to > 100 > nodes/containers this > method is less affection because of looping through nodes > in a cluster one > by one. > > It is worth to use broadcast to update a binding service. > Then binding > table updates in all nodes for one shot. > > The mechanism is backward compatible as sync message > slient dropped. 
> > v2: resolve synchronization problem when switching from > unicast to > broadcast > > Signed-off-by: Hoang Le <hoa...@de...> > <mailto:hoa...@de...> > --- > net/tipc/bcast.c | 3 ++- > net/tipc/link.c | 6 ++++++ > net/tipc/name_table.c | 33 ++++++++++++++++++++++++++++++--- > net/tipc/name_table.h | 4 ++++ > net/tipc/node.c | 32 ++++++++++++++++++++++++++++++++ > net/tipc/node.h | 2 ++ > 6 files changed, 76 insertions(+), 4 deletions(-) > > diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c > index e06f05d55534..44ed481fec47 100644 > --- a/net/tipc/bcast.c > +++ b/net/tipc/bcast.c > @@ -324,7 +324,8 @@ static int tipc_mcast_send_sync(struct > net *net, struct sk_buff *skb, > hdr = buf_msg(skb); > if (msg_user(hdr) == MSG_FRAGMENTER) > hdr = msg_inner_hdr(hdr); > - if (msg_type(hdr) != TIPC_MCAST_MSG) > + if (msg_user(hdr) != NAME_DISTRIBUTOR && > + msg_type(hdr) != TIPC_MCAST_MSG) > return 0; > /* Allocate dummy message */ > diff --git a/net/tipc/link.c b/net/tipc/link.c > index fb72031228c9..a2e9a64d5a0f 100644 > --- a/net/tipc/link.c > +++ b/net/tipc/link.c > @@ -1190,6 +1190,8 @@ static bool tipc_data_input(struct > tipc_link *l, struct sk_buff *skb, > struct sk_buff_head *inputq) > { > struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq; > + struct name_table *nt = tipc_name_table(l->net); > + struct sk_buff_head *defnq = &nt->defer_namedq; > struct tipc_msg *hdr = buf_msg(skb); > switch (msg_user(hdr)) { > @@ -1211,6 +1213,10 @@ static bool tipc_data_input(struct > tipc_link *l, struct sk_buff *skb, > case NAME_DISTRIBUTOR: > l->bc_rcvlink->state = LINK_ESTABLISHED; > skb_queue_tail(l->namedq, skb); > + > + spin_lock_bh(&defnq->lock); > + tipc_mcast_filter_msg(l->net, defnq, l->namedq); > > Should not be needed here. > You can instead do this in tipc_named_rcv(), using l->namedq > as deferred queue and creating an > temporary namedq queue on the stack for the messages ready to > be delivered. 
> We sort the messages in two steps: > 1) If there are any chains of bulk messages, we sort those per > source node into the temporary namedq and deliver them first, > when a chain is complete. > 2) If we find that a chain is incomplete we push it back to > the head of n->namedq and return without further action. > 3) When there are no bulk messages left in n->namedq we call > tipc_mcast_filter_msgs() to sort the remaining messages into > the temporary namedq, as far as possible, and deliver those > which are ready to be delivered. > > > > + spin_unlock_bh(&defnq->lock); > return true; > case MSG_BUNDLER: > case TUNNEL_PROTOCOL: > diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c > index 66a65c2cdb23..593dcd11357f 100644 > --- a/net/tipc/name_table.c > +++ b/net/tipc/name_table.c > @@ -615,9 +615,11 @@ struct publication > *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > struct tipc_net *tn = tipc_net(net); > struct publication *p = NULL; > struct sk_buff *skb = NULL; > + bool rcast; > spin_lock_bh(&tn->nametbl_lock); > + rcast = nt->rcast; > if (nt->local_publ_count >= TIPC_MAX_PUBL) { > pr_warn("Bind failed, max limit %u reached\n", TIPC_MAX_PUBL); > goto exit; > @@ -632,8 +634,18 @@ struct publication > *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, > exit: > spin_unlock_bh(&tn->nametbl_lock); > - if (skb) > - tipc_node_broadcast(net, skb); > > Here we make two calls to tipc_mcast_xmit(), one with > method->rcast/mandatory and rcast_list,if not empty, and one > with method->bcast/mandatory and bcast_list, if not empty. > > Actually, the latter should not be mandatory. We can easily imagine a > situation where we start out with only legacy nodes, and then > upgrade to > bcast nodes, one by one. > > In the beginning we want tipc_mcast_xmit() to select rcast even > for the > bcast nodes, and then, as their proportion of the cluster grows, it > should switch to bcast. 
This does mean that the method struct for > bcast > must be kept between the calls. I.e., another member of struct > name_table. > > ///jon > > > > skb must of course be cloned if necessary. > > > > + if (skb) { > + /* Use broadcast if all nodes support broadcast > NAME_DISTR */ > + if (tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) { > + tipc_node_broadcast_named_publish(net, skb, &rcast); > + spin_lock_bh(&tn->nametbl_lock); > + nt->rcast = rcast; > + spin_unlock_bh(&tn->nametbl_lock); > + } else { > + /* Otherwise, be backwards compatible */ > + tipc_node_broadcast(net, skb); > + } > > Not needed according to above. > > return p; > } > @@ -648,8 +660,10 @@ int tipc_nametbl_withdraw(struct net > *net, u32 type, u32 lower, > u32 self = tipc_own_addr(net); > struct sk_buff *skb = NULL; > struct publication *p; > + bool rcast; > spin_lock_bh(&tn->nametbl_lock); > + rcast = nt->rcast; > p = tipc_nametbl_remove_publ(net, type, lower, upper, > self, key); > if (p) { > @@ -664,7 +678,16 @@ int tipc_nametbl_withdraw(struct net > *net, u32 type, u32 lower, > spin_unlock_bh(&tn->nametbl_lock); > if (skb) { > - tipc_node_broadcast(net, skb); > + /* Use broadcast if all nodes support broadcast > NAME_DISTR */ > + if (tipc_net(net)->capabilities & TIPC_MCAST_RBCTL) { > + tipc_node_broadcast_named_publish(net, skb, &rcast); > + spin_lock_bh(&tn->nametbl_lock); > + nt->rcast = rcast; > + spin_unlock_bh(&tn->nametbl_lock); > + } else { > + /* Otherwise, be backwards compatible */ > + tipc_node_broadcast(net, skb); > + } > > One or two calls to tipc_mcast_xmit(), just as above. > > return 1; > } > return 0; > @@ -746,6 +769,9 @@ int tipc_nametbl_init(struct net *net) > INIT_LIST_HEAD(&nt->cluster_scope); > rwlock_init(&nt->cluster_scope_lock); > tn->nametbl = nt; > + /* 'bulk' updated messages via unicast */ > + nt->rcast = true; > + skb_queue_head_init(&nt->defer_namedq); > > Not needed. Now, node->namedq *is* the deferred queue. 
> > spin_lock_init(&tn->nametbl_lock); > return 0; > } > @@ -784,6 +810,7 @@ void tipc_nametbl_stop(struct net *net) > * publications, then release the name table > */ > spin_lock_bh(&tn->nametbl_lock); > + skb_queue_purge(&nt->defer_namedq); > for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { > if (hlist_empty(&nt->services[i])) > continue; > diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h > index f79066334cc8..b8cdf2a29d48 100644 > --- a/net/tipc/name_table.h > +++ b/net/tipc/name_table.h > @@ -95,6 +95,8 @@ struct publication { > * - used by name_distr to send bulk updates to new nodes > * - used by name_distr during re-init of name table > * @local_publ_count: number of publications issued by this > node > + * @defer_namedq: temporarily queue for 'synching' update > + * @rcast: previous method used to publish/withdraw a service > */ > struct name_table { > struct hlist_head services[TIPC_NAMETBL_SIZE]; > @@ -102,6 +104,8 @@ struct name_table { > struct list_head cluster_scope; > rwlock_t cluster_scope_lock; > u32 local_publ_count; > + struct sk_buff_head defer_namedq; > + bool rcast; > }; > int tipc_nl_name_table_dump(struct sk_buff *skb, struct > netlink_callback *cb); > diff --git a/net/tipc/node.c b/net/tipc/node.c > index aaf595613e6e..b058647fa78b 100644 > --- a/net/tipc/node.c > +++ b/net/tipc/node.c > @@ -2981,3 +2981,35 @@ void > tipc_node_pre_cleanup_net(struct net *exit_net) > } > rcu_read_unlock(); > } > + > +int tipc_node_broadcast_named_publish(struct net *net, > struct sk_buff *skb, > + bool *rcast) > +{ > + struct tipc_mc_method method = {.rcast = *rcast}; > + struct sk_buff_head xmitq; > + struct tipc_nlist dests; > + struct tipc_node *n; > + u16 cong_link_cnt; > + int rc = 0; > + > + __skb_queue_head_init(&xmitq); > + __skb_queue_tail(&xmitq, skb); > + > + tipc_nlist_init(&dests, tipc_own_addr(net)); > + rcu_read_lock(); > + list_for_each_entry_rcu(n, tipc_nodes(net), list) { > + if (in_own_node(net, n->addr)) > + continue; > + if 
(!node_is_up(n)) > + continue; > + tipc_nlist_add(&dests, n->addr); > + } > + rcu_read_unlock(); > + > + rc = tipc_mcast_xmit(net, &xmitq, &method, &dests, > &cong_link_cnt); > + *rcast = method.rcast; > + > + tipc_nlist_purge(&dests); > + __skb_queue_purge(&xmitq); > + return rc; > +} > > Not needed any more. > > diff --git a/net/tipc/node.h b/net/tipc/node.h > index a6803b449a2c..d7d19f9932b1 100644 > --- a/net/tipc/node.h > +++ b/net/tipc/node.h > @@ -124,4 +124,6 @@ int tipc_nl_node_set_key(struct > sk_buff *skb, struct genl_info *info); > int tipc_nl_node_flush_key(struct sk_buff *skb, struct > genl_info *info); > #endif > void tipc_node_pre_cleanup_net(struct net *exit_net); > +int tipc_node_broadcast_named_publish(struct net *net, > struct sk_buff *skb, > + bool *rcast); > #endif > > There may of course be flaws or potential for improvements to > this, but > to me > this should solve our problem without too much new complexity > and code. > > ///jon > |