From: Hoang H. Le <hoa...@de...> - 2021-01-18 08:25:16

From: Hoang Le <hoa...@de...>

(struct tipc_link_info)->dest is in network order (__be32), so we must
convert the value to network order before assigning.

The problem detected by sparse:

net/tipc/netlink_compat.c:699:24: warning: incorrect type in assignment (different base types)
net/tipc/netlink_compat.c:699:24:    expected restricted __be32 [usertype] dest
net/tipc/netlink_compat.c:699:24:    got int

Acked-by: Jon Maloy <jm...@re...>
Signed-off-by: Hoang Le <hoa...@de...>
---
 net/tipc/netlink_compat.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 5a1ce64039f7..0749df80454d 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
     if (err)
         return err;

-    link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+    link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
     link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
     nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
             TIPC_MAX_LINK_NAME);
--
2.25.1
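[Editor's note: the bug class fixed above is assigning a host-byte-order integer to a field that travels in network byte order. A minimal user-space sketch of the same pattern, assuming an illustrative struct and values that are not TIPC's actual wire format:]

#include <arpa/inet.h>  /* htonl(), ntohl() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative wire-format struct: 'dest' must be big-endian on the
 * wire, like (struct tipc_link_info)->dest in the patch above. */
struct link_info_wire {
    uint32_t dest;
};

int main(void)
{
    uint32_t dest = 0x01001001u;    /* host-order value */
    struct link_info_wire info;

    /* The bug sparse flagged: 'info.dest = dest;' would store the
     * host-order bytes unchanged. The fix converts first: */
    info.dest = htonl(dest);

    printf("host order: 0x%08x\n", (unsigned)dest);
    printf("wire order: 0x%08x\n", (unsigned)info.dest);
    printf("round trip: 0x%08x\n", (unsigned)ntohl(info.dest));
    return 0;
}

[On a little-endian machine the first two lines differ by exactly the byte swap the missing htonl() was dropping; in the kernel, the __be32 annotation lets sparse catch the omission at compile time.]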
From: Hoang H. Le <hoa...@de...> - 2021-01-18 08:25:06

This patch fixes the following warning from sparse:

net/tipc/monitor.c:263:35: warning: incorrect type in assignment (different base types)
net/tipc/monitor.c:263:35:    expected unsigned int
net/tipc/monitor.c:263:35:    got restricted __be32 [usertype]
[...]
net/tipc/node.c:374:13: warning: context imbalance in 'tipc_node_read_lock' - wrong count at exit
net/tipc/node.c:379:13: warning: context imbalance in 'tipc_node_read_unlock' - unexpected unlock
net/tipc/node.c:384:13: warning: context imbalance in 'tipc_node_write_lock' - wrong count at exit
net/tipc/node.c:389:13: warning: context imbalance in 'tipc_node_write_unlock_fast' - unexpected unlock
net/tipc/node.c:404:17: warning: context imbalance in 'tipc_node_write_unlock' - unexpected unlock
[...]
net/tipc/crypto.c:1201:9: warning: incorrect type in initializer (different address spaces)
net/tipc/crypto.c:1201:9:    expected struct tipc_aead [noderef] __rcu *__tmp
net/tipc/crypto.c:1201:9:    got struct tipc_aead *
[...]

Signed-off-by: Hoang Huu Le <hoa...@de...>
---
 net/tipc/crypto.c  | 12 ++++-----
 net/tipc/monitor.c | 63 ++++++++++++++++++++++++++++++++++------------
 net/tipc/node.c    |  5 ++++
 3 files changed, 58 insertions(+), 22 deletions(-)

diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index f4fca8f7f63f..6f64acef73dc 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -317,7 +317,7 @@ static int tipc_aead_key_generate(struct tipc_aead_key *skey);

 #define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
 do { \
-    typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr), \
+    struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \
                         lockdep_is_held(lock)); \
     rcu_assign_pointer((rcu_ptr), (ptr)); \
     tipc_aead_put(__tmp); \
@@ -798,7 +798,7 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
     ehdr = (struct tipc_ehdr *)skb->data;
     salt = aead->salt;
     if (aead->mode == CLUSTER_KEY)
-        salt ^= ehdr->addr; /* __be32 */
+        salt ^= __be32_to_cpu(ehdr->addr);
     else if (__dnode)
         salt ^= tipc_node_get_addr(__dnode);
     memcpy(iv, &salt, 4);
@@ -929,7 +929,7 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
     ehdr = (struct tipc_ehdr *)skb->data;
     salt = aead->salt;
     if (aead->mode == CLUSTER_KEY)
-        salt ^= ehdr->addr; /* __be32 */
+        salt ^= __be32_to_cpu(ehdr->addr);
     else if (ehdr->destined)
         salt ^= tipc_own_addr(net);
     memcpy(iv, &salt, 4);
@@ -1946,16 +1946,16 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
             goto rcv;
         }
         tipc_aead_put(aead);
-        aead = tipc_aead_get(tmp);
+        aead = tipc_aead_get((struct tipc_aead __force __rcu *)tmp);
     }

     if (unlikely(err)) {
-        tipc_aead_users_dec(aead, INT_MIN);
+        tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
         goto free_skb;
     }

     /* Set the RX key's user */
-    tipc_aead_users_set(aead, 1);
+    tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);

     /* Mark this point, RX works */
     rx->timer1 = jiffies;
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 48fac3b17e40..407619697292 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -104,6 +104,36 @@ static struct tipc_monitor *tipc_monitor(struct net *net, int bearer_id)

 const int tipc_max_domain_size = sizeof(struct tipc_mon_domain);

+static inline u16 mon_cpu_to_le16(u16 val)
+{
+    return (__force __u16)htons(val);
+}
+
+static inline u32 mon_cpu_to_le32(u32 val)
+{
+    return (__force __u32)htonl(val);
+}
+
+static inline u64 mon_cpu_to_le64(u64 val)
+{
+    return (__force __u64)cpu_to_be64(val);
+}
+
+static inline u16 mon_le16_to_cpu(u16 val)
+{
+    return ntohs((__force __be16)val);
+}
+
+static inline u32 mon_le32_to_cpu(u32 val)
+{
+    return ntohl((__force __be32)val);
+}
+
+static inline u64 mon_le64_to_cpu(u64 val)
+{
+    return be64_to_cpu((__force __be64)val);
+}
+
 /* dom_rec_len(): actual length of domain record for transport
  */
 static int dom_rec_len(struct tipc_mon_domain *dom, u16 mcnt)
@@ -260,16 +290,16 @@ static void mon_update_local_domain(struct tipc_monitor *mon)
         diff |= dom->members[i] != peer->addr;
         dom->members[i] = peer->addr;
         map_set(&dom->up_map, i, peer->is_up);
-        cache->members[i] = htonl(peer->addr);
+        cache->members[i] = mon_cpu_to_le32(peer->addr);
     }
     diff |= dom->up_map != prev_up_map;
     if (!diff)
         return;
     dom->gen = ++mon->dom_gen;
-    cache->len = htons(dom->len);
-    cache->gen = htons(dom->gen);
-    cache->member_cnt = htons(member_cnt);
-    cache->up_map = cpu_to_be64(dom->up_map);
+    cache->len = mon_cpu_to_le16(dom->len);
+    cache->gen = mon_cpu_to_le16(dom->gen);
+    cache->member_cnt = mon_cpu_to_le16(member_cnt);
+    cache->up_map = mon_cpu_to_le64(dom->up_map);
     mon_apply_domain(mon, self);
 }

@@ -455,10 +485,11 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
     struct tipc_mon_domain dom_bef;
     struct tipc_mon_domain *dom;
     struct tipc_peer *peer;
-    u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
+    u16 new_member_cnt = mon_le16_to_cpu(arrv_dom->member_cnt);
     int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
-    u16 new_gen = ntohs(arrv_dom->gen);
-    u16 acked_gen = ntohs(arrv_dom->ack_gen);
+    u16 new_gen = mon_le16_to_cpu(arrv_dom->gen);
+    u16 acked_gen = mon_le16_to_cpu(arrv_dom->ack_gen);
+    u16 arrv_dlen = mon_le16_to_cpu(arrv_dom->len);
     bool probing = state->probing;
     int i, applied_bef;

@@ -469,7 +500,7 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
         return;
     if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
         return;
-    if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
+    if (dlen < new_dlen || arrv_dlen != new_dlen)
         return;

     /* Synch generation numbers with peer if link just came up */
@@ -517,9 +548,9 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
     dom->len = new_dlen;
     dom->gen = new_gen;
     dom->member_cnt = new_member_cnt;
-    dom->up_map = be64_to_cpu(arrv_dom->up_map);
+    dom->up_map = mon_le64_to_cpu(arrv_dom->up_map);
     for (i = 0; i < new_member_cnt; i++)
-        dom->members[i] = ntohl(arrv_dom->members[i]);
+        dom->members[i] = mon_le32_to_cpu(arrv_dom->members[i]);

     /* Update peers affected by this domain record */
     applied_bef = peer->applied;
@@ -548,19 +579,19 @@ void tipc_mon_prep(struct net *net, void *data, int *dlen,
     if (likely(state->acked_gen == gen)) {
         len = dom_rec_len(dom, 0);
         *dlen = len;
-        dom->len = htons(len);
-        dom->gen = htons(gen);
-        dom->ack_gen = htons(state->peer_gen);
+        dom->len = mon_cpu_to_le16(len);
+        dom->gen = mon_cpu_to_le16(gen);
+        dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
         dom->member_cnt = 0;
         return;
     }
     /* Send the full record */
     read_lock_bh(&mon->lock);
-    len = ntohs(mon->cache.len);
+    len = mon_le16_to_cpu(mon->cache.len);
     *dlen = len;
     memcpy(data, &mon->cache, len);
     read_unlock_bh(&mon->lock);
-    dom->ack_gen = htons(state->peer_gen);
+    dom->ack_gen = mon_cpu_to_le16(state->peer_gen);
 }

 void tipc_mon_get_state(struct net *net, u32 addr,
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 008670d1f43e..9c95ef4b6326 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -372,26 +372,31 @@ static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
 }

 static void tipc_node_read_lock(struct tipc_node *n)
+    __acquires(n->lock)
 {
     read_lock_bh(&n->lock);
 }

 static void tipc_node_read_unlock(struct tipc_node *n)
+    __releases(n->lock)
 {
     read_unlock_bh(&n->lock);
 }

 static void tipc_node_write_lock(struct tipc_node *n)
+    __acquires(n->lock)
 {
     write_lock_bh(&n->lock);
 }

 static void tipc_node_write_unlock_fast(struct tipc_node *n)
+    __releases(n->lock)
 {
     write_unlock_bh(&n->lock);
 }

 static void tipc_node_write_unlock(struct tipc_node *n)
+    __releases(n->lock)
 {
     struct net *net = n->net;
     u32 addr = 0;
--
2.25.1
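[Editor's note: __acquires(), __releases() and __force are sparse-only annotations that expand to nothing under a normal compiler. A self-contained user-space sketch of the lock-annotation pattern, with a pthread rwlock standing in for the kernel's rwlock_t; all names below are illustrative, not TIPC code:]

#include <pthread.h>
#include <stdio.h>

/* Sparse annotations, defined the way the kernel's compiler_types.h does:
 * real context attributes when the sparse checker runs (__CHECKER__),
 * empty otherwise, so this file builds with plain gcc/clang. */
#ifdef __CHECKER__
#define __acquires(x)   __attribute__((context(x, 0, 1)))
#define __releases(x)   __attribute__((context(x, 1, 0)))
#else
#define __acquires(x)
#define __releases(x)
#endif

static pthread_rwlock_t node_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Annotating a lock wrapper tells sparse that the lock context changes
 * inside the function; without it, sparse reports exactly the "context
 * imbalance ... wrong count at exit / unexpected unlock" warnings quoted
 * in the patch above. */
static void node_read_lock(void) __acquires(node_lock)
{
    pthread_rwlock_rdlock(&node_lock);
}

static void node_read_unlock(void) __releases(node_lock)
{
    pthread_rwlock_unlock(&node_lock);
}

int main(void)
{
    node_read_lock();
    printf("read-side critical section\n");
    node_read_unlock();
    return 0;
}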
From: Jon M. <jm...@re...> - 2021-01-15 14:50:45

On 1/15/21 12:07 AM, Hoang Huu Le wrote:
> This patch fixes the following warning from sparse:
> ---
> net/tipc/monitor.c:263:35: sparse: warning: incorrect type in assignment..
> net/tipc/monitor.c:263:35: sparse:    expected unsigned int
> net/tipc/monitor.c:263:35: sparse:    got restricted __be32 [usertype]
> [...]
> net/tipc/monitor.c:522:35: sparse: warning: cast to restricted __be32
> [...]
> ---
> Signed-off-by: Hoang Huu Le <hoa...@de...>
> ---
>  net/tipc/crypto.c         | 12 ++++++------
>  net/tipc/monitor.c        | 32 ++++++++++++++++----------------
>  net/tipc/netlink_compat.c |  2 +-
>  net/tipc/node.c           |  5 +++++
>  4 files changed, 28 insertions(+), 23 deletions(-)
>
> [...]
> @@ -798,7 +798,7 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
>      ehdr = (struct tipc_ehdr *)skb->data;
>      salt = aead->salt;
>      if (aead->mode == CLUSTER_KEY)
> -        salt ^= ehdr->addr; /* __be32 */
> +        salt ^= __be32_to_cpu(ehdr->addr); /* __be32 */

The /* __be32 */ comment looks redundant in this case, and even in other
cases further down.

> [...]
> @@ -455,10 +455,10 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
> [...]
> -    u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
> +    u16 new_member_cnt = ntohs((__force __be16)arrv_dom->member_cnt);

In general, it would be nice if we could have fewer of these ugly __force
macros. Unfortunately, since we are using the members of struct
tipc_mon_domain in both NBO and HBO this seems hard to avoid. So, until we
have any better idea, ok.

> [...]
> @@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
> [...]
> -    link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
> +    link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));

This looks like bug correction. I suggest you put that in a separate patch.

Acked-by: Jon Maloy <jm...@re...>
///jon

> [...]
From: Hoang H. Le <hoa...@de...> - 2021-01-15 05:08:22

This patch fixes the following warning from sparse:
---
net/tipc/monitor.c:263:35: sparse: warning: incorrect type in assignment..
net/tipc/monitor.c:263:35: sparse:    expected unsigned int
net/tipc/monitor.c:263:35: sparse:    got restricted __be32 [usertype]
[...]
net/tipc/monitor.c:522:35: sparse: warning: cast to restricted __be32
[...]
---
Signed-off-by: Hoang Huu Le <hoa...@de...>
---
 net/tipc/crypto.c         | 12 ++++++------
 net/tipc/monitor.c        | 32 ++++++++++++++++----------------
 net/tipc/netlink_compat.c |  2 +-
 net/tipc/node.c           |  5 +++++
 4 files changed, 28 insertions(+), 23 deletions(-)

diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index f4fca8f7f63f..096e1c903e3a 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -317,7 +317,7 @@ static int tipc_aead_key_generate(struct tipc_aead_key *skey);

 #define tipc_aead_rcu_replace(rcu_ptr, ptr, lock) \
 do { \
-    typeof(rcu_ptr) __tmp = rcu_dereference_protected((rcu_ptr), \
+    struct tipc_aead *__tmp = rcu_dereference_protected((rcu_ptr), \
                         lockdep_is_held(lock)); \
     rcu_assign_pointer((rcu_ptr), (ptr)); \
     tipc_aead_put(__tmp); \
@@ -798,7 +798,7 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
     ehdr = (struct tipc_ehdr *)skb->data;
     salt = aead->salt;
     if (aead->mode == CLUSTER_KEY)
-        salt ^= ehdr->addr; /* __be32 */
+        salt ^= __be32_to_cpu(ehdr->addr); /* __be32 */
     else if (__dnode)
         salt ^= tipc_node_get_addr(__dnode);
     memcpy(iv, &salt, 4);
@@ -929,7 +929,7 @@ static int tipc_aead_decrypt(struct net *net, struct tipc_aead *aead,
     ehdr = (struct tipc_ehdr *)skb->data;
     salt = aead->salt;
     if (aead->mode == CLUSTER_KEY)
-        salt ^= ehdr->addr; /* __be32 */
+        salt ^= __be32_to_cpu(ehdr->addr); /* __be32 */
     else if (ehdr->destined)
         salt ^= tipc_own_addr(net);
     memcpy(iv, &salt, 4);
@@ -1946,16 +1946,16 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
             goto rcv;
         }
         tipc_aead_put(aead);
-        aead = tipc_aead_get(tmp);
+        aead = tipc_aead_get((struct tipc_aead __force __rcu *)tmp);
     }

     if (unlikely(err)) {
-        tipc_aead_users_dec(aead, INT_MIN);
+        tipc_aead_users_dec((struct tipc_aead __force __rcu *)aead, INT_MIN);
         goto free_skb;
     }

     /* Set the RX key's user */
-    tipc_aead_users_set(aead, 1);
+    tipc_aead_users_set((struct tipc_aead __force __rcu *)aead, 1);

     /* Mark this point, RX works */
     rx->timer1 = jiffies;
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
index 48fac3b17e40..c9e753a1c98e 100644
--- a/net/tipc/monitor.c
+++ b/net/tipc/monitor.c
@@ -260,16 +260,16 @@ static void mon_update_local_domain(struct tipc_monitor *mon)
         diff |= dom->members[i] != peer->addr;
         dom->members[i] = peer->addr;
         map_set(&dom->up_map, i, peer->is_up);
-        cache->members[i] = htonl(peer->addr);
+        cache->members[i] = (__force __u32)htonl(peer->addr);
     }
     diff |= dom->up_map != prev_up_map;
     if (!diff)
         return;
     dom->gen = ++mon->dom_gen;
-    cache->len = htons(dom->len);
-    cache->gen = htons(dom->gen);
-    cache->member_cnt = htons(member_cnt);
-    cache->up_map = cpu_to_be64(dom->up_map);
+    cache->len = (__force __u32)htons(dom->len);
+    cache->gen = (__force __u16)htons(dom->gen);
+    cache->member_cnt = (__force __u32)htons(member_cnt);
+    cache->up_map = (__force __u64)cpu_to_be64(dom->up_map);
     mon_apply_domain(mon, self);
 }

@@ -455,10 +455,10 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
     struct tipc_mon_domain dom_bef;
     struct tipc_mon_domain *dom;
     struct tipc_peer *peer;
-    u16 new_member_cnt = ntohs(arrv_dom->member_cnt);
+    u16 new_member_cnt = ntohs((__force __be16)arrv_dom->member_cnt);
     int new_dlen = dom_rec_len(arrv_dom, new_member_cnt);
-    u16 new_gen = ntohs(arrv_dom->gen);
-    u16 acked_gen = ntohs(arrv_dom->ack_gen);
+    u16 new_gen = ntohs((__force __be16)arrv_dom->gen);
+    u16 acked_gen = ntohs((__force __be16)arrv_dom->ack_gen);
     bool probing = state->probing;
     int i, applied_bef;

@@ -469,7 +469,7 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
         return;
     if (dlen != dom_rec_len(arrv_dom, new_member_cnt))
         return;
-    if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen)
+    if (dlen < new_dlen || ntohs((__force __be16)arrv_dom->len) != new_dlen)
         return;

     /* Synch generation numbers with peer if link just came up */
@@ -517,9 +517,9 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr,
     dom->len = new_dlen;
     dom->gen = new_gen;
     dom->member_cnt = new_member_cnt;
-    dom->up_map = be64_to_cpu(arrv_dom->up_map);
+    dom->up_map = be64_to_cpu((__force __be64)arrv_dom->up_map);
     for (i = 0; i < new_member_cnt; i++)
-        dom->members[i] = ntohl(arrv_dom->members[i]);
+        dom->members[i] = ntohl((__force __be32)arrv_dom->members[i]);

     /* Update peers affected by this domain record */
     applied_bef = peer->applied;
@@ -548,19 +548,19 @@ void tipc_mon_prep(struct net *net, void *data, int *dlen,
     if (likely(state->acked_gen == gen)) {
         len = dom_rec_len(dom, 0);
         *dlen = len;
-        dom->len = htons(len);
-        dom->gen = htons(gen);
-        dom->ack_gen = htons(state->peer_gen);
+        dom->len = (__force __u16)htons(len);
+        dom->gen = (__force __u16)htons(gen);
+        dom->ack_gen = (__force __u16)htons(state->peer_gen);
         dom->member_cnt = 0;
         return;
     }
     /* Send the full record */
     read_lock_bh(&mon->lock);
-    len = ntohs(mon->cache.len);
+    len = ntohs((__force __be16)mon->cache.len);
     *dlen = len;
     memcpy(data, &mon->cache, len);
     read_unlock_bh(&mon->lock);
-    dom->ack_gen = htons(state->peer_gen);
+    dom->ack_gen = (__force __u16)htons(state->peer_gen);
 }

 void tipc_mon_get_state(struct net *net, u32 addr,
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 5a1ce64039f7..0749df80454d 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -696,7 +696,7 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
     if (err)
         return err;

-    link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
+    link_info.dest = htonl(nla_get_flag(link[TIPC_NLA_LINK_DEST]));
     link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
     nla_strscpy(link_info.str, link[TIPC_NLA_LINK_NAME],
             TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 83d9eb830592..8b43ff3e4d95 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -372,26 +372,31 @@ static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
 }

 static void tipc_node_read_lock(struct tipc_node *n)
+    __acquires(n->lock)
 {
     read_lock_bh(&n->lock);
 }

 static void tipc_node_read_unlock(struct tipc_node *n)
+    __releases(n->lock)
 {
     read_unlock_bh(&n->lock);
 }

 static void tipc_node_write_lock(struct tipc_node *n)
+    __acquires(n->lock)
 {
     write_lock_bh(&n->lock);
 }

 static void tipc_node_write_unlock_fast(struct tipc_node *n)
+    __releases(n->lock)
 {
     write_unlock_bh(&n->lock);
 }

 static void tipc_node_write_unlock(struct tipc_node *n)
+    __releases(n->lock)
 {
     struct net *net = n->net;
     u32 addr = 0;
--
2.25.1
From: Jon M. <jm...@re...> - 2021-01-14 15:59:29

On 1/14/21 3:04 AM, Mauro Carvalho Chehab wrote:
> A function has a different name between their prototype
> and its kernel-doc markup:
>
> ../net/tipc/link.c:2551: warning: expecting prototype for link_reset_stats(). Prototype was for tipc_link_reset_stats() instead
> ../net/tipc/node.c:1678: warning: expecting prototype for is the general link level function for message sending(). Prototype was for tipc_node_xmit() instead
>
> Signed-off-by: Mauro Carvalho Chehab <mch...@ke...>
> ---
>  net/tipc/link.c | 2 +-
>  net/tipc/node.c | 2 +-
>  2 files changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/net/tipc/link.c b/net/tipc/link.c
> index a6a694b78927..115109259430 100644
> --- a/net/tipc/link.c
> +++ b/net/tipc/link.c
> @@ -2544,7 +2544,7 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
>  }
>
>  /**
> - * link_reset_stats - reset link statistics
> + * tipc_link_reset_stats - reset link statistics
>   * @l: pointer to link
>   */
>  void tipc_link_reset_stats(struct tipc_link *l)
> diff --git a/net/tipc/node.c b/net/tipc/node.c
> index 83d9eb830592..008670d1f43e 100644
> --- a/net/tipc/node.c
> +++ b/net/tipc/node.c
> @@ -1665,7 +1665,7 @@ static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
>  }
>
>  /**
> - * tipc_node_xmit() is the general link level function for message sending
> + * tipc_node_xmit() - general link level function for message sending
>   * @net: the applicable net namespace
>   * @list: chain of buffers containing message
>   * @dnode: address of destination node

Acked-by: Jon Maloy <jm...@re...>
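[Editor's note: the rule behind this fix is that the identifier on the first line of a kernel-doc comment must match the function that immediately follows, otherwise scripts/kernel-doc emits the "expecting prototype for ..." warning. A hypothetical example of a well-formed comment; frob_widget and struct widget are invented names, and the errno.h include makes it build in user space:]

#include <errno.h>
#include <stdio.h>

struct widget {
    int state;
};

/**
 * frob_widget - reset a widget to its default state
 * @w: the widget to reset; must not be NULL
 *
 * The identifier on the first line must match the function below. If it
 * read "widget_frob" instead, scripts/kernel-doc would warn: "expecting
 * prototype for widget_frob(). Prototype was for frob_widget() instead",
 * the same class of warning this patch silences.
 *
 * Return: 0 on success, -EINVAL if @w is NULL.
 */
static int frob_widget(struct widget *w)
{
    if (!w)
        return -EINVAL;
    w->state = 0;
    return 0;
}

int main(void)
{
    struct widget w = { .state = 42 };
    int err = frob_widget(&w);

    printf("err=%d state=%d null-case=%d\n", err, w.state, frob_widget(NULL));
    return 0;
}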
From: Ying X. <yin...@wi...> - 2021-01-13 13:17:29

Thank you for your patience Jon! This series looks good to me.

Acked-by: Ying Xue <yin...@wi...>

On 12/9/20 2:49 AM, jm...@re... wrote:
> From: Jon Maloy <jm...@re...>
>
> We make a number of simplifications and cleanups, especially to call signatures
> in the binding table. This makes the code easier to understand and serves as a
> preparation for an upcoming functional addition.
>
> Jon Maloy (16):
>   tipc: re-organize members of struct publication
>   tipc: move creation of publication item one level up in call chain
>   tipc: introduce new unified address type for internal use
>   tipc: simplify signature of tipc_namtbl_publish()
>   tipc: simplify call signatures for publication creation
>   tipc: simplify signature of tipc_nametbl_withdraw() functions
>   tipc: rename binding table lookup functions
>   tipc: refactor tipc_sendmsg() and tipc_lookup_anycast()
>   tipc: simplify signature of tipc_namtbl_lookup_mcast_sockets()
>   tipc: simplify signature of tipc_nametbl_lookup_mcast_nodes()
>   tipc: simplify signature of tipc_nametbl_lookup_group()
>   tipc: simplify signature of tipc_service_find_range()
>   tipc: simplify signature of tipc_find_service()
>   tipc: simplify api between binding table and topology server
>   tipc: add host-endian copy of user subscription to struct
>     tipc_subscription
>   tipc: remove some unnecessary warnings
>
>  net/tipc/addr.h       |  44 +++++
>  net/tipc/msg.c        |  23 ++-
>  net/tipc/name_distr.c |  89 +++++----
>  net/tipc/name_table.c | 419 ++++++++++++++++++++++--------------------
>  net/tipc/name_table.h |  64 ++++---
>  net/tipc/net.c        |   8 +-
>  net/tipc/node.c       |  28 +--
>  net/tipc/socket.c     | 313 +++++++++++++++----------------
>  net/tipc/subscr.c     |  84 +++++----
>  net/tipc/subscr.h     |  12 +-
>  10 files changed, 567 insertions(+), 517 deletions(-)
From: Jon M. <jm...@re...> - 2021-01-11 19:35:01

On 1/11/21 9:20 AM, Xue, Ying wrote:
> -    seq = &dest->addr.nameseq;
> -    if (dest->addrtype == TIPC_ADDR_MCAST)
> -        return tipc_sendmcast(sock, seq, m, dlen, timeout);
> -
> -    if (dest->addrtype == TIPC_SERVICE_ADDR) {
> -        type = dest->addr.name.name.type;
> -        inst = dest->addr.name.name.instance;
> -        dnode = dest->addr.name.domain;
> -        dport = tipc_nametbl_lookup_anycast(net, type, inst, &dnode);
> -        if (unlikely(!dport && !dnode))
> +    /* Determine destination */
> +    if (atype == TIPC_SERVICE_RANGE) {
>
> [Ying] Regarding my understanding, we should compare "atype" with
> TIPC_ADDR_MCAST rather than TIPC_SERVICE_RANGE. Please help to confirm.

Right. Except that they are the same (value == 1). I wanted to simplify by
only using one of the macros inside the kernel code.

///jon

> +        return tipc_sendmcast(sock, &ua->sr, m, dlen, timeout);
> +    } else if (atype == TIPC_SERVICE_ADDR) {
> +        skaddr.node = ua->lookup_node;
> +        ua->scope = skaddr.node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
> +        if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
>              return -EHOSTUNREACH;
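[Editor's note: Jon's "value == 1" remark can be checked against the uapi header, where the legacy and current address-type names are numeric aliases. A small sketch, assuming a <linux/tipc.h> recent enough to define both macro families:]

#include <linux/tipc.h> /* uapi header with both old and new names */
#include <stdio.h>

/* Old-style and new-style names alias the same address-type values,
 * which is why comparing against TIPC_SERVICE_RANGE also matches the
 * legacy TIPC_ADDR_MCAST. */
_Static_assert(TIPC_ADDR_MCAST == TIPC_SERVICE_RANGE,
               "multicast uses the service-range address type");

int main(void)
{
    printf("TIPC_ADDR_MCAST    = %d\n", TIPC_ADDR_MCAST);
    printf("TIPC_SERVICE_RANGE = %d\n", TIPC_SERVICE_RANGE);
    printf("TIPC_SERVICE_ADDR  = %d\n", TIPC_SERVICE_ADDR);
    return 0;
}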
From: Xue, Y. <Yin...@wi...> - 2021-01-11 14:34:53

-    seq = &dest->addr.nameseq;
-    if (dest->addrtype == TIPC_ADDR_MCAST)
-        return tipc_sendmcast(sock, seq, m, dlen, timeout);
-
-    if (dest->addrtype == TIPC_SERVICE_ADDR) {
-        type = dest->addr.name.name.type;
-        inst = dest->addr.name.name.instance;
-        dnode = dest->addr.name.domain;
-        dport = tipc_nametbl_lookup_anycast(net, type, inst, &dnode);
-        if (unlikely(!dport && !dnode))
+    /* Determine destination */
+    if (atype == TIPC_SERVICE_RANGE) {

[Ying] Regarding my understanding, we should compare "atype" with
TIPC_ADDR_MCAST rather than TIPC_SERVICE_RANGE. Please help to confirm.

+        return tipc_sendmcast(sock, &ua->sr, m, dlen, timeout);
+    } else if (atype == TIPC_SERVICE_ADDR) {
+        skaddr.node = ua->lookup_node;
+        ua->scope = skaddr.node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE;
+        if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr))
             return -EHOSTUNREACH;
From: Duzan, G. D <Gar...@fi...> - 2021-01-06 18:22:40

The eth link did eventually come up, as both the eth and udp links were there when we looked. It isn't clear whether this was a matter of elapsed time, some other traffic that allowed the broadcast to pass, or something else that we screwed up. This is VMware ESXi, so a different beast than Workstation. If we manage to narrow things down or come up with a fix I'll try to post it here.

Thanks.

Gary Duzan

p.s. So far I've been fairly happy with TIPC as transport for a scalable RPC-like service. At least once I got past some of the quirks of the socket API and the tipc utility.
From: Jon M. <jm...@re...> - 2021-01-06 13:55:39

On 1/5/21 11:31 PM, Hoang Huu Le wrote:
> From: Hoang Le <hoa...@de...>
>
> The buffer list can have zero skb as following path:
> tipc_named_node_up()->tipc_node_xmit()->tipc_link_xmit(), so
> we need to check the list before casting an &sk_buff.
>
> [...]

Acked-by: Jon Maloy <jm...@re...>
From: Hoang H. Le <hoa...@de...> - 2021-01-06 04:31:49

From: Hoang Le <hoa...@de...>

The buffer list can have zero skb as following path:
tipc_named_node_up()->tipc_node_xmit()->tipc_link_xmit(), so
we need to check the list before casting an &sk_buff.

Fault report:
[] tipc: Bulk publication failure
[] general protection fault, probably for non-canonical [#1] PREEMPT [...]
[] KASAN: null-ptr-deref in range [0x00000000000000c8-0x00000000000000cf]
[] CPU: 0 PID: 0 Comm: swapper/0 Kdump: loaded Not tainted 5.10.0-rc4+ #2
[] Hardware name: Bochs ..., BIOS Bochs 01/01/2011
[] RIP: 0010:tipc_link_xmit+0xc1/0x2180
[] Code: 24 b8 00 00 00 00 4d 39 ec 4c 0f 44 e8 e8 d7 0a 10 f9 48 [...]
[] RSP: 0018:ffffc90000006ea0 EFLAGS: 00010202
[] RAX: dffffc0000000000 RBX: ffff8880224da000 RCX: 1ffff11003d3cc0d
[] RDX: 0000000000000019 RSI: ffffffff886007b9 RDI: 00000000000000c8
[] RBP: ffffc90000007018 R08: 0000000000000001 R09: fffff52000000ded
[] R10: 0000000000000003 R11: fffff52000000dec R12: ffffc90000007148
[] R13: 0000000000000000 R14: 0000000000000000 R15: ffffc90000007018
[] FS: 0000000000000000(0000) GS:ffff888037400000(0000) knlGS:000[...]
[] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[] CR2: 00007fffd2db5000 CR3: 000000002b08f000 CR4: 00000000000006f0

Signed-off-by: Hoang Le <hoa...@de...>
---
 net/tipc/link.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/net/tipc/link.c b/net/tipc/link.c
index 6ae2140eb4f7..a6a694b78927 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1030,7 +1030,6 @@ void tipc_link_reset(struct tipc_link *l)
 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
            struct sk_buff_head *xmitq)
 {
-    struct tipc_msg *hdr = buf_msg(skb_peek(list));
     struct sk_buff_head *backlogq = &l->backlogq;
     struct sk_buff_head *transmq = &l->transmq;
     struct sk_buff *skb, *_skb;
@@ -1038,13 +1037,18 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
     u16 ack = l->rcv_nxt - 1;
     u16 seqno = l->snd_nxt;
     int pkt_cnt = skb_queue_len(list);
-    int imp = msg_importance(hdr);
     unsigned int mss = tipc_link_mss(l);
     unsigned int cwin = l->window;
     unsigned int mtu = l->mtu;
+    struct tipc_msg *hdr;
     bool new_bundle;
     int rc = 0;
+    int imp;
+
+    if (pkt_cnt <= 0)
+        return 0;

+    hdr = buf_msg(skb_peek(list));
     if (unlikely(msg_size(hdr) > mtu)) {
         pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
             skb_queue_len(list), msg_user(hdr),
@@ -1053,6 +1057,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
         return -EMSGSIZE;
     }

+    imp = msg_importance(hdr);
     /* Allow oversubscription of one data msg per source at congestion */
     if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
         if (imp == TIPC_SYSTEM_IMPORTANCE) {
--
2.25.1
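[Editor's note: the crash mechanism this patch guards against is that skb_peek() returns NULL on an empty queue, and buf_msg() casts and dereferences that result unconditionally. A user-space analogue of the hazard and of the early-return guard; the toy buf/buf_head types below are illustrative, not kernel structures:]

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for sk_buff / sk_buff_head. */
struct buf {
    struct buf *next;
    int hdr;
};

struct buf_head {
    struct buf *first;
    int len;
};

/* Like skb_peek(): returns NULL when the queue is empty. */
static struct buf *buf_peek(struct buf_head *q)
{
    return q->first;
}

static int xmit(struct buf_head *list)
{
    /* Guard first, exactly as the patch does with skb_queue_len():
     * without it, buf_peek() yields NULL below and reading ->hdr is a
     * NULL-pointer dereference, the GPF in the fault report above. */
    if (list->len <= 0)
        return 0;

    int hdr = buf_peek(list)->hdr;
    printf("first header: %d\n", hdr);
    return 0;
}

int main(void)
{
    struct buf_head empty = { .first = NULL, .len = 0 };

    return xmit(&empty);    /* safe thanks to the guard */
}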
From: Jon M. <ma...@do...> - 2021-01-05 16:16:04

Hi,

When I was talking about VMs in my previous mail I was in reality referring to qemu/kvm, but I was also using VMware Workstation for many years, without any problems. It doesn't sound unreasonable to think that they might have changed their default network policies, so this is a good place to start looking.

Actually, the main reason for introducing UDP as an alternative to bare Ethernet was exactly that we anticipated this kind of problem. In your own network you can probably fix this, but there might be cases where you cannot influence the network provider this way.

///jon
From: Duzan, G. D <Gar...@fi...> - 2021-01-05 14:24:14

Thanks, Jon. I've forwarded your information request to the site. We did get things working over UDP (using the same interfaces), so we know IP is working and the netid configuration is ok. I still suspect that the VMware virtual network configuration is squashing non-IP traffic, so I'm focusing on that angle.

Gary Duzan
From: Jon M. <jm...@re...> - 2021-01-05 01:01:42

Hi Gary,

There are no known problems with connecting between VMs. As a matter of fact, that is the way we run almost all the time. Have you tried just to ping between the two VMs, using the same interfaces? If that works they should definitely find each other.

If you can ping and they still don't discover each other, check the cluster id (aka network id) by doing 'tipc node get netid' on both sides. If the interfaces don't have IP addresses you could assign those temporarily, just to check.

What do the commands 'tipc link list' and 'tipc bearer list' show you?

BR
Jon Maloy
From: Duzan, G. D <Gar...@fi...> - 2021-01-04 22:55:56

I'm new here, and I have trouble getting to SourceForge through the corporate proxy, so forgive me if this is an FAQ. I'm trying to get TIPC working at a remote site between two VMs running Ubuntu 18.04 which at least appear to be on the same IP subnet. The node address is set to distinct values on both hosts (as 18.04 had other TIPC which required the manual config), and the broadcast bearer is there, but they don't appear to be discovering each other as the client gets a Host Unreachable trying to do a sendto(). I believe the VMs are running under VMware, but getting visibility on that could be tricky. The TIPC code works fine between physical hosts locally.

Are there known problems getting the ethernet broadcast between VMs working? I believe I have the proper incantation to use a UDP bearer for use as a fallback, but I'd rather not use it if I don't have to.

Thanks.

Gary Duzan
From: Jon M. <jm...@re...> - 2020-12-14 21:23:56

On 12/11/20 6:32 AM, Hoang Huu Le wrote:
> From: Hoang Le <hoa...@de...>
>
> We initialize nlmsghdr without any payload in tipc_nl_compat_dumpit(),
> then, result of calling parse attributes always fails and return with
> '-EINVAL' error.

When we initialize nlmsghdr with no payload inside tipc_nl_compat_dumpit()
the parsing function returns -EINVAL. We fix it by making the parsing call
conditional.

> To fix error returning when parsing attributes of a netlink message,
> we do a sanity check the length of message payload.
>
> Signed-off-by: Hoang Le <hoa...@de...>
> ---
>  net/tipc/netlink_compat.c | 12 +++++++-----
>  1 file changed, 7 insertions(+), 5 deletions(-)
>
> diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
> index 82f154989418..5a1ce64039f7 100644
> --- a/net/tipc/netlink_compat.c
> +++ b/net/tipc/netlink_compat.c
> @@ -213,12 +213,14 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
>      }
>
>      info.attrs = attrbuf;
> -    err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
> -                     tipc_genl_family.maxattr,
> -                     tipc_genl_family.policy, NULL);
> -    if (err)
> -        goto err_out;
>
> +    if (nlmsg_len(cb.nlh) > 0) {
> +        err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf,
> +                         tipc_genl_family.maxattr,
> +                         tipc_genl_family.policy, NULL);
> +        if (err)
> +            goto err_out;
> +    }
>      do {
>          int rem;

Acked-by: Jon Maloy <jm...@re...>
From: Hoang H. Le <hoa...@de...> - 2020-12-11 11:48:03
|
From: Hoang Le <hoa...@de...> We initialize nlmsghdr without any payload in tipc_nl_compat_dumpit(), so the subsequent attribute parsing always fails and returns -EINVAL. To avoid this spurious error we add a sanity check on the length of the message payload and only parse attributes when there is a payload to parse. Signed-off-by: Hoang Le <hoa...@de...> --- net/tipc/netlink_compat.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index 82f154989418..5a1ce64039f7 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c @@ -213,12 +213,14 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd, } info.attrs = attrbuf; - err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf, - tipc_genl_family.maxattr, - tipc_genl_family.policy, NULL); - if (err) - goto err_out; + if (nlmsg_len(cb.nlh) > 0) { + err = nlmsg_parse_deprecated(cb.nlh, GENL_HDRLEN, attrbuf, + tipc_genl_family.maxattr, + tipc_genl_family.policy, NULL); + if (err) + goto err_out; + } do { int rem; -- 2.25.1
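The guard works because nlmsg_len() returns the length of the message payload excluding the header, so a header-only message yields 0 and the parse is skipped entirely. For reference, the helper is defined along these lines in include/net/netlink.h (quoted from memory, so verify against your tree):

    static inline int nlmsg_len(const struct nlmsghdr *nlh)
    {
            return nlh->nlmsg_len - NLMSG_HDRLEN;
    }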
From: Hoang H. Le <hoa...@de...> - 2020-12-09 10:29:22
|
Hi Jon, See my inline comment … Regards, Hoang From: Jon Maloy <jm...@re...> Sent: Wednesday, December 9, 2020 5:22 PM To: Hoang Huu Le <hoa...@de...> Cc: tip...@li...; tipc-dek <tip...@de...> Subject: Fwd: BUG: rwlock bad magic on CPU, kworker/0:LINE/NUM, ADDR Hi Hoang, This was the one I had in mind. To me it looks like we still have a problem. ///jon -------- Forwarded Message -------- Subject: Re: BUG: rwlock bad magic on CPU, kworker/0:LINE/NUM, ADDR Date: Mon, 30 Nov 2020 12:35:30 +0100 From: Dmitry Vyukov <dv...@go...> To: syzbot <syz...@sy...> CC: David Miller <da...@da...>, jm...@re..., Jakub Kicinski <ku...@ke...>, LKML <lin...@vg...>, netdev <ne...@vg...>, syzkaller-bugs <syz...@go...>, tip...@li..., Ying Xue <yin...@wi...> On Mon, Nov 30, 2020 at 12:33 PM syzbot <syz...@sy...> wrote: Hello, syzbot found the following issue on: HEAD commit: 90cf87d1 enetc: Let the hardware auto-advance the taprio b.. git tree: net console output: https://syzkaller.appspot.com/x/log.txt?x=135479b3500000 kernel config: https://syzkaller.appspot.com/x/.config?x=5720c06118e6c4cc dashboard link: https://syzkaller.appspot.com/bug?extid=cb987a9c796abc570b47 compiler: gcc (GCC) 10.1.0-syz 20200507 Unfortunately, I don't have any reproducer for this issue yet. IMPORTANT: if you fix the issue, please add the following tag to the commit: Reported-by: syz...@sy... tipc: 32-bit node address hash set to aa1414ac BUG: rwlock bad magic on CPU#0, kworker/0:18/18158, 00000000859f2a8d CPU: 0 PID: 18158 Comm: kworker/0:18 Not tainted 5.10.0-rc4-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Workqueue: events tipc_net_finalize_work Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x107/0x163 lib/dump_stack.c:118 rwlock_bug kernel/locking/spinlock_debug.c:144 [inline] debug_write_lock_before kernel/locking/spinlock_debug.c:182 [inline] do_raw_write_lock+0x1ef/0x280 kernel/locking/spinlock_debug.c:206 tipc_mon_reinit_self+0x1f7/0x630 net/tipc/monitor.c:685 [Hoang] This is new to me. I will take a look. There was also "general protection fault in tipc_mon_reinit_self": https://syzkaller.appspot.com/bug?id=dc141b9a05cb48d3d9b46837bc2fdc9e7d95dbe9 which also happened once. Smells like an intricate race condition. [Hoang] I guess the race condition is already fixed in v5.10. tipc_net_finalize net/tipc/net.c:134 [inline] tipc_net_finalize+0x1df/0x310 net/tipc/net.c:125 process_one_work+0x933/0x15a0 kernel/workqueue.c:2272 worker_thread+0x64c/0x1120 kernel/workqueue.c:2418 kthread+0x3af/0x4a0 kernel/kthread.c:292 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:296 --- This report is generated by a bot. It may contain errors. See https://goo.gl/tpsmEJ for more information about syzbot. syzbot engineers can be reached at syz...@go.... syzbot will keep track of this issue. See: https://goo.gl/tpsmEJ#status for how to communicate with syzbot.
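For readers unfamiliar with this report: with CONFIG_DEBUG_SPINLOCK enabled, every rwlock carries a magic value set by rwlock_init()/DEFINE_RWLOCK(), and do_raw_write_lock() complains when it is missing. A minimal sketch of what triggers the same message (illustrative only, not the TIPC code path):

    #include <linux/spinlock.h>

    static rwlock_t lock;    /* zeroed, never passed through rwlock_init() */

    static void trigger(void)
    {
            /* debug_write_lock_before() finds lock.magic != RWLOCK_MAGIC and
             * prints "BUG: rwlock bad magic ...". The same symptom appears
             * when the lock's memory has been freed or reinitialized under
             * us, which is why this smells like a teardown race.
             */
            write_lock_bh(&lock);
            write_unlock_bh(&lock);
    }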
From: Jon M. <jm...@re...> - 2020-12-09 10:22:02
|
Hi Hoang, This was the one I had in mind. To me it looks like we still have a problem. ///jon -------- Forwarded Message -------- Subject: Re: BUG: rwlock bad magic on CPU, kworker/0:LINE/NUM, ADDR Date: Mon, 30 Nov 2020 12:35:30 +0100 From: Dmitry Vyukov <dv...@go...> To: syzbot <syz...@sy...> CC: David Miller <da...@da...>, jm...@re..., Jakub Kicinski <ku...@ke...>, LKML <lin...@vg...>, netdev <ne...@vg...>, syzkaller-bugs <syz...@go...>, tip...@li..., Ying Xue <yin...@wi...> On Mon, Nov 30, 2020 at 12:33 PM syzbot <syz...@sy...> wrote: > Hello, > > syzbot found the following issue on: > > HEAD commit: 90cf87d1 enetc: Let the hardware auto-advance the taprio b.. > git tree: net > console output: https://syzkaller.appspot.com/x/log.txt?x=135479b3500000 > kernel config: https://syzkaller.appspot.com/x/.config?x=5720c06118e6c4cc > dashboard link: https://syzkaller.appspot.com/bug?extid=cb987a9c796abc570b47 > compiler: gcc (GCC) 10.1.0-syz 20200507 > > Unfortunately, I don't have any reproducer for this issue yet. > > IMPORTANT: if you fix the issue, please add the following tag to the commit: > Reported-by: syz...@sy... > > tipc: 32-bit node address hash set to aa1414ac > BUG: rwlock bad magic on CPU#0, kworker/0:18/18158, 00000000859f2a8d > CPU: 0 PID: 18158 Comm: kworker/0:18 Not tainted 5.10.0-rc4-syzkaller #0 > Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 > Workqueue: events tipc_net_finalize_work > Call Trace: > __dump_stack lib/dump_stack.c:77 [inline] > dump_stack+0x107/0x163 lib/dump_stack.c:118 > rwlock_bug kernel/locking/spinlock_debug.c:144 [inline] > debug_write_lock_before kernel/locking/spinlock_debug.c:182 [inline] > do_raw_write_lock+0x1ef/0x280 kernel/locking/spinlock_debug.c:206 > tipc_mon_reinit_self+0x1f7/0x630 net/tipc/monitor.c:685 There was also "general protection fault in tipc_mon_reinit_self": https://syzkaller.appspot.com/bug?id=dc141b9a05cb48d3d9b46837bc2fdc9e7d95dbe9 which also happened once. Smells like an intricate race condition. > tipc_net_finalize net/tipc/net.c:134 [inline] > tipc_net_finalize+0x1df/0x310 net/tipc/net.c:125 > process_one_work+0x933/0x15a0 kernel/workqueue.c:2272 > worker_thread+0x64c/0x1120 kernel/workqueue.c:2418 > kthread+0x3af/0x4a0 kernel/kthread.c:292 > ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:296 > > > --- > This report is generated by a bot. It may contain errors. > See https://goo.gl/tpsmEJ for more information about syzbot. > syzbot engineers can be reached at syz...@go.... > > syzbot will keep track of this issue. See: > https://goo.gl/tpsmEJ#status for how to communicate with syzbot. > > -- > You received this message because you are subscribed to the Google Groups "syzkaller-bugs" group. > To unsubscribe from this group and stop receiving emails from it, send an email to syz...@go.... > To view this discussion on the web visit https://groups.google.com/d/msgid/syzkaller-bugs/0000000000004e5bdb05b5516009%40google.com. |
From: David M. <da...@da...> - 2020-12-09 00:20:04
|
From: Cengiz Can <cengiz@kernel.wtf> Date: Mon, 7 Dec 2020 11:14:24 +0300 > `tipc_node_apply_property` does a null check on a `tipc_link_entry` > pointer but also accesses the same pointer outside the null check block. > > This triggers a warning on the Coverity static analyzer because we're > implying that `e->link` can be null. > > Move the "Update MTU for node link entry" line into the if block to make > sure that we never dereference `e->link` while it may be null. > > Signed-off-by: Cengiz Can <cengiz@kernel.wtf> > --- Applied, thanks.
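The pattern Coverity flags here is generic: a pointer is tested in one place and dereferenced unconditionally in another. A hedged sketch of the before/after shape (types and names are illustrative, not the actual tipc_node_apply_property() body):

    struct link  { int mtu; };
    struct entry { struct link *link; };

    static void apply(struct entry *e, int mtu)
    {
            if (e->link) {
                    /* ... other per-link updates ... */
                    e->link->mtu = mtu;  /* after: dereference stays guarded */
            }
            /* Before the fix, the MTU update sat here, outside the check.
             * The 'if' above tells the analyzer that e->link may be NULL,
             * so the unguarded dereference is (rightly) reported. */
    }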
From: <jm...@re...> - 2020-12-08 18:52:27
|
From: Jon Maloy <jm...@re...> We reduce the signature of tipc_nametbl_lookup_group() by using a struct tipc_uaddr pointer. This entails a couple of minor changes in the functions tipc_send_group_mcast/anycast/unicast/bcast() in socket.c Signed-off-by: Jon Maloy <jm...@re...> --- net/tipc/name_table.c | 14 +++++++------- net/tipc/name_table.h | 7 +++---- net/tipc/socket.c | 42 +++++++++++++++++------------------------- 3 files changed, 27 insertions(+), 36 deletions(-) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 23af099370fb..7f2fb446233e 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -624,31 +624,31 @@ bool tipc_nametbl_lookup_anycast(struct net *net, * destination socket/node pairs matching the given address. * The requester may or may not want to exclude himself from the list. */ -bool tipc_nametbl_lookup_group(struct net *net, u32 type, u32 instance, - u32 scope, struct list_head *dsts, - int *dstcnt, u32 exclude, - bool mcast) +bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua, + struct list_head *dsts, int *dstcnt, + u32 exclude, bool mcast) { u32 self = tipc_own_addr(net); + u32 inst = ua->sa.instance; struct service_range *sr; struct tipc_service *sc; struct publication *p; *dstcnt = 0; rcu_read_lock(); - sc = tipc_service_find(net, type); + sc = tipc_service_find(net, ua->sa.type); if (unlikely(!sc)) goto exit; spin_lock_bh(&sc->lock); /* Todo: a full search i.e. service_range_foreach_match() instead? */ - sr = service_range_match_first(sc->ranges.rb_node, instance, instance); + sr = service_range_match_first(sc->ranges.rb_node, inst, inst); if (!sr) goto no_match; list_for_each_entry(p, &sr->all_publ, all_publ) { - if (p->scope != scope) + if (p->scope != ua->scope) continue; if (p->sk.ref == exclude && p->sk.node == self) continue; diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 259ba514193e..909eaf706553 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -117,10 +117,9 @@ void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua, bool exact, struct list_head *dports); void tipc_nametbl_lookup_mcast_nodes(struct net *net, struct tipc_uaddr *ua, struct tipc_nlist *nodes); -bool tipc_nametbl_lookup_group(struct net *net, u32 type, u32 instance, - u32 domain, struct list_head *dsts, - int *dstcnt, u32 exclude, - bool all); +bool tipc_nametbl_lookup_group(struct net *net, struct tipc_uaddr *ua, + struct list_head *dsts, int *dstcnt, + u32 exclude, bool mcast); void tipc_nametbl_build_group(struct net *net, struct tipc_group *grp, u32 type, u32 domain); struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 7d4807d0e2d1..a349160a5ae4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -958,7 +958,7 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, int dlen, long timeout) { struct sock *sk = sock->sk; - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); + struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name; int blks = tsk_blocks(GROUP_H_SIZE + dlen); struct tipc_sock *tsk = tipc_sk(sk); struct net *net = sock_net(sk); @@ -966,8 +966,8 @@ static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m, u32 node, port; int rc; - node = dest->addr.id.node; - port = dest->addr.id.ref; + node = ua->sk.node; + port = ua->sk.ref; if (!port && !node) return -EHOSTUNREACH; @@ -1001,7 +1001,7 @@ static int tipc_send_group_unicast(struct socket 
*sock, struct msghdr *m, static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, int dlen, long timeout) { - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); + struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name; struct sock *sk = sock->sk; struct tipc_sock *tsk = tipc_sk(sk); struct list_head *cong_links = &tsk->cong_links; @@ -1012,16 +1012,13 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, struct net *net = sock_net(sk); u32 node, port, exclude; struct list_head dsts; - u32 type, inst, scope; int lookups = 0; int dstcnt, rc; bool cong; INIT_LIST_HEAD(&dsts); - - type = msg_nametype(hdr); - inst = dest->addr.name.name.instance; - scope = msg_lookup_scope(hdr); + ua->sa.type = msg_nametype(hdr); + ua->scope = msg_lookup_scope(hdr); while (++lookups < 4) { exclude = tipc_group_exclude(tsk->group); @@ -1030,9 +1027,8 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, /* Look for a non-congested destination member, if any */ while (1) { - if (!tipc_nametbl_lookup_group(net, type, inst, scope, - &dsts, &dstcnt, exclude, - false)) + if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, + exclude, false)) return -EHOSTUNREACH; tipc_dest_pop(&dsts, &node, &port); cong = tipc_group_cong(tsk->group, node, port, blks, @@ -1087,7 +1083,7 @@ static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m, static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, int dlen, long timeout) { - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); + struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name; struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct tipc_sock *tsk = tipc_sk(sk); @@ -1112,9 +1108,9 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, return -EHOSTUNREACH; /* Complete message header */ - if (dest) { + if (ua) { msg_set_type(hdr, TIPC_GRP_MCAST_MSG); - msg_set_nameinst(hdr, dest->addr.name.name.instance); + msg_set_nameinst(hdr, ua->sa.instance); } else { msg_set_type(hdr, TIPC_GRP_BCAST_MSG); msg_set_nameinst(hdr, 0); @@ -1161,29 +1157,25 @@ static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m, static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m, int dlen, long timeout) { + struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name; struct sock *sk = sock->sk; - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); struct tipc_sock *tsk = tipc_sk(sk); struct tipc_group *grp = tsk->group; struct tipc_msg *hdr = &tsk->phdr; struct net *net = sock_net(sk); - u32 type, inst, scope, exclude; struct list_head dsts; - u32 dstcnt; + u32 dstcnt, exclude; INIT_LIST_HEAD(&dsts); - - type = msg_nametype(hdr); - inst = dest->addr.name.name.instance; - scope = msg_lookup_scope(hdr); + ua->sa.type = msg_nametype(hdr); + ua->scope = msg_lookup_scope(hdr); exclude = tipc_group_exclude(grp); - if (!tipc_nametbl_lookup_group(net, type, inst, scope, &dsts, - &dstcnt, exclude, true)) + if (!tipc_nametbl_lookup_group(net, ua, &dsts, &dstcnt, exclude, true)) return -EHOSTUNREACH; if (dstcnt == 1) { - tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref); + tipc_dest_pop(&dsts, &ua->sk.node, &ua->sk.ref); return tipc_send_group_unicast(sock, m, dlen, timeout); } -- 2.28.0 |
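Since several patches in this series revolve around struct tipc_uaddr, a sketch of its shape may help when reading the diffs. This is reconstructed from how the series uses it (ua->sa, ua->sr, ua->sk, ua->scope, ua->lookup_node), so check net/tipc/addr.h in the actual tree before relying on it:

    struct tipc_uaddr {
            unsigned short family;
            unsigned char  addrtype;
            signed char    scope;
            union {
                    struct {
                            struct tipc_service_addr sa;
                            u32 lookup_node;
                    };
                    struct tipc_service_range sr;
                    struct tipc_socket_addr   sk;
            };
    };

    /* Filler helper used throughout the diffs (same caveat): */
    static inline void tipc_uaddr(struct tipc_uaddr *ua, u32 atype, u32 scope,
                                  u32 type, u32 lower, u32 upper)
    {
            ua->family = AF_TIPC;
            ua->addrtype = atype;
            ua->scope = scope;
            ua->sr.type = type;
            ua->sr.lower = lower;
            ua->sr.upper = upper;
    }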
From: <jm...@re...> - 2020-12-08 18:51:35
|
From: Jon Maloy <jm...@re...> We move some warning printouts to more strategic locations to avoid duplicates and yield more detailed information about the reported problem. Signed-off-by: Jon Maloy <jm...@re...> --- net/tipc/name_distr.c | 7 ------- net/tipc/name_table.c | 36 +++++++++++++++++------------------- 2 files changed, 17 insertions(+), 26 deletions(-) diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index fe5f39792323..47f00e420377 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -253,13 +253,6 @@ static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr) if (_p) tipc_node_unsubscribe(net, &_p->binding_node, addr); spin_unlock_bh(&tn->nametbl_lock); - - if (_p != p) { - pr_err("Unable to remove publication from failed node\n" - " (type=%u, lower=%u, node=%u, port=%u, key=%u)\n", - p->sr.type, p->sr.lower, p->sk.node, p->sk.ref, p->key); - } - if (_p) kfree_rcu(_p, rcu); } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 9085c41a1709..946d3ed5de5a 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -337,17 +337,22 @@ static bool tipc_service_insert_publ(struct net *net, struct publication *_p; u32 node = p->sk.node; bool first = false; + u32 key = p->key; sr = tipc_service_create_range(sc, p); if (!sr) - goto err; + return false; first = list_empty(&sr->all_publ); /* Return if the publication already exists */ list_for_each_entry(_p, &sr->all_publ, all_publ) { - if (_p->key == p->key && (!_p->sk.node || _p->sk.node == node)) + if (_p->key == key && (!_p->sk.node || _p->sk.node == node)) { + pr_debug("Failed to bind duplicate %u,%u,%u/%u:%u/%u\n", + p->sr.type, p->sr.lower, p->sr.upper, + node, p->sk.ref, key); return false; + } } if (in_own_node(net, p->sk.node)) @@ -360,10 +365,6 @@ static bool tipc_service_insert_publ(struct net *net, tipc_sub_report_overlap(sub, p, TIPC_PUBLISHED, first); } return true; -err: - pr_warn("Failed to bind to %u,%u,%u, no memory\n", - p->sr.type, p->sr.lower, p->sr.upper); - return false; } /** @@ -473,18 +474,12 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, { struct tipc_service *sc; struct publication *p; - u32 type = ua->sr.type; bool res = false; p = tipc_publ_create(ua, sk, key); if (!p) return NULL; - if (ua->sr.lower > ua->sr.upper) { - pr_debug("Failed to bind illegal {%u,%u,%u} from node %u\n", - type, ua->sr.lower, ua->sr.upper, sk->node); - return NULL; - } sc = tipc_service_find(net, ua); if (!sc) sc = tipc_service_create(net, ua); @@ -512,15 +507,15 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, sc = tipc_service_find(net, ua); if (!sc) - return NULL; + goto exit; spin_lock_bh(&sc->lock); sr = tipc_service_find_range(sc, ua); if (!sr) - goto exit; + goto unlock; p = tipc_service_remove_publ(sr, ua, sk, key); if (!p) - goto exit; + goto unlock; /* Notify any waiting subscriptions */ last = list_empty(&sr->all_publ); @@ -539,8 +534,14 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, hlist_del_init_rcu(&sc->service_list); kfree_rcu(sc, rcu); } -exit: +unlock: spin_unlock_bh(&sc->lock); +exit: + if (!p) { + pr_err("Failed to remove unknown binding: %u,%u,%u/%u:%u/%u\n", + ua->sr.type, ua->sr.lower, ua->sr.upper, + sk->node, sk->ref, key); + } return p; } @@ -809,9 +810,6 @@ void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua, skb = tipc_named_withdraw(net, p); list_del_init(&p->binding_sock); kfree_rcu(p, rcu); - } else { - pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", - ua->sr.type, 
ua->sr.lower, ua->sr.upper, key); } rc_dests = nt->rc_dests; spin_unlock_bh(&tn->nametbl_lock); -- 2.28.0 |
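Worth noting for users: the duplicate-binding message is demoted to pr_debug(), which is compiled out unless DEBUG or CONFIG_DYNAMIC_DEBUG is set, so normal logs stay quiet. With dynamic debug the message can still be enabled at runtime, for example (standard dynamic-debug interface; the path may vary with your debugfs mount):

    echo 'file name_table.c +p' > /sys/kernel/debug/dynamic_debug/control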
From: <jm...@re...> - 2020-12-08 18:51:21
|
From: Jon Maloy <jm...@re...> Following the principles of the preceding commits, we reduce the number of parameters passed along in tipc_sk_withdraw(), tipc_nametbl_withdraw() and associated functions. Signed-off-by: Jon Maloy <jm...@re...> --- net/tipc/name_distr.c | 11 ++++---- net/tipc/name_table.c | 54 +++++++++++++++++++----------------- net/tipc/name_table.h | 11 ++++---- net/tipc/node.c | 3 +- net/tipc/socket.c | 64 +++++++++++++++++++++---------------------- 5 files changed, 74 insertions(+), 69 deletions(-) diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index df42fc2b4536..fe5f39792323 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c @@ -244,17 +244,19 @@ static void tipc_publ_purge(struct net *net, struct publication *p, u32 addr) { struct tipc_net *tn = tipc_net(net); struct publication *_p; + struct tipc_uaddr ua; + tipc_uaddr(&ua, p->addrtype, p->scope, p->sr.type, + p->sr.lower, p->sr.upper); spin_lock_bh(&tn->nametbl_lock); - _p = tipc_nametbl_remove_publ(net, p->sr.type, p->sr.lower, - p->sr.upper, p->sk.node, p->key); + _p = tipc_nametbl_remove_publ(net, &ua, &p->sk, p->key); if (_p) tipc_node_unsubscribe(net, &_p->binding_node, addr); spin_unlock_bh(&tn->nametbl_lock); if (_p != p) { pr_err("Unable to remove publication from failed node\n" - " (type=%u, lower=%u, node=0x%x, port=%u, key=%u)\n", + " (type=%u, lower=%u, node=%u, port=%u, key=%u)\n", p->sr.type, p->sr.lower, p->sk.node, p->sk.ref, p->key); } @@ -309,8 +311,7 @@ static bool tipc_update_nametbl(struct net *net, struct distr_item *i, return true; } } else if (dtype == WITHDRAWAL) { - p = tipc_nametbl_remove_publ(net, ua.sr.type, ua.sr.lower, - ua.sr.upper, node, key); + p = tipc_nametbl_remove_publ(net, &ua, &sk, key); if (p) { tipc_node_unsubscribe(net, &p->binding_node, node); kfree_rcu(p, rcu); diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index ba96d5fc57f3..50562d086016 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -367,12 +367,15 @@ static bool tipc_service_insert_publ(struct net *net, * @node: target node * @key: target publication key */ -static struct publication *tipc_service_remove_publ(struct service_range *sr, - u32 node, u32 key) +static struct publication *tipc_service_remove_publ(struct service_range *r, + struct tipc_uaddr *ua, + struct tipc_socket_addr *sk, + u32 key) { struct publication *p; + u32 node = sk->node; - list_for_each_entry(p, &sr->all_publ, all_publ) { + list_for_each_entry(p, &r->all_publ, all_publ) { if (p->key != key || (node && node != p->sk.node)) continue; list_del(&p->all_publ); @@ -496,32 +499,35 @@ struct publication *tipc_nametbl_insert_publ(struct net *net, return NULL; } -struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, - u32 lower, u32 upper, - u32 node, u32 key) +struct publication *tipc_nametbl_remove_publ(struct net *net, + struct tipc_uaddr *ua, + struct tipc_socket_addr *sk, + u32 key) { - struct tipc_service *sc = tipc_service_find(net, type); struct tipc_subscription *sub, *tmp; - struct service_range *sr = NULL; struct publication *p = NULL; + struct service_range *sr; + struct tipc_service *sc; bool last; + sc = tipc_service_find(net, ua->sr.type); if (!sc) return NULL; spin_lock_bh(&sc->lock); - sr = tipc_service_find_range(sc, lower, upper); + sr = tipc_service_find_range(sc, ua->sr.lower, ua->sr.upper); if (!sr) goto exit; - p = tipc_service_remove_publ(sr, node, key); + p = tipc_service_remove_publ(sr, ua, sk, key); if (!p) goto exit; /* Notify any waiting subscriptions */
last = list_empty(&sr->all_publ); list_for_each_entry_safe(sub, tmp, &sc->subscriptions, service_list) { - tipc_sub_report_overlap(sub, lower, upper, TIPC_WITHDRAWN, - p->sk.ref, node, p->scope, last); + tipc_sub_report_overlap(sub, ua->sr.lower, ua->sr.upper, + TIPC_WITHDRAWN, sk->ref, sk->node, + ua->scope, last); } /* Remove service range item if this was its last publication */ @@ -771,24 +777,22 @@ struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua, /** * tipc_nametbl_withdraw - withdraw a service binding * @net: network namespace - * @type: service type - * @lower: service range lower bound - * @upper: service range upper bound + * @ua: service address/range being unbound + * @sk: address of socket being unbound from + * @key: target publication key */ -int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, - u32 upper, u32 key) +void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua, + struct tipc_socket_addr *sk, u32 key) { struct name_table *nt = tipc_name_table(net); struct tipc_net *tn = tipc_net(net); - u32 self = tipc_own_addr(net); struct sk_buff *skb = NULL; struct publication *p; u32 rc_dests; spin_lock_bh(&tn->nametbl_lock); - p = tipc_nametbl_remove_publ(net, type, lower, upper, self, key); + p = tipc_nametbl_remove_publ(net, ua, sk, key); if (p) { nt->local_publ_count--; skb = tipc_named_withdraw(net, p); @@ -796,16 +800,13 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, kfree_rcu(p, rcu); } else { pr_err("Failed to remove local publication {%u,%u,%u}/%u\n", - type, lower, upper, key); + ua->sr.type, ua->sr.lower, ua->sr.upper, key); } rc_dests = nt->rc_dests; spin_unlock_bh(&tn->nametbl_lock); - if (skb) { + if (skb) tipc_node_broadcast(net, skb, rc_dests); - return 1; - } - return 0; } /** @@ -899,11 +900,14 @@ static void tipc_service_delete(struct net *net, struct tipc_service *sc) { struct service_range *sr, *tmpr; struct publication *p, *tmp; + struct tipc_uaddr ua; spin_lock_bh(&sc->lock); rbtree_postorder_for_each_entry_safe(sr, tmpr, &sc->ranges, tree_node) { list_for_each_entry_safe(p, tmp, &sr->all_publ, all_publ) { - tipc_service_remove_publ(sr, p->sk.node, p->key); + tipc_uaddr(&ua, p->addrtype, p->scope, + p->sr.type, p->sr.lower, p->sr.upper); + tipc_service_remove_publ(sr, &ua, &p->sk, p->key); kfree_rcu(p, rcu); } rb_erase_augmented(&sr->tree_node, &sc->ranges, &sr_callbacks); diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index e12b9eb2c7f1..5f48f05b93be 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -124,15 +124,16 @@ bool tipc_nametbl_lookup(struct net *net, u32 type, u32 instance, u32 domain, bool all); struct publication *tipc_nametbl_publish(struct net *net, struct tipc_uaddr *ua, struct tipc_socket_addr *sk, u32 key); -int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 upper, - u32 key); +void tipc_nametbl_withdraw(struct net *net, struct tipc_uaddr *ua, + struct tipc_socket_addr *sk, u32 key); struct publication *tipc_nametbl_insert_publ(struct net *net, struct tipc_uaddr *ua, struct tipc_socket_addr *sk, u32 key); -struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type, - u32 lower, u32 upper, - u32 node, u32 key); +struct publication *tipc_nametbl_remove_publ(struct net *net, + struct tipc_uaddr *ua, + struct tipc_socket_addr *sk, + u32 key); bool tipc_nametbl_subscribe(struct tipc_subscription *s); void tipc_nametbl_unsubscribe(struct tipc_subscription *s); int tipc_nametbl_init(struct net *net); diff --git
a/net/tipc/node.c b/net/tipc/node.c index 3a71e26c9509..24f3f5ea968d 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -429,8 +429,7 @@ static void tipc_node_write_unlock(struct tipc_node *n) } if (flags & TIPC_NOTIFY_LINK_DOWN) { tipc_mon_peer_down(net, n->addr, bearer_id); - tipc_nametbl_withdraw(net, TIPC_LINK_STATE, n->addr, - n->addr, n->link_id); + tipc_nametbl_withdraw(net, &ua, &sk, n->link_id); } } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 0a92ebdd096d..5a017a34fb5f 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -152,8 +152,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags, bool kern); static void tipc_sk_timeout(struct timer_list *t); static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua); -static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, - struct tipc_service_range const *seq); +static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua); static int tipc_sk_leave(struct tipc_sock *tsk); static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid); static int tipc_sk_insert(struct tipc_sock *tsk); @@ -643,7 +642,7 @@ static int tipc_release(struct socket *sock) __tipc_shutdown(sock, TIPC_ERR_NO_PORT); sk->sk_shutdown = SHUTDOWN_MASK; tipc_sk_leave(tsk); - tipc_sk_withdraw(tsk, 0, NULL); + tipc_sk_withdraw(tsk, NULL); __skb_queue_purge(&tsk->mc_method.deferredq); sk_stop_timer(sk, &sk->sk_timer); tipc_sk_remove(tsk); @@ -681,7 +680,7 @@ static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen) bool unbind = false; if (unlikely(!alen)) - return tipc_sk_withdraw(tsk, 0, NULL); + return tipc_sk_withdraw(tsk, NULL); if (ua->addrtype == TIPC_SERVICE_ADDR) { ua->addrtype = TIPC_SERVICE_RANGE; @@ -699,7 +698,7 @@ static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen) return -EACCES; if (unbind) - return tipc_sk_withdraw(tsk, ua->scope, &ua->sr); + return tipc_sk_withdraw(tsk, ua); return tipc_sk_publish(tsk, ua); } @@ -2923,38 +2922,37 @@ static int tipc_sk_publish(struct tipc_sock *tsk, struct tipc_uaddr *ua) return 0; } -static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, - struct tipc_service_range const *seq) +static int tipc_sk_withdraw(struct tipc_sock *tsk, struct tipc_uaddr *ua) { struct net *net = sock_net(&tsk->sk); - struct publication *publ; - struct publication *safe; + struct publication *safe, *p; + struct tipc_uaddr _ua; int rc = -EINVAL; - if (scope != TIPC_NODE_SCOPE) - scope = TIPC_CLUSTER_SCOPE; - - list_for_each_entry_safe(publ, safe, &tsk->publications, binding_sock) { - if (seq) { - if (publ->scope != scope) - continue; - if (publ->sr.type != seq->type) - continue; - if (publ->sr.lower != seq->lower) - continue; - if (publ->sr.upper != seq->upper) - break; - tipc_nametbl_withdraw(net, publ->sr.type, publ->sr.lower, - publ->sr.upper, publ->key); - rc = 0; - break; + list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) { + if (!ua) { + tipc_uaddr(&_ua, p->addrtype, p->scope, + p->sr.type, p->sr.lower, p->sr.upper); + tipc_nametbl_withdraw(net, &_ua, &p->sk, p->key); + continue; } - tipc_nametbl_withdraw(net, publ->sr.type, publ->sr.lower, - publ->sr.upper, publ->key); + /* Unbind specific publication */ + if (p->scope != ua->scope) + continue; + if (p->sr.type != ua->sr.type) + continue; + if (p->sr.lower != ua->sr.lower) + continue; + if (p->sr.upper != ua->sr.upper) + break; + tipc_nametbl_withdraw(net, ua, &p->sk, p->key); rc = 0; + break; } - if (list_empty(&tsk->publications)) 
+ if (list_empty(&tsk->publications)) { tsk->published = 0; + rc = 0; + } return rc; } @@ -3107,15 +3105,17 @@ static int tipc_sk_leave(struct tipc_sock *tsk) { struct net *net = sock_net(&tsk->sk); struct tipc_group *grp = tsk->group; - struct tipc_service_range seq; + struct tipc_uaddr ua; int scope; if (!grp) return -EINVAL; - tipc_group_self(grp, &seq, &scope); + ua.addrtype = TIPC_SERVICE_RANGE; + tipc_group_self(grp, &ua.sr, &scope); + ua.scope = scope; tipc_group_delete(net, grp); tsk->group = NULL; - tipc_sk_withdraw(tsk, scope, &seq); + tipc_sk_withdraw(tsk, &ua); return 0; } -- 2.28.0 |
From: <jm...@re...> - 2020-12-08 18:51:10
|
From: Jon Maloy <jm...@re...> We simplify the signature of the function tipc_nametbl_lookup_anycast(), using address structures instead of discrete integers. This also makes it possible to make some improvements to the functions __tipc_sendmsg() in socket.c and tipc_msg_lookup_dest() in msg.c. Signed-off-by: Jon Maloy <jm...@re...> --- net/tipc/msg.c | 23 ++++++------ net/tipc/name_table.c | 75 +++++++++++++++++++------------------- net/tipc/name_table.h | 5 ++- net/tipc/socket.c | 83 +++++++++++++++++++++---------------------- 4 files changed, 91 insertions(+), 95 deletions(-) diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 9eddbddb2fec..931245e93830 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -711,8 +711,11 @@ bool tipc_msg_skb_clone(struct sk_buff_head *msg, struct sk_buff_head *cpy) bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) { struct tipc_msg *msg = buf_msg(skb); - u32 dport, dnode; - u32 onode = tipc_own_addr(net); + u32 scope = msg_lookup_scope(msg); + u32 self = tipc_own_addr(net); + u32 inst = msg_nameinst(msg); + struct tipc_socket_addr sk; + struct tipc_uaddr ua; if (!msg_isdata(msg)) return false; @@ -726,16 +729,16 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) msg = buf_msg(skb); if (msg_reroute_cnt(msg)) return false; - dnode = tipc_scope2node(net, msg_lookup_scope(msg)); - dport = tipc_nametbl_lookup_anycast(net, msg_nametype(msg), - msg_nameinst(msg), &dnode); - if (!dport) + tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope, + msg_nametype(msg), inst, inst); + sk.node = tipc_scope2node(net, scope); + if (!tipc_nametbl_lookup_anycast(net, &ua, &sk)) return false; msg_incr_reroute_cnt(msg); - if (dnode != onode) - msg_set_prevnode(msg, onode); - msg_set_destnode(msg, dnode); - msg_set_destport(msg, dport); + if (sk.node != self) + msg_set_prevnode(msg, self); + msg_set_destnode(msg, sk.node); + msg_set_destport(msg, sk.ref); *err = TIPC_OK; return true; diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index e6177ccf1140..ed68db36bab9 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -549,66 +549,64 @@ struct publication *tipc_nametbl_remove_publ(struct net *net, /** * tipc_nametbl_lookup_anycast - perform service instance to socket translation * @net: network namespace - * @type: message type - * @instance: message instance - * @dnode: the search domain used during translation - * - * On entry, 'dnode' is the search domain used during the lookup + * @ua: service address to look up + * @sk: address of the socket we want to find * + * On entry, a non-zero 'sk->node' indicates the node where we want lookup to be + * performed, which may not be this one. * On exit: - * - if lookup is deferred to another node, leave 'dnode' unchanged and return 0 - * - if lookup is attempted and succeeds, set 'dnode' to the publishing node and - * return the published (non-zero) port number - * - if lookup is attempted and fails, set 'dnode' to 0 and return 0 + * - If lookup is deferred to another node, leave 'sk->node' unchanged and + * return 'true'. + * - If lookup is successful, set the 'sk->node' and 'sk->ref' (== portid) which + * represent the bound socket and return 'true'.
+ * - If lookup fails, return 'false' * * Note that for legacy users (node configured with Z.C.N address format) the - * 'closest-first' lookup algorithm must be maintained, i.e., if dnode is 0 + * 'closest-first' lookup algorithm must be maintained, i.e., if sk.node is 0 * we must look in the local binding list first */ -u32 tipc_nametbl_lookup_anycast(struct net *net, u32 type, - u32 instance, u32 *dnode) +bool tipc_nametbl_lookup_anycast(struct net *net, + struct tipc_uaddr *ua, + struct tipc_socket_addr *sk) { struct tipc_net *tn = tipc_net(net); bool legacy = tn->legacy_addr_format; u32 self = tipc_own_addr(net); - struct service_range *sr; + u32 inst = ua->sa.instance; + struct service_range *r; struct tipc_service *sc; - struct list_head *list; struct publication *p; - u32 port = 0; - u32 node = 0; + struct list_head *l; + bool res = false; - if (!tipc_in_scope(legacy, *dnode, self)) - return 0; + if (!tipc_in_scope(legacy, sk->node, self)) + return true; rcu_read_lock(); - sc = tipc_service_find(net, type); + sc = tipc_service_find(net, ua->sr.type); if (unlikely(!sc)) goto exit; spin_lock_bh(&sc->lock); - service_range_foreach_match(sr, sc, instance, instance) { + service_range_foreach_match(r, sc, inst, inst) { /* Select lookup algo: local, closest-first or round-robin */ - if (*dnode == self) { - list = &sr->local_publ; - if (list_empty(list)) + if (sk->node == self) { + l = &r->local_publ; + if (list_empty(l)) continue; - p = list_first_entry(list, struct publication, - local_publ); - list_move_tail(&p->local_publ, &sr->local_publ); - } else if (legacy && !*dnode && !list_empty(&sr->local_publ)) { - list = &sr->local_publ; - p = list_first_entry(list, struct publication, - local_publ); - list_move_tail(&p->local_publ, &sr->local_publ); + p = list_first_entry(l, struct publication, local_publ); + list_move_tail(&p->local_publ, &r->local_publ); + } else if (legacy && !sk->node && !list_empty(&r->local_publ)) { + l = &r->local_publ; + p = list_first_entry(l, struct publication, local_publ); + list_move_tail(&p->local_publ, &r->local_publ); } else { - list = &sr->all_publ; - p = list_first_entry(list, struct publication, - all_publ); - list_move_tail(&p->all_publ, &sr->all_publ); + l = &r->all_publ; + p = list_first_entry(l, struct publication, all_publ); + list_move_tail(&p->all_publ, &r->all_publ); } - port = p->sk.ref; - node = p->sk.node; + *sk = p->sk; + res = true; /* Todo: as for legacy, pick the first matching range only, a * "true" round-robin will be performed as needed. 
*/ @@ -618,8 +616,7 @@ u32 tipc_nametbl_lookup_anycast(struct net *net, u32 type, exit: rcu_read_unlock(); - *dnode = node; - return port; + return res; } /* tipc_nametbl_lookup_group(): lookup destinaton(s) in a communication group diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index 9f6e8efca00f..f5e37f90a2ba 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -111,9 +111,8 @@ struct name_table { }; int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); - -u32 tipc_nametbl_lookup_anycast(struct net *net, u32 type, u32 instance, - u32 *node); +bool tipc_nametbl_lookup_anycast(struct net *net, struct tipc_uaddr *ua, + struct tipc_socket_addr *sk); void tipc_nametbl_lookup_mcast_sockets(struct net *net, u32 type, u32 lower, u32 upper, u32 scope, bool exact, struct list_head *dports); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 05cfe179458e..913b1a7be25b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1424,44 +1424,43 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct tipc_sock *tsk = tipc_sk(sk); - DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); + struct tipc_uaddr *ua = (struct tipc_uaddr *)m->msg_name; long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); struct list_head *clinks = &tsk->cong_links; bool syn = !tipc_sk_type_connectionless(sk); struct tipc_group *grp = tsk->group; struct tipc_msg *hdr = &tsk->phdr; - struct tipc_service_range *seq; + struct tipc_socket_addr skaddr; struct sk_buff_head pkts; - u32 dport = 0, dnode = 0; - u32 type = 0, inst = 0; - int mtu, rc; + int atype, mtu, rc; if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE)) return -EMSGSIZE; - if (likely(dest)) { - if (unlikely(m->msg_namelen < sizeof(*dest))) - return -EINVAL; - if (unlikely(dest->family != AF_TIPC)) + if (ua) { + if (!tipc_uaddr_valid(ua, m->msg_namelen)) return -EINVAL; + atype = ua->addrtype; } + /* If socket belongs to a communication group follow other paths */ if (grp) { - if (!dest) + if (!ua) return tipc_send_group_bcast(sock, m, dlen, timeout); - if (dest->addrtype == TIPC_SERVICE_ADDR) + if (atype == TIPC_SERVICE_ADDR) return tipc_send_group_anycast(sock, m, dlen, timeout); - if (dest->addrtype == TIPC_SOCKET_ADDR) + if (atype == TIPC_SOCKET_ADDR) return tipc_send_group_unicast(sock, m, dlen, timeout); - if (dest->addrtype == TIPC_ADDR_MCAST) + if (atype == TIPC_SERVICE_RANGE) return tipc_send_group_mcast(sock, m, dlen, timeout); return -EINVAL; } - if (unlikely(!dest)) { - dest = &tsk->peer; - if (!syn && dest->family != AF_TIPC) + if (!ua) { + ua = (struct tipc_uaddr *)&tsk->peer; + if (!syn && ua->family != AF_TIPC) return -EDESTADDRREQ; + atype = ua->addrtype; } if (unlikely(syn)) { @@ -1471,54 +1470,51 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) return -EISCONN; if (tsk->published) return -EOPNOTSUPP; - if (dest->addrtype == TIPC_SERVICE_ADDR) { - tsk->conn_type = dest->addr.name.name.type; - tsk->conn_instance = dest->addr.name.name.instance; + if (atype == TIPC_SERVICE_ADDR) { + tsk->conn_type = ua->sa.type; + tsk->conn_instance = ua->sa.instance; } msg_set_syn(hdr, 1); } - seq = &dest->addr.nameseq; - if (dest->addrtype == TIPC_ADDR_MCAST) - return tipc_sendmcast(sock, seq, m, dlen, timeout); - - if (dest->addrtype == TIPC_SERVICE_ADDR) { - type = dest->addr.name.name.type; - inst = dest->addr.name.name.instance; - dnode = dest->addr.name.domain; - dport = 
tipc_nametbl_lookup_anycast(net, type, inst, &dnode); - if (unlikely(!dport && !dnode)) + /* Determine destination */ + if (atype == TIPC_SERVICE_RANGE) { + return tipc_sendmcast(sock, &ua->sr, m, dlen, timeout); + } else if (atype == TIPC_SERVICE_ADDR) { + skaddr.node = ua->lookup_node; + ua->scope = skaddr.node ? TIPC_NODE_SCOPE : TIPC_CLUSTER_SCOPE; + if (!tipc_nametbl_lookup_anycast(net, ua, &skaddr)) return -EHOSTUNREACH; - } else if (dest->addrtype == TIPC_SOCKET_ADDR) { - dnode = dest->addr.id.node; + } else if (atype == TIPC_SOCKET_ADDR) { + skaddr = ua->sk; } else { return -EINVAL; } /* Block or return if destination link is congested */ rc = tipc_wait_for_cond(sock, &timeout, - !tipc_dest_find(clinks, dnode, 0)); + !tipc_dest_find(clinks, skaddr.node, 0)); if (unlikely(rc)) return rc; - if (dest->addrtype == TIPC_SERVICE_ADDR) { + /* Finally build message header */ + msg_set_destnode(hdr, skaddr.node); + msg_set_destport(hdr, skaddr.ref); + if (atype == TIPC_SERVICE_ADDR) { msg_set_type(hdr, TIPC_NAMED_MSG); msg_set_hdr_sz(hdr, NAMED_H_SIZE); - msg_set_nametype(hdr, type); - msg_set_nameinst(hdr, inst); - msg_set_lookup_scope(hdr, tipc_node2scope(dnode)); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dport); + msg_set_nametype(hdr, ua->sa.type); + msg_set_nameinst(hdr, ua->sa.instance); + msg_set_lookup_scope(hdr, ua->scope); } else { /* TIPC_SOCKET_ADDR */ msg_set_type(hdr, TIPC_DIRECT_MSG); msg_set_lookup_scope(hdr, 0); - msg_set_destnode(hdr, dnode); - msg_set_destport(hdr, dest->addr.id.ref); msg_set_hdr_sz(hdr, BASIC_H_SIZE); } + /* Add message body */ __skb_queue_head_init(&pkts); - mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true); + mtu = tipc_node_get_mtu(net, skaddr.node, tsk->portid, true); rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts); if (unlikely(rc != dlen)) return rc; @@ -1527,10 +1523,11 @@ static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen) return -ENOMEM; } + /* Send message */ trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " "); - rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid); + rc = tipc_node_xmit(net, &pkts, skaddr.node, tsk->portid); if (unlikely(rc == -ELINKCONG)) { - tipc_dest_push(clinks, dnode, 0); + tipc_dest_push(clinks, skaddr.node, 0); tsk->cong_link_cnt++; rc = 0; } -- 2.28.0 |
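A condensed caller-side view of the new lookup contract, lifted from the msg.c hunk above (only the framing comments are mine):

    struct tipc_socket_addr sk;
    struct tipc_uaddr ua;

    tipc_uaddr(&ua, TIPC_SERVICE_RANGE, scope, type, inst, inst);
    sk.node = tipc_scope2node(net, scope); /* 0 => lookup may go cluster-wide */
    if (!tipc_nametbl_lookup_anycast(net, &ua, &sk))
            return false;                  /* no match anywhere */
    /* sk.node/sk.ref now identify the bound socket, or the node that
     * will complete a deferred lookup */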
From: <jm...@re...> - 2020-12-08 18:51:10
|
From: Jon Maloy <jm...@re...> We reduce the signature of this function according to the same principle as the preceding commits. Signed-off-by: Jon Maloy <jm...@re...> --- net/tipc/name_table.c | 10 +++++----- net/tipc/name_table.h | 5 ++--- net/tipc/socket.c | 22 +++++++++++----------- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index ed68db36bab9..ea4356dfb47d 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -671,21 +671,21 @@ bool tipc_nametbl_lookup_group(struct net *net, u32 type, u32 instance, * Used on nodes which have received a multicast/broadcast message * Returns a list of local sockets */ -void tipc_nametbl_lookup_mcast_sockets(struct net *net, u32 type, u32 lower, - u32 upper, u32 scope, bool exact, - struct list_head *dports) +void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua, + bool exact, struct list_head *dports) { struct service_range *sr; struct tipc_service *sc; struct publication *p; + u32 scope = ua->scope; rcu_read_lock(); - sc = tipc_service_find(net, type); + sc = tipc_service_find(net, ua->sr.type); if (!sc) goto exit; spin_lock_bh(&sc->lock); - service_range_foreach_match(sr, sc, lower, upper) { + service_range_foreach_match(sr, sc, ua->sr.lower, ua->sr.upper) { list_for_each_entry(p, &sr->local_publ, local_publ) { if (p->scope == scope || (!exact && p->scope < scope)) tipc_dest_push(dports, 0, p->sk.ref); diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h index f5e37f90a2ba..368a76f73892 100644 --- a/net/tipc/name_table.h +++ b/net/tipc/name_table.h @@ -113,9 +113,8 @@ struct name_table { int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); bool tipc_nametbl_lookup_anycast(struct net *net, struct tipc_uaddr *ua, struct tipc_socket_addr *sk); -void tipc_nametbl_lookup_mcast_sockets(struct net *net, u32 type, u32 lower, - u32 upper, u32 scope, bool exact, - struct list_head *dports); +void tipc_nametbl_lookup_mcast_sockets(struct net *net, struct tipc_uaddr *ua, + bool exact, struct list_head *dports); void tipc_nametbl_lookup_mcast_nodes(struct net *net, u32 type, u32 lower, u32 upper, struct tipc_nlist *nodes); bool tipc_nametbl_lookup_group(struct net *net, u32 type, u32 instance, diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 913b1a7be25b..68d457c41c89 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -1205,17 +1205,18 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, struct sk_buff_head *inputq) { u32 self = tipc_own_addr(net); - u32 type, lower, upper, scope; struct sk_buff *skb, *_skb; u32 portid, onode; struct sk_buff_head tmpq; struct list_head dports; struct tipc_msg *hdr; + struct tipc_uaddr ua; int user, mtyp, hlen; bool exact; __skb_queue_head_init(&tmpq); INIT_LIST_HEAD(&dports); + ua.addrtype = TIPC_SERVICE_RANGE; skb = tipc_skb_peek(arrvq, &inputq->lock); for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) { @@ -1224,7 +1225,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, mtyp = msg_type(hdr); hlen = skb_headroom(skb) + msg_hdr_sz(hdr); onode = msg_orignode(hdr); - type = msg_nametype(hdr); + ua.sr.type = msg_nametype(hdr); if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) { spin_lock_bh(&inputq->lock); @@ -1239,24 +1240,23 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, /* Group messages require exact scope match */ if (msg_in_group(hdr)) { - lower = 0; - upper = ~0; - scope = msg_lookup_scope(hdr); + 
ua.sr.lower = 0; + ua.sr.upper = ~0; + ua.scope = msg_lookup_scope(hdr); exact = true; } else { /* TIPC_NODE_SCOPE means "any scope" in this context */ if (onode == self) - scope = TIPC_NODE_SCOPE; + ua.scope = TIPC_NODE_SCOPE; else - scope = TIPC_CLUSTER_SCOPE; + ua.scope = TIPC_CLUSTER_SCOPE; exact = false; - lower = msg_namelower(hdr); - upper = msg_nameupper(hdr); + ua.sr.lower = msg_namelower(hdr); + ua.sr.upper = msg_nameupper(hdr); } /* Create destination port list: */ - tipc_nametbl_lookup_mcast_sockets(net, type, lower, upper, - scope, exact, &dports); + tipc_nametbl_lookup_mcast_sockets(net, &ua, exact, &dports); /* Clone message per destination */ while (tipc_dest_pop(&dports, NULL, &portid)) { -- 2.28.0 |
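On the multicast receive path the caller now just fills in a tipc_uaddr before the lookup; condensed from the socket.c hunk above:

    struct tipc_uaddr ua;
    LIST_HEAD(dports);

    ua.addrtype = TIPC_SERVICE_RANGE;
    ua.sr.type = msg_nametype(hdr);
    ua.sr.lower = msg_namelower(hdr);
    ua.sr.upper = msg_nameupper(hdr);
    ua.scope = msg_lookup_scope(hdr);
    tipc_nametbl_lookup_mcast_sockets(net, &ua, exact, &dports);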