From: Jon M. <jon...@er...> - 2004-05-12 21:11:44
|
PS: There were more comments further down in my response, just in case you may have missed them. /jon Mark Haverkamp wrote: On Wed, 2004-05-12 at 12:09, Jon Maloy wrote: Yeah, that's a classical one. I also think your solution is ok; the pending ports will be awakened at next message reception, so no harm done. Thanks /jon OK, here is what I plan to check in. Take a look. I fixed a few other things that seemed wrong. See comments by each diff. Mark. p.s. here is another problem I've been seeing: If I do management port access during congestion, the I/O never completes even when the congestion is over. ---------------------------------------- cvs diff -u media.c reg.c link.c -------------------------------------------------- I think that this should be an unlock since it is locked a few lines above. Index: media.c =================================================================== RCS file: /cvsroot/tipc/source/unstable/net/tipc/media.c,v retrieving revision 1.13 diff -u -r1.13 media.c --- media.c 6 May 2004 15:35:31 -0000 1.13 +++ media.c 12 May 2004 20:10:30 -0000 @@ -221,7 +221,7 @@ bearer_schedule_unlocked(this, link); res = 0; } - spin_lock_bh(&this->publ.lock); + spin_unlock_bh(&this->publ.lock); return res; } --------------------------------------------------- Daniel found this one. ref_lock_deref can grab an invalid object pointer if the lock is released too soon. This waits until the ref number is changed so it won't match in ref_lock_deref after it gets the lock. 
Index: reg.c =================================================================== RCS file: /cvsroot/tipc/source/unstable/net/tipc/reg.c,v retrieving revision 1.8 diff -u -r1.8 reg.c --- reg.c 5 May 2004 18:39:37 -0000 1.8 +++ reg.c 12 May 2004 20:10:31 -0000 @@ -167,7 +167,6 @@ assert(entry->object != 0); assert(entry->data.reference == ref_nb); entry->object = 0; - spin_unlock_bh(&entry->lock); if (ref_table.first_free == 0) ref_table.first_free = index; else @@ -175,6 +174,7 @@ ref_table.entries[ref_table.last_free].data.next_plus_upper |= index; ref_table.last_free = index; entry->data.next_plus_upper = (ref_nb & ~index_mask) + index_mask + 1; + spin_unlock_bh(&entry->lock); write_unlock_bh(&ref_lock); } ------------------------------------------------ link_schedule_port took the global port_lock/port lock in a different order than everywhere else. Added the spin_trylock_bh in link_wakeup_ports. Added an unlock in link_recv_fragment on an error exit. Fixed a couple places where checking the this pointer looked wrong. 
Index: link.c =================================================================== RCS file: /cvsroot/tipc/source/unstable/net/tipc/link.c,v retrieving revision 1.24 diff -u -r1.24 link.c --- link.c 7 May 2004 23:16:03 -0000 1.24 +++ link.c 12 May 2004 20:10:32 -0000 @@ -440,10 +440,14 @@ int link_schedule_port(struct link *this, uint origport,uint sz) { - struct port *port = port_lock_deref(origport); - if (!port) - return TIPC_CONGESTION; + struct port *port; + spin_lock_bh(&port_lock); + port = port_lock_deref(origport); + if (!port) { + spin_unlock_bh(&port_lock); + return TIPC_CONGESTION; + } if (!port->wakeup) goto exit; if (!list_empty(&port->wait_list)) @@ -453,8 +457,8 @@ port->waiting_pkts = 1 + sz/link_max_pkt(this); list_add_tail(&port->wait_list, &this->waiting_ports); exit: - spin_unlock_bh(&port_lock); spin_unlock_bh(port->publ.lock); + spin_unlock_bh(&port_lock); return TIPC_CONGESTION; } @@ -467,7 +471,8 @@ win = 100000; if (win <= 0) return; - spin_lock_bh(&port_lock); + if (!spin_trylock_bh(&port_lock)) + return; if (link_congested(this)) goto exit; list_for_each_entry_safe(port, tp, &this->waiting_ports, wait_list) { @@ -2365,6 +2370,7 @@ if (msg_size(imsg) > TIPC_MAX_MSG_SIZE + LONG_H_SIZE){ msg_print(CONS,fragm,"<REC<Oversized: "); buf_discard(fbuf); + spin_unlock_bh(&this->owner->lock); return; } buf = buf_acquire(msg_size(imsg)); @@ -2623,7 +2629,7 @@ struct link *this; read_lock_bh(&net_lock); this = link_find_by_name(name); - if (!this){ + if (this){ this->exp_msg_count = LINK_BLOCKED; spin_unlock_bh(&this->owner->lock); res = TIPC_OK; @@ -2640,7 +2646,7 @@ struct link *this; read_lock_bh(&net_lock); this = link_find_by_name(name); - if (!this){ + if (this){ this->exp_msg_count = 0; spin_unlock_bh(&this->owner->lock); res = TIPC_OK; |