From 57732560d1aa7d454d10e557f8959d19d1454174 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 26 Oct 2011 11:41:45 -0400 Subject: tipc: Add missing broadcast link lock when sending NACK Ensures that any attempt to send a NACK message over TIPC's broadcast link has exclusive access to the link's main data structures, to prevent interference with a simultaneous attempt to send other broadcast link traffic (such as application-generated multicast messages). Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/bcast.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'net/tipc/bcast.c') diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 8eb87b11d100..7342abc2cfa1 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -340,8 +340,10 @@ static void bclink_send_nack(struct tipc_node *n_ptr) msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); msg_set_bcast_tag(msg, tipc_own_tag); + spin_lock_bh(&bc_lock); tipc_bearer_send(&bcbearer->bearer, buf, NULL); bcl->stats.sent_nacks++; + spin_unlock_bh(&bc_lock); buf_discard(buf); /* -- cgit v1.2.3 From 8a275a6a30ba871eb34ea41c1fbb507039f4c0dc Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Wed, 26 Oct 2011 15:33:44 -0400 Subject: tipc: Fix node lock reclamation issues in broadcast link reception Fixes a pair of problems in broadcast link message reception code relating to the reclamation of the node lock after consuming an in-sequence message. 1) Now retests to see if the sending node is still up after reclaiming the node lock, and bails out if it is non-operational. 2) Now manipulates the node's deferred message queue only after reclaiming the node lock, rather than using queue head pointer information that was cached previously. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/bcast.c | 58 ++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 40 insertions(+), 18 deletions(-) (limited to 'net/tipc/bcast.c') diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 7342abc2cfa1..e7df313020ce 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -474,7 +474,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) struct tipc_node *node; u32 next_in; u32 seqno; - struct sk_buff *deferred; + int deferred; /* Screen out unwanted broadcast messages */ @@ -489,6 +489,8 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) if (unlikely(!node->bclink.supported)) goto unlock; + /* Handle broadcast protocol message */ + if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { if (msg_type(msg) != STATE_MSG) goto unlock; @@ -513,11 +515,11 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) /* Handle in-sequence broadcast message */ -receive: - next_in = mod(node->bclink.last_in + 1); seqno = msg_seqno(msg); + next_in = mod(node->bclink.last_in + 1); if (likely(seqno == next_in)) { +receive: bcl->stats.recv_info++; node->bclink.last_in++; bclink_set_gap(node); @@ -551,23 +553,40 @@ receive: buf_discard(buf); } buf = NULL; + + /* Determine new synchronization state */ + tipc_node_lock(node); - deferred = node->bclink.deferred_head; - if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { - buf = deferred; - msg = buf_msg(buf); - node->bclink.deferred_head = deferred->next; - goto receive; - } - } else if (less(next_in, seqno)) { + if (unlikely(!tipc_node_is_up(node))) + goto unlock; + + if (!node->bclink.deferred_head) + goto unlock; + + msg = buf_msg(node->bclink.deferred_head); + seqno = msg_seqno(msg); + next_in = mod(next_in + 1); + if (seqno != next_in) + goto unlock; + + /* Take in-sequence message 
from deferred queue & deliver it */
+
+		buf = node->bclink.deferred_head;
+		node->bclink.deferred_head = buf->next;
+		goto receive;
+	}
+
+	/* Handle out-of-sequence broadcast message */
+
+	if (less(next_in, seqno)) {
 		u32 gap_after = node->bclink.gap_after;
 		u32 gap_to = node->bclink.gap_to;
 
-		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
-					&node->bclink.deferred_tail,
-					buf)) {
+		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
+					       &node->bclink.deferred_tail,
+					       buf);
+		if (deferred) {
 			node->bclink.nack_sync++;
-			bcl->stats.deferred_recv++;
 			if (seqno == mod(gap_after + 1))
 				node->bclink.gap_after = seqno;
 			else if (less(gap_after, seqno) && less(seqno, gap_to))
@@ -579,9 +598,12 @@ receive:
 			bclink_send_nack(node);
 			bclink_set_gap(node);
 		}
-	} else {
-		bcl->stats.duplicates++;
-	}
+	} else
+		deferred = 0;
+
+	if (deferred)
+		bcl->stats.deferred_recv++;
+
 unlock:
 	tipc_node_unlock(node);
 exit:
-- cgit v1.2.3


From 0232c5a566ff52d5c9fc1dda70253c942628ca66 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Wed, 26 Oct 2011 15:57:26 -0400
Subject: tipc: Fix bug in broadcast link duplicate message statistics

Modifies broadcast link so that it increments the "received duplicate message" count if an incoming message cannot be added to the deferred message queue because it is already present in the queue. (This aligns broadcast link behavior with that of TIPC's unicast links.)

Signed-off-by: Allan Stephens
Signed-off-by: Paul Gortmaker
---
 net/tipc/bcast.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'net/tipc/bcast.c')

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e7df313020ce..035b350be5c6 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -603,6 +603,8 @@ receive:
 
 	if (deferred)
 		bcl->stats.deferred_recv++;
+	else
+		bcl->stats.duplicates++;
 
 unlock:
 	tipc_node_unlock(node);
-- cgit v1.2.3


From b98158e3b36645305363a598d91c544fa31446f1 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Wed, 26 Oct 2011 16:13:35 -0400
Subject: tipc: Add missing locks in broadcast link statistics accumulation

Ensures that all attempts to update broadcast link statistics are done only while holding the lock that protects the link's main data structures, to prevent interference by simultaneous updates caused by messages arriving on other interfaces.
Signed-off-by: Allan Stephens
Signed-off-by: Paul Gortmaker
---
 net/tipc/bcast.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'net/tipc/bcast.c')

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 035b350be5c6..facc216c6a92 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -520,6 +520,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
 
 	if (likely(seqno == next_in)) {
 receive:
+		spin_lock_bh(&bc_lock);
 		bcl->stats.recv_info++;
 		node->bclink.last_in++;
 		bclink_set_gap(node);
@@ -527,7 +528,9 @@ receive:
 			bclink_send_ack(node);
 			bcl->stats.sent_acks++;
 		}
+
 		if (likely(msg_isdata(msg))) {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			if (likely(msg_mcast(msg)))
 				tipc_port_recv_mcast(buf, NULL);
@@ -536,6 +539,7 @@ receive:
 		} else if (msg_user(msg) == MSG_BUNDLER) {
 			bcl->stats.recv_bundles++;
 			bcl->stats.recv_bundled += msg_msgcnt(msg);
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_link_recv_bundle(buf);
 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
@@ -543,12 +547,15 @@ receive:
 			if (tipc_link_recv_fragment(&node->bclink.defragm,
 						    &buf, &msg))
 				bcl->stats.recv_fragmented++;
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_net_route_msg(buf);
 		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			tipc_named_recv(buf);
 		} else {
+			spin_unlock_bh(&bc_lock);
 			tipc_node_unlock(node);
 			buf_discard(buf);
 		}
@@ -601,11 +608,15 @@ receive:
 	} else
 		deferred = 0;
 
+	spin_lock_bh(&bc_lock);
+
 	if (deferred)
 		bcl->stats.deferred_recv++;
 	else
 		bcl->stats.duplicates++;
 
+	spin_unlock_bh(&bc_lock);
+
 unlock:
 	tipc_node_unlock(node);
 exit:
-- cgit v1.2.3


From 7a54d4a99dcbbfdf1d4550faa19b615091137953 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Thu, 27 Oct 2011 14:17:53 -0400
Subject: tipc: Major redesign of broadcast link ACK/NACK algorithms

Completely redesigns broadcast link ACK and NACK mechanisms to prevent spurious retransmit requests in dual LAN networks, and to prevent the broadcast link from stalling due to the failure of a receiving node to acknowledge receiving a broadcast message or request its retransmission.

Note: These changes only impact the timing of when ACK and NACK messages are sent, and not the basic broadcast link protocol itself, so interoperability with nodes using the "classic" algorithms is maintained.

The revised algorithms are as follows:

1) An explicit ACK message is still sent after receiving 16 in-sequence messages, and implicit ACK information continues to be carried in other unicast link message headers (including link state messages). However, the timing of explicit ACKs is now based on the receiving node's absolute network address rather than its relative network address to ensure that the failure of another node does not delay the ACK beyond its 16-message target.

2) A NACK message is now typically sent only when a message gap persists for two consecutive incoming link state messages; this ensures that a suspected gap is not confirmed until both LANs in a dual LAN network have had an opportunity to deliver the message, thereby preventing spurious NACKs. A NACK message can also be generated by the arrival of a single link state message, if the deferred queue is so big that the current message gap cannot be the result of "normal" mis-ordering due to the use of dual LANs (or one LAN using a bonded interface). Since link state messages typically arrive at different nodes at different times, the problem of multiple nodes issuing identical NACKs simultaneously is inherently avoided.
3) Nodes continue to "peek" at NACK messages sent by other nodes. If another node requests retransmission of a message gap suspected (but not yet confirmed) by the peeking node, the peeking node forgets about the gap and does not generate a duplicate retransmit request. (If the peeking node subsequently fails to receive the lost message, later link state messages will cause it to rediscover and confirm the gap and send another NACK.) 4) Message gap "equality" is now determined by the start of the gap only. This is sufficient to deal with the most common cases of message loss, and eliminates the need for complex end of gap computations. 5) A peeking node no longer tries to determine whether it should send a complementary NACK, since the most common cases of message loss don't require it to be sent. Consequently, the node no longer examines the "broadcast tag" field of a NACK message when peeking. Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/bcast.c | 226 +++++++++++++++++++------------------------------------ 1 file changed, 78 insertions(+), 148 deletions(-) (limited to 'net/tipc/bcast.c') diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index facc216c6a92..1f3b1607d9d4 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -157,39 +157,14 @@ u32 tipc_bclink_get_last_sent(void) return bcl->fsm_msg_cnt; } -/** - * bclink_set_gap - set gap according to contents of current deferred pkt queue - * - * Called with 'node' locked, bc_lock unlocked - */ - -static void bclink_set_gap(struct tipc_node *n_ptr) -{ - struct sk_buff *buf = n_ptr->bclink.deferred_head; - - n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = - mod(n_ptr->bclink.last_in); - if (unlikely(buf != NULL)) - n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1); -} - -/** - * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment - * - * This mechanism endeavours to prevent all nodes in network from trying - * to ACK or NACK at the same time. - * - * Note: TIPC uses a different trigger to distribute ACKs than it does to - * distribute NACKs, but tries to use the same spacing (divide by 16). - */ - -static int bclink_ack_allowed(u32 n) +static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) { - return (n % TIPC_MIN_LINK_WIN) == tipc_own_tag; + node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ? 
+ seqno : node->bclink.last_sent; } -/** +/* * tipc_bclink_retransmit_to - get most recent node to request retransmission * * Called with bc_lock locked @@ -300,44 +275,56 @@ exit: spin_unlock_bh(&bc_lock); } -/** - * bclink_send_ack - unicast an ACK msg +/* + * tipc_bclink_update_link_state - update broadcast link state * * tipc_net_lock and node lock set */ -static void bclink_send_ack(struct tipc_node *n_ptr) +void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) { - struct tipc_link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; + struct sk_buff *buf; - if (l_ptr != NULL) - tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); -} + /* Ignore "stale" link state info */ -/** - * bclink_send_nack- broadcast a NACK msg - * - * tipc_net_lock and node lock set - */ + if (less_eq(last_sent, n_ptr->bclink.last_in)) + return; -static void bclink_send_nack(struct tipc_node *n_ptr) -{ - struct sk_buff *buf; - struct tipc_msg *msg; + /* Update link synchronization state; quit if in sync */ + + bclink_update_last_sent(n_ptr, last_sent); + + if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in) + return; + + /* Update out-of-sync state; quit if loss is still unconfirmed */ - if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to)) + if ((++n_ptr->bclink.oos_state) == 1) { + if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2)) + return; + n_ptr->bclink.oos_state++; + } + + /* Don't NACK if one has been recently sent (or seen) */ + + if (n_ptr->bclink.oos_state & 0x1) return; + /* Send NACK */ + buf = tipc_buf_acquire(INT_H_SIZE); if (buf) { - msg = buf_msg(buf); + struct tipc_msg *msg = buf_msg(buf); + tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, - INT_H_SIZE, n_ptr->addr); + INT_H_SIZE, n_ptr->addr); msg_set_non_seq(msg, 1); msg_set_mc_netid(msg, tipc_net_id); - msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); - msg_set_bcgap_after(msg, n_ptr->bclink.gap_after); - msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); + msg_set_bcast_ack(msg, n_ptr->bclink.last_in); + msg_set_bcgap_after(msg, n_ptr->bclink.last_in); + msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head + ? buf_seqno(n_ptr->bclink.deferred_head) - 1 + : n_ptr->bclink.last_sent); msg_set_bcast_tag(msg, tipc_own_tag); spin_lock_bh(&bc_lock); @@ -346,96 +333,37 @@ static void bclink_send_nack(struct tipc_node *n_ptr) spin_unlock_bh(&bc_lock); buf_discard(buf); - /* - * Ensure we doesn't send another NACK msg to the node - * until 16 more deferred messages arrive from it - * (i.e. helps prevent all nodes from NACK'ing at same time) - */ - - n_ptr->bclink.nack_sync = tipc_own_tag; + n_ptr->bclink.oos_state++; } } -/** - * tipc_bclink_check_gap - send a NACK if a sequence gap exists +/* + * bclink_peek_nack - monitor retransmission requests sent by other nodes * - * tipc_net_lock and node lock set - */ - -void tipc_bclink_check_gap(struct tipc_node *n_ptr, u32 last_sent) -{ - if (!n_ptr->bclink.supported || - less_eq(last_sent, mod(n_ptr->bclink.last_in))) - return; - - bclink_set_gap(n_ptr); - if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to) - n_ptr->bclink.gap_to = last_sent; - bclink_send_nack(n_ptr); -} - -/** - * tipc_bclink_peek_nack - process a NACK msg meant for another node + * Delay any upcoming NACK by this node if another node has already + * requested the first message this node is going to ask for. * * Only tipc_net_lock set. 
*/ -static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) +static void bclink_peek_nack(struct tipc_msg *msg) { - struct tipc_node *n_ptr = tipc_node_find(dest); - u32 my_after, my_to; + struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); - if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) + if (unlikely(!n_ptr)) return; + tipc_node_lock(n_ptr); - /* - * Modify gap to suppress unnecessary NACKs from this node - */ - my_after = n_ptr->bclink.gap_after; - my_to = n_ptr->bclink.gap_to; - - if (less_eq(gap_after, my_after)) { - if (less(my_after, gap_to) && less(gap_to, my_to)) - n_ptr->bclink.gap_after = gap_to; - else if (less_eq(my_to, gap_to)) - n_ptr->bclink.gap_to = n_ptr->bclink.gap_after; - } else if (less_eq(gap_after, my_to)) { - if (less_eq(my_to, gap_to)) - n_ptr->bclink.gap_to = gap_after; - } else { - /* - * Expand gap if missing bufs not in deferred queue: - */ - struct sk_buff *buf = n_ptr->bclink.deferred_head; - u32 prev = n_ptr->bclink.gap_to; - for (; buf; buf = buf->next) { - u32 seqno = buf_seqno(buf); + if (n_ptr->bclink.supported && + (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) && + (n_ptr->bclink.last_in == msg_bcgap_after(msg))) + n_ptr->bclink.oos_state = 2; - if (mod(seqno - prev) != 1) { - buf = NULL; - break; - } - if (seqno == gap_after) - break; - prev = seqno; - } - if (buf == NULL) - n_ptr->bclink.gap_to = gap_after; - } - /* - * Some nodes may send a complementary NACK now: - */ - if (bclink_ack_allowed(sender_tag + 1)) { - if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) { - bclink_send_nack(n_ptr); - bclink_set_gap(n_ptr); - } - } tipc_node_unlock(n_ptr); } -/** +/* * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster */ @@ -505,10 +433,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) spin_unlock_bh(&bc_lock); } else { tipc_node_unlock(node); - tipc_bclink_peek_nack(msg_destnode(msg), - msg_bcast_tag(msg), - msg_bcgap_after(msg), - msg_bcgap_to(msg)); + bclink_peek_nack(msg); } goto exit; } @@ -519,16 +444,28 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) next_in = mod(node->bclink.last_in + 1); if (likely(seqno == next_in)) { + bclink_update_last_sent(node, seqno); receive: + node->bclink.last_in = seqno; + node->bclink.oos_state = 0; + spin_lock_bh(&bc_lock); bcl->stats.recv_info++; - node->bclink.last_in++; - bclink_set_gap(node); - if (unlikely(bclink_ack_allowed(seqno))) { - bclink_send_ack(node); + + /* + * Unicast an ACK periodically, ensuring that + * all nodes in the cluster don't ACK at the same time + */ + + if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { + tipc_link_send_proto_msg( + node->active_links[node->addr & 1], + STATE_MSG, 0, 0, 0, 0, 0); bcl->stats.sent_acks++; } + /* Deliver message to destination */ + if (likely(msg_isdata(msg))) { spin_unlock_bh(&bc_lock); tipc_node_unlock(node); @@ -567,9 +504,14 @@ receive: if (unlikely(!tipc_node_is_up(node))) goto unlock; - if (!node->bclink.deferred_head) + if (node->bclink.last_in == node->bclink.last_sent) goto unlock; + if (!node->bclink.deferred_head) { + node->bclink.oos_state = 1; + goto unlock; + } + msg = buf_msg(node->bclink.deferred_head); seqno = msg_seqno(msg); next_in = mod(next_in + 1); @@ -580,31 +522,19 @@ receive: buf = node->bclink.deferred_head; node->bclink.deferred_head = buf->next; + node->bclink.deferred_size--; goto receive; } /* Handle out-of-sequence broadcast message */ if (less(next_in, seqno)) { - u32 gap_after = node->bclink.gap_after; - u32 gap_to = node->bclink.gap_to; - deferred = 
tipc_link_defer_pkt(&node->bclink.deferred_head,
 					       &node->bclink.deferred_tail,
 					       buf);
-		if (deferred) {
-			node->bclink.nack_sync++;
-			if (seqno == mod(gap_after + 1))
-				node->bclink.gap_after = seqno;
-			else if (less(gap_after, seqno) && less(seqno, gap_to))
-				node->bclink.gap_to = seqno;
-		}
+		node->bclink.deferred_size += deferred;
+		bclink_update_last_sent(node, seqno);
 		buf = NULL;
-		if (bclink_ack_allowed(node->bclink.nack_sync)) {
-			if (gap_to != gap_after)
-				bclink_send_nack(node);
-			bclink_set_gap(node);
-		}
 	} else
 		deferred = 0;
-- cgit v1.2.3


From 1ec2bb08407b377e5954b3f9479c2bf67fc925a9 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Thu, 27 Oct 2011 15:03:24 -0400
Subject: tipc: Remove obsolete broadcast tag capability

Eliminates support for the broadcast tag field, which is no longer used by broadcast link NACK messages.

Signed-off-by: Allan Stephens
Signed-off-by: Paul Gortmaker
---
 net/tipc/bcast.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'net/tipc/bcast.c')

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 1f3b1607d9d4..a9b7132d34f2 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -325,7 +325,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
 				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
 				 : n_ptr->bclink.last_sent);
-		msg_set_bcast_tag(msg, tipc_own_tag);
 
 		spin_lock_bh(&bc_lock);
 		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
-- cgit v1.2.3


From 63e7f1ac2855ba56f15d8189694ca9bd16ae4107 Mon Sep 17 00:00:00 2001
From: Allan Stephens
Date: Thu, 27 Oct 2011 16:43:09 -0400
Subject: tipc: Prevent loss of fragmented messages over broadcast link

Modifies broadcast link so that an incoming fragmented message is not lost if reassembly cannot begin because there currently is no buffer big enough to hold the entire reassembled message. The broadcast link now ignores the first fragment completely, which causes the sending node to retransmit the first fragment so that reassembly can be re-attempted.

Previously, the sender would have had no reason to retransmit the 1st fragment, so we would never have a chance to re-try the allocation.

To do this cleanly without duplication, a new bclink_accept_pkt() function is introduced.

Signed-off-by: Allan Stephens
Signed-off-by: Paul Gortmaker
---
 net/tipc/bcast.c | 64 +++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 22 deletions(-)

(limited to 'net/tipc/bcast.c')

diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a9b7132d34f2..41ecf313073c 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -389,7 +389,33 @@ exit:
 	return res;
 }
 
-/**
+/*
+ * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
+ *
+ * Called with both sending node's lock and bc_lock taken.
+ */ + +static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) +{ + bclink_update_last_sent(node, seqno); + node->bclink.last_in = seqno; + node->bclink.oos_state = 0; + bcl->stats.recv_info++; + + /* + * Unicast an ACK periodically, ensuring that + * all nodes in the cluster don't ACK at the same time + */ + + if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { + tipc_link_send_proto_msg( + node->active_links[node->addr & 1], + STATE_MSG, 0, 0, 0, 0, 0); + bcl->stats.sent_acks++; + } +} + +/* * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards * * tipc_net_lock is read_locked, no other locks set @@ -443,29 +469,12 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf) next_in = mod(node->bclink.last_in + 1); if (likely(seqno == next_in)) { - bclink_update_last_sent(node, seqno); receive: - node->bclink.last_in = seqno; - node->bclink.oos_state = 0; - - spin_lock_bh(&bc_lock); - bcl->stats.recv_info++; - - /* - * Unicast an ACK periodically, ensuring that - * all nodes in the cluster don't ACK at the same time - */ - - if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { - tipc_link_send_proto_msg( - node->active_links[node->addr & 1], - STATE_MSG, 0, 0, 0, 0, 0); - bcl->stats.sent_acks++; - } - /* Deliver message to destination */ if (likely(msg_isdata(msg))) { + spin_lock_bh(&bc_lock); + bclink_accept_pkt(node, seqno); spin_unlock_bh(&bc_lock); tipc_node_unlock(node); if (likely(msg_mcast(msg))) @@ -473,24 +482,35 @@ receive: else buf_discard(buf); } else if (msg_user(msg) == MSG_BUNDLER) { + spin_lock_bh(&bc_lock); + bclink_accept_pkt(node, seqno); bcl->stats.recv_bundles++; bcl->stats.recv_bundled += msg_msgcnt(msg); spin_unlock_bh(&bc_lock); tipc_node_unlock(node); tipc_link_recv_bundle(buf); } else if (msg_user(msg) == MSG_FRAGMENTER) { + int ret = tipc_link_recv_fragment(&node->bclink.defragm, + &buf, &msg); + if (ret < 0) + goto unlock; + spin_lock_bh(&bc_lock); + bclink_accept_pkt(node, seqno); bcl->stats.recv_fragments++; - if (tipc_link_recv_fragment(&node->bclink.defragm, - &buf, &msg)) + if (ret > 0) bcl->stats.recv_fragmented++; spin_unlock_bh(&bc_lock); tipc_node_unlock(node); tipc_net_route_msg(buf); } else if (msg_user(msg) == NAME_DISTRIBUTOR) { + spin_lock_bh(&bc_lock); + bclink_accept_pkt(node, seqno); spin_unlock_bh(&bc_lock); tipc_node_unlock(node); tipc_named_recv(buf); } else { + spin_lock_bh(&bc_lock); + bclink_accept_pkt(node, seqno); spin_unlock_bh(&bc_lock); tipc_node_unlock(node); buf_discard(buf); -- cgit v1.2.3 From 5f6d9123f1c7ef7297b0da1620988fe16c738e75 Mon Sep 17 00:00:00 2001 From: Allan Stephens Date: Fri, 4 Nov 2011 13:24:29 -0400 Subject: tipc: Eliminate trivial buffer manipulation helper routines Gets rid of two inlined routines that simply call existing sk_buff manipulation routines, since there is no longer any extra processing done by the helper routines. Note that these changes are essentially cosmetic in nature, and have no impact on the actual operation of TIPC. 
Signed-off-by: Allan Stephens Signed-off-by: Paul Gortmaker --- net/tipc/bcast.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'net/tipc/bcast.c') diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 41ecf313073c..e00441a2092f 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -256,7 +256,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) if (bcbuf_acks(crs) == 0) { bcl->first_out = next; bcl->out_queue_size--; - buf_discard(crs); + kfree_skb(crs); released = 1; } crs = next; @@ -330,7 +330,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) tipc_bearer_send(&bcbearer->bearer, buf, NULL); bcl->stats.sent_nacks++; spin_unlock_bh(&bc_lock); - buf_discard(buf); + kfree_skb(buf); n_ptr->bclink.oos_state++; } @@ -374,7 +374,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf) if (!bclink->bcast_nodes.count) { res = msg_data_sz(buf_msg(buf)); - buf_discard(buf); + kfree_skb(buf); goto exit; } @@ -480,7 +480,7 @@ receive: if (likely(msg_mcast(msg))) tipc_port_recv_mcast(buf, NULL); else - buf_discard(buf); + kfree_skb(buf); } else if (msg_user(msg) == MSG_BUNDLER) { spin_lock_bh(&bc_lock); bclink_accept_pkt(node, seqno); @@ -513,7 +513,7 @@ receive: bclink_accept_pkt(node, seqno); spin_unlock_bh(&bc_lock); tipc_node_unlock(node); - buf_discard(buf); + kfree_skb(buf); } buf = NULL; @@ -569,7 +569,7 @@ receive: unlock: tipc_node_unlock(node); exit: - buf_discard(buf); + kfree_skb(buf); } u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) -- cgit v1.2.3
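
The node-lock fix in commit 8a275a6a above is an instance of a general locking rule: once a lock is dropped for an upcall, everything that was read under it is stale, so liveness and queue pointers must be re-read after the lock is reclaimed. Below is a minimal userspace sketch of that pattern; the pthreads mutex and the rx_state/deliver_and_resync names are invented stand-ins for the kernel's node lock and bclink fields, not TIPC API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct pkt {
	struct pkt *next;
	unsigned int seqno;
};

/* Invented receiver state; 'lock' plays the role of the TIPC node lock */
struct rx_state {
	pthread_mutex_t lock;
	bool node_up;
	struct pkt *deferred_head;	/* out-of-order pkts, lowest seqno first */
};

static void deliver(struct pkt *p)	/* upcall; must run without the lock */
{
	printf("delivered %u\n", p->seqno);
}

/*
 * Deliver 'p', then look for the follow-on packet in the deferred queue.
 * The point of the fix: liveness and the queue head are checked only
 * AFTER the lock is re-taken; nothing cached from before the unlock is
 * trusted.  Called with st->lock held; returns (still locked) the next
 * in-sequence packet, or NULL.
 */
static struct pkt *deliver_and_resync(struct rx_state *st, struct pkt *p,
				      unsigned int next_seqno)
{
	struct pkt *head;

	pthread_mutex_unlock(&st->lock);
	deliver(p);
	pthread_mutex_lock(&st->lock);

	if (!st->node_up)		/* node may have failed while unlocked */
		return NULL;

	head = st->deferred_head;	/* re-read; a cached pointer is stale */
	if (head && head->seqno == next_seqno) {
		st->deferred_head = head->next;
		return head;
	}
	return NULL;
}

int main(void)
{
	struct pkt p43 = { NULL, 43 }, p42 = { NULL, 42 };
	struct rx_state st = { PTHREAD_MUTEX_INITIALIZER, true, &p43 };
	struct pkt *next;

	pthread_mutex_lock(&st.lock);
	next = deliver_and_resync(&st, &p42, 43);	/* finds 43 queued */
	if (next)
		deliver_and_resync(&st, next, 44);	/* queue now empty */
	pthread_mutex_unlock(&st.lock);
	return 0;
}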
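
The redesigned ACK timing (item 1 of commit 7a54d4a9) comes down to one test, visible in the diffs above and finally housed in bclink_accept_pkt(): ((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0. Every node still ACKs every 16th message, but at an offset fixed by its own address, so the schedule no longer depends on which other nodes are alive. A stand-alone sketch with made-up node addresses shows the staggering:

#include <stdint.h>
#include <stdio.h>

#define TIPC_MIN_LINK_WIN 16

/*
 * A node ACKs every 16th broadcast message, at an offset fixed by its
 * own network address, so nodes whose addresses differ modulo 16 never
 * ACK the same packet.  Unsigned wraparound keeps the arithmetic valid
 * even when own_addr is larger than seqno.
 */
static int should_ack(uint32_t seqno, uint32_t own_addr)
{
	return ((seqno - own_addr) % TIPC_MIN_LINK_WIN) == 0;
}

int main(void)
{
	uint32_t nodes[] = { 1001, 1002, 1003 };	/* made-up addresses */
	uint32_t seqno;
	unsigned int i;

	for (seqno = 100; seqno < 120; seqno++)
		for (i = 0; i < 3; i++)
			if (should_ack(seqno, nodes[i]))
				printf("seqno %u: node %u ACKs\n",
				       seqno, nodes[i]);
	return 0;
}

Running it prints one ACK per node (here at seqnos 105, 106 and 107), never two nodes on the same packet.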
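
The NACK half (item 2) is a small state machine around bclink.oos_state: the counter advances once per link state message that reports a gap, a NACK is considered only when the count is even, and an odd count means a NACK was just sent or seen. The sketch below models the decision made by tipc_bclink_update_link_state() above, for illustration only; struct bclink_state is an invented stand-in, and the kernel's wraparound-safe less_eq()/mod() comparisons are simplified to ordinary integer compares.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TIPC_MIN_LINK_WIN 16

/* Invented stand-in for the node's broadcast-link sync state */
struct bclink_state {
	uint32_t last_in;	/* last in-sequence message received */
	uint32_t last_sent;	/* highest message known to have been sent */
	uint32_t oos_state;	/* link state msgs seen while out of sync */
	uint32_t deferred_size;	/* length of the deferred queue */
};

/*
 * Decide whether an incoming link state message warrants a NACK:
 *  - stale reports and in-sync links never NACK;
 *  - the first report of a gap is not trusted, since dual LANs can
 *    mis-order delivery, unless the deferred queue already exceeds
 *    half the link window;
 *  - an odd oos_state means a NACK was recently sent (or seen), so skip.
 */
static bool bclink_should_nack(struct bclink_state *st, uint32_t last_sent)
{
	if (last_sent <= st->last_in)
		return false;
	if (last_sent > st->last_sent)
		st->last_sent = last_sent;
	if (st->last_sent == st->last_in)
		return false;

	if (++st->oos_state == 1) {
		if (st->deferred_size < TIPC_MIN_LINK_WIN / 2)
			return false;
		st->oos_state++;	/* big backlog: confirm immediately */
	}
	if (st->oos_state & 0x1)
		return false;

	st->oos_state++;		/* marks "NACK recently sent" */
	return true;
}

int main(void)
{
	struct bclink_state st = { .last_in = 10, .last_sent = 10 };

	printf("1st report: %d\n", bclink_should_nack(&st, 12)); /* 0: unconfirmed */
	printf("2nd report: %d\n", bclink_should_nack(&st, 12)); /* 1: NACK */

	/* Retransmission arrives: in-sequence receipt resets oos_state */
	st.last_in = 12;
	st.oos_state = 0;
	printf("in sync:    %d\n", bclink_should_nack(&st, 12)); /* 0: stale */
	return 0;
}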
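
Finally, the fragment fix (commit 63e7f1ac) works by not accepting the packet when reassembly cannot start: bclink_accept_pkt() is never reached, last_in does not advance, the first fragment goes unacknowledged, and the sender's normal retransmission gives the receiver another chance to allocate the reassembly buffer. A toy model of that accept-or-ignore decision follows; handle_fragment() and its return convention are invented for illustration, standing in for the three-way result of tipc_link_recv_fragment().

#include <stdio.h>

/*
 * recv_ret mimics the reassembly outcome: < 0 means reassembly could
 * not start (no buffer big enough), 0 means the fragment was absorbed
 * into a pending reassembly, > 0 means the message is now complete.
 */
static int handle_fragment(int recv_ret, unsigned int *last_in,
			   unsigned int seqno)
{
	if (recv_ret < 0)
		return 0;	/* ignore: not accepted, not ACKed, so the
				 * sender keeps retransmitting fragment 1 */
	*last_in = seqno;	/* accept: ACK bookkeeping now covers it */
	return recv_ret > 0 ? 2 : 1;	/* 2: deliver whole msg, 1: wait */
}

int main(void)
{
	unsigned int last_in = 41;

	/* No buffer available: packet ignored, last_in stays at 41 */
	printf("%d last_in=%u\n", handle_fragment(-1, &last_in, 42), last_in);
	/* Retransmission arrives once memory frees up: accepted */
	printf("%d last_in=%u\n", handle_fragment(1, &last_in, 42), last_in);
	return 0;
}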