author	Marcelo Ricardo Leitner <mleitner@redhat.com>	2014-10-13 14:03:30 -0300
committer	Ben Hutchings <ben@decadent.org.uk>	2014-11-05 20:27:47 +0000
commit	4715883ba814db9635baf74e378580bd27a534bd (patch)
tree	ce6c60de0dc780f0fa2edfaa0152ff71e889b6f4
parent	ad5ca98f54c3b6604a7bb72059973a85deb0b779 (diff)
ipv4: disable bh while doing route gc
Further tests revealed that, after moving the garbage collector to a work
queue and protecting it with a spinlock, the system may be left prone to
soft lockups if the bottom half gets very busy. It was reproduced with a
set of firewall rules that REJECTed packets. If the NIC bottom half
handler ends up running on the same CPU that is running the garbage
collector on a very large cache, the garbage collector will not be able
to do its job due to the amount of work needed for handling the REJECTs,
and it also won't reschedule.

The fix is to disable bottom halves during the garbage collection, as was
already the case in the first place (most calls to it came from softirqs).

Signed-off-by: Marcelo Ricardo Leitner <mleitner@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
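For context, a minimal sketch of the locking pattern the patch switches to (not part of the patch; my_cache_lock, my_cache_gc() and my_cache_insert() are hypothetical names standing in for rt_gc_lock and the route cache): process-context code that shares a spinlock with softirq code should take it with spin_lock_bh(), so that a softirq arriving on the same CPU cannot keep interrupting the lock holder and starve a long-running walk such as the GC.

/*
 * Minimal sketch of the locking pattern, not taken from the kernel tree:
 * my_cache_lock, my_cache, my_cache_gc() and my_cache_insert() are
 * hypothetical names used only for illustration.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(my_cache_lock);
static LIST_HEAD(my_cache);

/* Process context (work queue), like __rt_garbage_collect(). */
static void my_cache_gc(void)
{
	struct list_head *pos, *tmp;

	/*
	 * spin_lock_bh() disables bottom halves on the local CPU before
	 * taking the lock, so a softirq (e.g. NIC RX processing) cannot
	 * preempt this long walk and starve it on the same CPU.
	 */
	spin_lock_bh(&my_cache_lock);
	list_for_each_safe(pos, tmp, &my_cache) {
		/* ... expire stale entries ... */
	}
	spin_unlock_bh(&my_cache_lock);
}

/* Softirq context, e.g. the packet receive path. */
static void my_cache_insert(struct list_head *entry)
{
	/* Bottom halves are already disabled here; plain spin_lock() suffices. */
	spin_lock(&my_cache_lock);
	list_add(entry, &my_cache);
	spin_unlock(&my_cache_lock);
}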
-rw-r--r--	net/ipv4/route.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6b7108e53fd9..8e79a9e04276 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1000,7 +1000,7 @@ static void __do_rt_garbage_collect(int elasticity, int min_interval)
 	 * do not make it too frequently.
 	 */
 
-	spin_lock(&rt_gc_lock);
+	spin_lock_bh(&rt_gc_lock);
 
 	RT_CACHE_STAT_INC(gc_total);
@@ -1103,7 +1103,7 @@ work_done:
 	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
 		expire = ip_rt_gc_timeout;
 out:
-	spin_unlock(&rt_gc_lock);
+	spin_unlock_bh(&rt_gc_lock);
 }
 
 static void __rt_garbage_collect(struct work_struct *w)