author    Otavio Salvador <otavio@ossystems.com.br>  2020-05-20 08:43:03 -0300
committer GitHub <noreply@github.com>                2020-05-20 08:43:03 -0300
commit    1279cd128bba968ebe0a2df7f7ae38bae90250ef (patch)
tree      df6b1a190760f51465122ca4c13492d5ac5984c6 /include/net/sock.h
parent    0a8ab17689e628c84a666195bfc6ab85d11cf057 (diff)
parent    2ae782ca839e0ee07de37122ddea362adff2e975 (diff)

Merge pull request #76 from toradex/4.9-2.3.x-imx

4.9 2.3.x imx
Diffstat (limited to 'include/net/sock.h')
-rw-r--r--  include/net/sock.h | 28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 15bb04dec40e..d6bce19ca261 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -650,11 +650,22 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
hlist_add_head_rcu(&sk->sk_node, list);
}
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+ sock_hold(sk);
+ hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
@@ -1195,7 +1206,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
percpu_counter_inc(sk->sk_prot->sockets_allocated);
}
-static inline int
+static inline u64
sk_sockets_allocated_read_positive(struct sock *sk)
{
return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
@@ -2039,12 +2050,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* sk_page_frag - return an appropriate page_frag
* @sk: socket
*
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
- if (gfpflags_allow_blocking(sk->sk_allocation))
+ if (gfpflags_normal_context(sk->sk_allocation))
return &current->task_frag;
return &sk->sk_frag;
@@ -2131,7 +2147,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
return kt;
#else
- return sk->sk_stamp;
+ return READ_ONCE(sk->sk_stamp);
#endif
}
@@ -2142,7 +2158,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
sk->sk_stamp = kt;
write_sequnlock(&sk->sk_stamp_seq);
#else
- sk->sk_stamp = kt;
+ WRITE_ONCE(sk->sk_stamp, kt);
#endif
}
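
For context on the sk_page_frag() hunk above: the new check relies on gfpflags_normal_context() being stricter than gfpflags_allow_blocking() in exactly the way the updated comment describes. As a rough sketch (not copied from this tree; the real definitions live in include/linux/gfp.h and may differ slightly in a 4.9-based kernel), the two helpers compare like this:

/*
 * Sketch of the two gfp-flag helpers the sk_page_frag() change switches
 * between; see include/linux/gfp.h for the authoritative definitions.
 */

/* True whenever the allocation is allowed to sleep (direct reclaim permitted). */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

/*
 * Stricter: the allocation may sleep *and* it is not a memalloc/reclaim
 * allocation, so touching current->task_frag cannot recurse into a
 * sk_page_frag() user that is already on the stack.
 */
static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
{
	return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
		__GFP_DIRECT_RECLAIM;
}

With gfpflags_allow_blocking() alone, an allocation issued from a reclaim path (for example a memalloc socket, whose sk_allocation carries __GFP_MEMALLOC) could re-enter sk_page_frag() and clobber current->task_frag while an outer caller was still filling it; the stricter check keeps such callers on the per-socket sk_frag instead.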