path: root/include/net/tcp.h
Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h  117
1 file changed, 60 insertions(+), 57 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf54034019d9..438014d57610 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -50,6 +50,7 @@ extern atomic_t tcp_orphan_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define MAX_TCP_HEADER (128 + MAX_HEADER)
+#define MAX_TCP_OPTION_SPACE 40
/*
* Never offer a window over 32767 without using window scaling. Some
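Note: MAX_TCP_OPTION_SPACE encodes the protocol limit. A maximal data offset of 15 gives a 60-byte TCP header, and subtracting the 20-byte fixed header leaves 40 bytes for options. A minimal sketch of how an option builder can budget against it (the variable and the option mix shown are illustrative assumptions, not part of this diff):

	unsigned int remaining = MAX_TCP_OPTION_SPACE;

	remaining -= TCPOLEN_MSS_ALIGNED;	/* MSS on a SYN */
	remaining -= TCPOLEN_TSTAMP_ALIGNED;	/* timestamps, if negotiated */
	/* any SACK blocks must fit into whatever is left */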
@@ -184,6 +185,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
#define TCPOLEN_MD5SIG_ALIGNED 20
+#define TCPOLEN_MSS_ALIGNED 4
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
@@ -265,13 +267,10 @@ static inline int tcp_too_many_orphans(struct sock *sk, int num)
extern struct proto tcp_prot;
-DECLARE_SNMP_STAT(struct tcp_mib, tcp_statistics);
-#define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
-#define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
-#define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
-#define TCP_DEC_STATS(field) SNMP_DEC_STATS(tcp_statistics, field)
-#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
-#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
+#define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
+#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
extern void tcp_v4_err(struct sk_buff *skb, u32);
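Note: with the MIB now hanging off struct net, every caller must name the namespace explicitly. A hedged usage sketch (the statistic chosen is illustrative):

	/* in any code path that holds a socket */
	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);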
@@ -398,6 +397,8 @@ extern void tcp_parse_options(struct sk_buff *skb,
struct tcp_options_received *opt_rx,
int estab);
+extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
+
/*
* TCP v4 functions exported for the inet6 API
*/
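Note: tcp_parse_md5sig_option() returns a pointer to the signature bytes inside the header, or NULL when the segment carries no MD5 option. A minimal caller sketch (variable names are illustrative):

	struct tcphdr *th = tcp_hdr(skb);
	u8 *hash_location = tcp_parse_md5sig_option(th);

	if (!hash_location) {
		/* no TCP-MD5 signature on this segment */
	}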
@@ -471,6 +472,8 @@ extern void tcp_send_delayed_ack(struct sock *sk);
/* tcp_input.c */
extern void tcp_cwnd_application_limited(struct sock *sk);
+extern void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
+ struct sk_buff *skb);
/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
@@ -893,8 +896,8 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
BUG_ON(sock_owned_by_user(sk));
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
- sk->sk_backlog_rcv(sk, skb1);
- NET_INC_STATS_BH(LINUX_MIB_TCPPREQUEUEDROPPED);
+ sk_backlog_rcv(sk, skb1);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
}
tp->ucopy.memory = 0;
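Note: the open-coded indirect call is replaced by the sk_backlog_rcv() wrapper, which in include/net/sock.h of this era amounts to the following (a sketch of the surrounding kernel, not part of this diff):

	static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	{
		return sk->sk_backlog_rcv(sk, skb);
	}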
@@ -973,9 +976,10 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->acked = 0;
ireq->ecn_ok = 0;
ireq->rmt_port = tcp_hdr(skb)->source;
+ ireq->loc_port = tcp_hdr(skb)->dest;
}
-extern void tcp_enter_memory_pressure(void);
+extern void tcp_enter_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
@@ -1024,13 +1028,13 @@ static inline int tcp_paws_check(const struct tcp_options_received *rx_opt, int
#define TCP_CHECK_TIMER(sk) do { } while (0)
-static inline void tcp_mib_init(void)
+static inline void tcp_mib_init(struct net *net)
{
/* See RFC 2012 */
- TCP_ADD_STATS_USER(TCP_MIB_RTOALGORITHM, 1);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
- TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+ TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}
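Note: the *1000/HZ factor converts the jiffies-based RTO bounds into the milliseconds RFC 2012 expects. A worked example, assuming HZ == 1000 and the usual definitions TCP_RTO_MIN = HZ/5 and TCP_RTO_MAX = 120*HZ:

	/* TCP_MIB_RTOMIN: (HZ/5)   * 1000 / HZ =    200 ms */
	/* TCP_MIB_RTOMAX: (120*HZ) * 1000 / HZ = 120000 ms */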
/* from STCP */
@@ -1038,13 +1042,12 @@ static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
tp->lost_skb_hint = NULL;
tp->scoreboard_skb_hint = NULL;
- tp->retransmit_skb_hint = NULL;
- tp->forward_skb_hint = NULL;
}
static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
tcp_clear_retrans_hints_partial(tp);
+ tp->retransmit_skb_hint = NULL;
}
/* MD5 Signature */
@@ -1113,14 +1116,12 @@ struct tcp_md5sig_pool {
#define TCP_MD5SIG_MAXKEYS (~(u32)0) /* really?! */
/* - functions */
-extern int tcp_v4_calc_md5_hash(char *md5_hash,
- struct tcp_md5sig_key *key,
- struct sock *sk,
- struct dst_entry *dst,
- struct request_sock *req,
- struct tcphdr *th,
- int protocol,
- unsigned int tcplen);
+extern int tcp_v4_md5_hash_skb(char *md5_hash,
+ struct tcp_md5sig_key *key,
+ struct sock *sk,
+ struct request_sock *req,
+ struct sk_buff *skb);
+
extern struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
struct sock *addr_sk);
@@ -1132,11 +1133,26 @@ extern int tcp_v4_md5_do_add(struct sock *sk,
extern int tcp_v4_md5_do_del(struct sock *sk,
__be32 addr);
+#ifdef CONFIG_TCP_MD5SIG
+#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_keylen ? \
+ &(struct tcp_md5sig_key) { \
+ .key = (twsk)->tw_md5_key, \
+ .keylen = (twsk)->tw_md5_keylen, \
+ } : NULL)
+#else
+#define tcp_twsk_md5_key(twsk) NULL
+#endif
+
extern struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void);
extern void tcp_free_md5sig_pool(void);
extern struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu);
extern void __tcp_put_md5sig_pool(void);
+extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, struct tcphdr *);
+extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, struct sk_buff *,
+ unsigned header_len);
+extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
+ struct tcp_md5sig_key *key);
static inline
struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
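Note: the new tcp_md5_hash_header()/tcp_md5_hash_skb_data()/tcp_md5_hash_key() helpers let the per-family hash routines share everything except the pseudo-header. A sketch of how a caller such as tcp_v4_md5_hash_skb() can chain them (error handling and the family-specific pseudo-header step are elided):

	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

	if (hp) {
		/* 1. hash the family-specific pseudo-header (not shown) */
		/* 2. hash the TCP header, checksum field zeroed */
		tcp_md5_hash_header(hp, th);
		/* 3. hash the payload that follows the header */
		tcp_md5_hash_skb_data(hp, skb, th->doff << 2);
		/* 4. finally mix in the peer's key */
		tcp_md5_hash_key(hp, key);
		tcp_put_md5sig_pool();
	}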
@@ -1166,49 +1182,45 @@ static inline void tcp_write_queue_purge(struct sock *sk)
static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
{
- struct sk_buff *skb = sk->sk_write_queue.next;
- if (skb == (struct sk_buff *) &sk->sk_write_queue)
- return NULL;
- return skb;
+ return skb_peek(&sk->sk_write_queue);
}
static inline struct sk_buff *tcp_write_queue_tail(struct sock *sk)
{
- struct sk_buff *skb = sk->sk_write_queue.prev;
- if (skb == (struct sk_buff *) &sk->sk_write_queue)
- return NULL;
- return skb;
+ return skb_peek_tail(&sk->sk_write_queue);
}
static inline struct sk_buff *tcp_write_queue_next(struct sock *sk, struct sk_buff *skb)
{
- return skb->next;
+ return skb_queue_next(&sk->sk_write_queue, skb);
}
#define tcp_for_write_queue(skb, sk) \
- for (skb = (sk)->sk_write_queue.next; \
- (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
- skb = skb->next)
+ skb_queue_walk(&(sk)->sk_write_queue, skb)
#define tcp_for_write_queue_from(skb, sk) \
- for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);\
- skb = skb->next)
+ skb_queue_walk_from(&(sk)->sk_write_queue, skb)
#define tcp_for_write_queue_from_safe(skb, tmp, sk) \
- for (tmp = skb->next; \
- (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
- skb = tmp, tmp = skb->next)
+ skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
static inline struct sk_buff *tcp_send_head(struct sock *sk)
{
return sk->sk_send_head;
}
+static inline bool tcp_skb_is_last(const struct sock *sk,
+ const struct sk_buff *skb)
+{
+ return skb_queue_is_last(&sk->sk_write_queue, skb);
+}
+
static inline void tcp_advance_send_head(struct sock *sk, struct sk_buff *skb)
{
- sk->sk_send_head = skb->next;
- if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
+ if (tcp_skb_is_last(sk, skb))
sk->sk_send_head = NULL;
+ else
+ sk->sk_send_head = tcp_write_queue_next(sk, skb);
}
static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
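Note: with the helpers delegating to the generic skb_queue API, iteration keeps its usual contract: a walk must stop at the send head, since everything from there on is unsent. A minimal sketch:

	struct sk_buff *skb;

	tcp_for_write_queue(skb, sk) {
		if (skb == tcp_send_head(sk))
			break;
		/* skb here covers data that has already been sent */
	}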
@@ -1253,12 +1265,12 @@ static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
__skb_queue_after(&sk->sk_write_queue, skb, buff);
}
-/* Insert skb between prev and next on the write queue of sk. */
+/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
struct sk_buff *skb,
struct sock *sk)
{
- __skb_insert(new, skb->prev, skb, &sk->sk_write_queue);
+ __skb_queue_before(&sk->sk_write_queue, skb, new);
if (sk->sk_send_head == skb)
sk->sk_send_head = new;
@@ -1269,12 +1281,6 @@ static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
__skb_unlink(skb, &sk->sk_write_queue);
}
-static inline int tcp_skb_is_last(const struct sock *sk,
- const struct sk_buff *skb)
-{
- return skb->next == (struct sk_buff *)&sk->sk_write_queue;
-}
-
static inline int tcp_write_queue_empty(struct sock *sk)
{
return skb_queue_empty(&sk->sk_write_queue);
@@ -1348,7 +1354,7 @@ extern void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;
-extern int tcp_v4_destroy_sock(struct sock *sk);
+extern void tcp_v4_destroy_sock(struct sock *sk);
extern int tcp_v4_gso_send_check(struct sk_buff *skb);
extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features);
@@ -1366,11 +1372,8 @@ struct tcp_sock_af_ops {
int (*calc_md5_hash) (char *location,
struct tcp_md5sig_key *md5,
struct sock *sk,
- struct dst_entry *dst,
struct request_sock *req,
- struct tcphdr *th,
- int protocol,
- unsigned int len);
+ struct sk_buff *skb);
int (*md5_add) (struct sock *sk,
struct sock *addr_sk,
u8 *newkey,