Diffstat (limited to 'include/net')
-rw-r--r--  include/net/tls.h  70
1 file changed, 58 insertions, 12 deletions
diff --git a/include/net/tls.h b/include/net/tls.h
index 9f3c4ea9ad6f..3aa73e2d8823 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -41,7 +41,7 @@
#include <linux/tcp.h>
#include <net/tcp.h>
#include <net/strparser.h>
-
+#include <crypto/aead.h>
#include <uapi/linux/tls.h>
@@ -93,24 +93,47 @@ enum {
TLS_NUM_CONFIG,
};
-struct tls_sw_context_tx {
- struct crypto_aead *aead_send;
- struct crypto_wait async_wait;
-
- char aad_space[TLS_AAD_SPACE_SIZE];
-
- unsigned int sg_plaintext_size;
- int sg_plaintext_num_elem;
+/* TLS records are maintained in 'struct tls_rec'. It stores the memory pages
+ * allocated or mapped for each TLS record. After encryption, the records are
+ * stored in a linked list.
+ */
+struct tls_rec {
+ struct list_head list;
+ int tx_flags;
struct scatterlist sg_plaintext_data[MAX_SKB_FRAGS];
-
- unsigned int sg_encrypted_size;
- int sg_encrypted_num_elem;
struct scatterlist sg_encrypted_data[MAX_SKB_FRAGS];
/* AAD | sg_plaintext_data | sg_tag */
struct scatterlist sg_aead_in[2];
/* AAD | sg_encrypted_data (data contain overhead for hdr&iv&tag) */
struct scatterlist sg_aead_out[2];
+
+ unsigned int sg_plaintext_size;
+ unsigned int sg_encrypted_size;
+ int sg_plaintext_num_elem;
+ int sg_encrypted_num_elem;
+
+ char aad_space[TLS_AAD_SPACE_SIZE];
+ struct aead_request aead_req;
+ u8 aead_req_ctx[];
+};
+
+struct tx_work {
+ struct delayed_work work;
+ struct sock *sk;
+};
+
+struct tls_sw_context_tx {
+ struct crypto_aead *aead_send;
+ struct crypto_wait async_wait;
+ struct tx_work tx_work;
+ struct tls_rec *open_rec;
+ struct list_head tx_ready_list;
+ atomic_t encrypt_pending;
+ int async_notify;
+
+#define BIT_TX_SCHEDULED 0
+ unsigned long tx_bitmask;
};
struct tls_sw_context_rx {
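The struct tls_rec, struct tx_work and reworked struct tls_sw_context_tx added above carry per-record state so encryption can complete asynchronously: each record owns its plaintext and ciphertext scatterlists plus an embedded aead_request, and finished records are queued on tx_ready_list for in-order transmission. The following is an illustrative sketch only, assuming the declarations above; the helper name queue_encrypted_record() is hypothetical, and the real completion path (which also keeps tx_ready_list sorted by record sequence number) lives in net/tls/tls_sw.c, not in this header diff.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <net/tls.h>

static void queue_encrypted_record(struct tls_sw_context_tx *ctx,
				   struct tls_rec *rec)
{
	/* Record finished encrypting: hand it to the transmit path. */
	list_add_tail(&rec->list, &ctx->tx_ready_list);

	/* Make sure the transmit worker is scheduled to drain the list,
	 * unless a run is already pending.
	 */
	if (!test_and_set_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		schedule_delayed_work(&ctx->tx_work.work, 0);
}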
@@ -197,6 +220,8 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
+ u64 tx_seq_number; /* Next TLS seqnum to be transmitted */
+
unsigned long flags;
bool in_tcp_sendpages;
@@ -261,6 +286,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
+int tls_tx_records(struct sock *sk, int flags);
struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
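tls_tx_records() declared above is the entry point that transmits records already encrypted and queued on tx_ready_list. Below is a hedged sketch of how the delayed work embedded in tls_sw_context_tx might invoke it; the handler name and the flags value are illustrative, locking and error handling are simplified, and the real handler is in net/tls/tls_sw.c.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/tls.h>

static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct tx_work *tx_work = container_of(dwork, struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	/* Only run if someone actually scheduled a transmit pass. */
	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);		/* flags value is illustrative */
	release_sock(sk);
}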
@@ -279,6 +305,9 @@ void tls_sk_destruct(struct sock *sk, struct tls_context *ctx);
int tls_push_sg(struct sock *sk, struct tls_context *ctx,
struct scatterlist *sg, u16 first_offset,
int flags);
+int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
+ int flags);
+
int tls_push_pending_closed_record(struct sock *sk, struct tls_context *ctx,
int flags, long *timeo);
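tls_push_partial_record() gives callers a way to resume a record whose pages were only partially pushed to TCP. One plausible implementation, in terms of tls_push_sg() and the partially_sent_record/partially_sent_offset state kept in tls_context, is sketched below; the authoritative definition is not part of this header diff and may differ.

/* Illustrative sketch only -- not part of this patch. */
#include <net/tls.h>

int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
			    int flags)
{
	struct scatterlist *sg = ctx->partially_sent_record;
	u16 offset = ctx->partially_sent_offset;

	/* Clear the saved state first; tls_push_sg() records a new
	 * partial position if the socket backs up again.
	 */
	ctx->partially_sent_record = NULL;
	return tls_push_sg(sk, ctx, sg, offset, flags);
}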
@@ -312,6 +341,23 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+static inline bool is_tx_ready(struct tls_context *tls_ctx,
+ struct tls_sw_context_tx *ctx)
+{
+ struct tls_rec *rec;
+ u64 seq;
+
+ rec = list_first_entry(&ctx->tx_ready_list, struct tls_rec, list);
+ if (!rec)
+ return false;
+
+ seq = be64_to_cpup((const __be64 *)&rec->aad_space);
+ if (seq == tls_ctx->tx_seq_number)
+ return true;
+ else
+ return false;
+}
+
struct sk_buff *
tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
struct sk_buff *skb);
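is_tx_ready() above only reports whether the record at the head of tx_ready_list carries the next sequence number to go out: the record's sequence number sits big-endian at the start of its aad_space, while tls_ctx->tx_seq_number tracks the next one to transmit. The sketch below shows how a drain loop might be built on it; the function name is hypothetical, record freeing and page handling are simplified, and the real in-order transmit logic is tls_tx_records() in net/tls/tls_sw.c.

/* Illustrative sketch only -- not part of this patch. Assumes records
 * were kmalloc'ed and that their encrypted pages can be pushed with
 * tls_push_sg(); both points simplify the real transmit path.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <net/tls.h>

static int drain_tx_ready_list(struct sock *sk, int flags)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);
	struct tls_rec *rec, *tmp;
	int rc = 0;

	list_for_each_entry_safe(rec, tmp, &ctx->tx_ready_list, list) {
		/* Stop if the head record is not next in sequence; an
		 * earlier record is still being encrypted.
		 */
		if (!is_tx_ready(tls_ctx, ctx))
			break;

		rc = tls_push_sg(sk, tls_ctx, &rec->sg_encrypted_data[0],
				 0, flags);
		if (rc)
			break;

		list_del(&rec->list);
		kfree(rec);
		tls_ctx->tx_seq_number++;	/* next seqnum to transmit */
	}

	return rc;
}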