From 7e8b3dfef16375dbfeb1f36a83eb9f27117c51fd Mon Sep 17 00:00:00 2001
From: Alan Stern
Date: Thu, 23 Jun 2016 14:54:37 -0400
Subject: USB: EHCI: declare hostpc register as zero-length array

The HOSTPC extension registers found in some EHCI implementations form
a variable-length array, with one element for each port.  Therefore the
hostpc field in struct ehci_regs should be declared as a zero-length
array, not a single-element array.

This fixes a problem reported by UBSAN.

Signed-off-by: Alan Stern
Reported-by: Wilfried Klaebe
Tested-by: Wilfried Klaebe
CC:
Signed-off-by: Greg Kroah-Hartman
---
 include/linux/usb/ehci_def.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index 966889a20ea3..e479033bd782 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -180,11 +180,11 @@ struct ehci_regs {
 	 * PORTSCx
 	 */
 	/* HOSTPC: offset 0x84 */
-	u32		hostpc[1];	/* HOSTPC extension */
+	u32		hostpc[0];	/* HOSTPC extension */
 #define HOSTPC_PHCD	(1<<22)		/* Phy clock disable */
 #define HOSTPC_PSPD	(3<<25)		/* Port speed detection */
 
-	u32		reserved5[16];
+	u32		reserved5[17];
 
 	/* USBMODE_EX: offset 0xc8 */
 	u32		usbmode_ex;	/* USB Device mode extension */
--
cgit v1.2.3


From 65ee67084589c1783a74b4a4a5db38d7264ec8b5 Mon Sep 17 00:00:00 2001
From: Mohamad Haj Yahia
Date: Thu, 30 Jun 2016 17:34:43 +0300
Subject: net/mlx5: Add timeout handle to commands with callback

The current implementation does not handle timeouts for commands issued
with a callback, which can lead to a deadlock if the command never gets
a firmware response.

Schedule a delayed callback-timeout work item before posting the
command to the firmware.  On a real firmware completion the delayed
work is cancelled; on a firmware timeout the callback-timeout handler
runs and simulates a firmware completion with a timeout error.

Fixes: e126ba97dba9 ('mlx5: Add driver for Mellanox Connect-IB adapters')
Signed-off-by: Mohamad Haj Yahia
Signed-off-by: Saeed Mahameed
Signed-off-by: David S. Miller
---
 include/linux/mlx5/driver.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80776d0c52dc..fd72ecf0ce9f 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -629,6 +629,7 @@ struct mlx5_cmd_work_ent {
 	void		       *uout;
 	int			uout_size;
 	mlx5_cmd_cbk_t		callback;
+	struct delayed_work	cb_timeout_work;
 	void		       *context;
 	int			idx;
 	struct completion	done;
--
cgit v1.2.3
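For context, a minimal sketch of the arm/cancel pattern the mlx5 change above
relies on.  The helpers cmd_post_to_fw(), cmd_complete() and the
CMD_TIMEOUT_MSECS constant are hypothetical stand-ins invented for
illustration; only the cb_timeout_work field comes from the patch itself.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/mlx5/driver.h>

#define CMD_TIMEOUT_MSECS	(60 * 1000)	/* hypothetical timeout */

/* Hypothetical helpers standing in for the real command machinery. */
void cmd_post_to_fw(struct mlx5_cmd_work_ent *ent);
void cmd_complete(struct mlx5_cmd_work_ent *ent, int status);

static void cmd_cb_timeout_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent =
		container_of(work, struct mlx5_cmd_work_ent,
			     cb_timeout_work.work);

	/* Firmware never answered: simulate a completion with an error. */
	cmd_complete(ent, -ETIMEDOUT);
}

static void cmd_exec_with_callback(struct mlx5_cmd_work_ent *ent)
{
	/* Arm the timeout before the command is visible to firmware, so a
	 * lost response can never leave the callback waiting forever.
	 */
	INIT_DELAYED_WORK(&ent->cb_timeout_work, cmd_cb_timeout_handler);
	schedule_delayed_work(&ent->cb_timeout_work,
			      msecs_to_jiffies(CMD_TIMEOUT_MSECS));
	cmd_post_to_fw(ent);
}

static void cmd_fw_completion(struct mlx5_cmd_work_ent *ent)
{
	/* A real completion arrived first; drop the pending timeout work. */
	cancel_delayed_work(&ent->cb_timeout_work);
	cmd_complete(ent, 0);
}

In the real driver the timeout value, locking and refcounting around the
command entry are more involved; the sketch only shows the ordering of
arming the timeout, posting the command, and cancelling on genuine
completion.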
From eb70db8756717b90c01ccc765fdefc4dd969fc74 Mon Sep 17 00:00:00 2001
From: "David S. Miller"
Date: Fri, 1 Jul 2016 16:07:50 -0400
Subject: packet: Use symmetric hash for PACKET_FANOUT_HASH.

People who use PACKET_FANOUT_HASH want a symmetric hash, meaning that
they want packets going in both directions on a flow to hash to the
same bucket.

The core kernel SKB hash became non-symmetric when the ipv6 flow label
and other entities were incorporated into the standard flow hash order
to increase entropy.

But there are no users of PACKET_FANOUT_HASH who want an asymmetric
hash; they all want a symmetric one.

Therefore, use the flow dissector to compute a flat symmetric hash over
only the protocol, addresses and ports.

This hash is not installed into, and does not override, the normal skb
hash, so this change has no effect whatsoever on the rest of the stack.

Reported-by: Eric Leblond
Tested-by: Eric Leblond
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ee38a4127475..24859d40acde 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
 }
 
 void __skb_get_hash(struct sk_buff *skb);
+u32 __skb_get_hash_symmetric(struct sk_buff *skb);
 u32 skb_get_poff(const struct sk_buff *skb);
 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
 		   const struct flow_keys *keys, int hlen);
--
cgit v1.2.3


From 82a31b9231f02d9c1b7b290a46999d517b0d312a Mon Sep 17 00:00:00 2001
From: WANG Cong
Date: Thu, 30 Jun 2016 10:15:22 -0700
Subject: net_sched: fix mirrored packets checksum

Similar to commit 9b368814b336 ("net: fix bridge multicast packet
checksum validation"), we need to fix up the checksum for
CHECKSUM_COMPLETE when pushing an skb on the RX path.  Otherwise we get
similar splats.

Cc: Jamal Hadi Salim
Cc: Tom Herbert
Signed-off-by: Cong Wang
Acked-by: Jamal Hadi Salim
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 24859d40acde..f39b37180c41 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2870,6 +2870,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
 	skb->csum = csum_partial(start, len, skb->csum);
 }
 
+/**
+ * skb_push_rcsum - push skb and update receive checksum
+ * @skb: buffer to update
+ * @len: length of data pushed
+ *
+ * This function performs an skb_push on the packet and updates
+ * the CHECKSUM_COMPLETE checksum.  It should be used on
+ * receive path processing instead of skb_push unless you know
+ * that the checksum difference is zero (e.g., a valid IP header)
+ * or you are setting ip_summed to CHECKSUM_NONE.
+ */
+static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
+					    unsigned int len)
+{
+	skb_push(skb, len);
+	skb_postpush_rcsum(skb, skb->data, len);
+	return skb->data;
+}
+
 /**
  * pskb_trim_rcsum - trim received skb and update checksum
  * @skb: buffer to trim
--
cgit v1.2.3
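The two additions above are easiest to see in use.  Below is a hedged
sketch, not the actual af_packet or act_mirred code: the surrounding
function names are invented, and only __skb_get_hash_symmetric() and
skb_push_rcsum() come from the patches themselves.

#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Map both directions of a flow to the same fanout bucket by hashing
 * only protocol, addresses and ports.  'num' is the number of sockets
 * in the fanout group.
 */
static unsigned int demux_by_symmetric_hash(struct sk_buff *skb,
					    unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

/* Re-expose a previously pulled MAC header on a mirrored clone before
 * re-injecting it.  A bare skb_push() would leave a CHECKSUM_COMPLETE
 * skb->csum stale; skb_push_rcsum() folds the re-exposed bytes back in.
 */
static void mirror_push_mac(struct sk_buff *skb)
{
	if (skb->mac_len)
		skb_push_rcsum(skb, skb->mac_len);
}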
From 65b80179f9b8171b74625febf3457f41e792fa23 Mon Sep 17 00:00:00 2001
From: Chuck Lever
Date: Wed, 29 Jun 2016 13:55:06 -0400
Subject: xprtrdma: No direct data placement with krb5i and krb5p

Direct data placement is not allowed when using flavors that guarantee
integrity or privacy.  When such security flavors are in effect, don't
allow the use of Read and Write chunks for moving individual data
items.  All messages larger than the inline threshold are sent via
Long Call or Long Reply.

On my systems (CX-3 Pro on FDR), for small I/O operations, the use of
Long messages adds only around 5 usecs of latency in each direction.

Note that when integrity or encryption is used, the host CPU touches
every byte in these messages.  Even if it could be used, data movement
offload doesn't buy much in this case.

Signed-off-by: Chuck Lever
Tested-by: Steve Wise
Signed-off-by: Anna Schumaker
---
 include/linux/sunrpc/auth.h    | 3 +++
 include/linux/sunrpc/gss_api.h | 2 ++
 2 files changed, 5 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 899791573a40..3a40287b4d27 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -107,6 +107,9 @@ struct rpc_auth {
 			/* per-flavor data */
 };
 
+/* rpc_auth au_flags */
+#define RPCAUTH_AUTH_DATATOUCH	0x00000002
+
 struct rpc_auth_create_args {
 	rpc_authflavor_t pseudoflavor;
 	const char *target_name;
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 1f911ccb2a75..68ec78c1aa48 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -73,6 +73,7 @@ u32 gss_delete_sec_context(
 rpc_authflavor_t gss_svc_to_pseudoflavor(struct gss_api_mech *, u32 qop,
 					u32 service);
 u32 gss_pseudoflavor_to_service(struct gss_api_mech *, u32 pseudoflavor);
+bool gss_pseudoflavor_to_datatouch(struct gss_api_mech *, u32 pseudoflavor);
 char *gss_service_to_auth_domain_name(struct gss_api_mech *, u32 service);
 
 struct pf_desc {
@@ -81,6 +82,7 @@ struct pf_desc {
 	u32	service;
 	char	*name;
 	char	*auth_domain_name;
+	bool	datatouch;
 };
 
 /* Different mechanisms (e.g., krb5 or spkm3) may implement gss-api, and
--
cgit v1.2.3
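A hedged sketch of how a transport might consult the new flag when deciding
whether direct data placement is allowed.  The function name is invented for
illustration and this is not the actual xprtrdma chunk-selection code, which
also weighs message size and other factors.

#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/xprt.h>

/* Return true if Read/Write chunks (direct data placement) may be used
 * for this request.  Flavors that touch every payload byte (krb5i,
 * krb5p) advertise RPCAUTH_AUTH_DATATOUCH in their rpc_auth flags, so
 * such requests fall back to Long Call/Long Reply.
 */
static bool rdma_may_use_chunks(const struct rpc_rqst *rqst)
{
	const struct rpc_auth *auth = rqst->rq_cred->cr_auth;

	return !(auth->au_flags & RPCAUTH_AUTH_DATATOUCH);
}

On the GSS side, gss_pseudoflavor_to_datatouch() presumably reports the new
pf_desc.datatouch bit so the generic auth code can set
RPCAUTH_AUTH_DATATOUCH for the krb5i and krb5p pseudoflavors.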