author    blacksuan19 <i@blacksuan19.me>    2020-05-20 19:32:35 +0800
committer blacksuan19 <i@blacksuan19.me>    2020-05-20 19:32:35 +0800
commit    7c3b21ee4ec75286af8d579ce96ac0ecc4bfe5c4 (patch)
tree      304c5d48fee74ff07d6316c78f858c2626080a66
parent    d6328503c46ef2d8769d1c847529943a76ac63fb (diff)
wireguard: checkout master
Signed-off-by: blacksuan19 <i@blacksuan19.me>
-rw-r--r--  net/wireguard/compat/Kbuild.include                     |  12
-rw-r--r--  net/wireguard/compat/compat.h                           |  65
-rw-r--r--  net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h  | 106
-rw-r--r--  net/wireguard/crypto/zinc/blake2s/blake2s.c             |   5
-rw-r--r--  net/wireguard/crypto/zinc/chacha20/chacha20.c           |   2
-rw-r--r--  net/wireguard/crypto/zinc/chacha20poly1305.c            |   6
-rw-r--r--  net/wireguard/crypto/zinc/curve25519/curve25519.c       |   3
-rw-r--r--  net/wireguard/crypto/zinc/poly1305/poly1305.c           |   3
-rw-r--r--  net/wireguard/dkms.conf                                 |   2
-rw-r--r--  net/wireguard/main.c                                    |   1
-rw-r--r--  net/wireguard/messages.h                                |   2
-rw-r--r--  net/wireguard/noise.c                                   |  22
-rw-r--r--  net/wireguard/noise.h                                   |  14
-rw-r--r--  net/wireguard/queueing.c                                |   4
-rw-r--r--  net/wireguard/queueing.h                                |  10
-rw-r--r--  net/wireguard/receive.c                                 |  66
-rw-r--r--  net/wireguard/selftest/counter.c                        |  17
-rw-r--r--  net/wireguard/selftest/ratelimiter.c                    |   4
-rw-r--r--  net/wireguard/send.c                                    |  34
-rw-r--r--  net/wireguard/socket.c                                  |  12
-rwxr-xr-x  net/wireguard/tests/netns.sh                            |  54
-rw-r--r--  net/wireguard/tests/qemu/Makefile                       |   4
-rw-r--r--  net/wireguard/tests/qemu/arch/powerpc64le.config        |   1
-rw-r--r--  net/wireguard/tests/qemu/init.c                         |   5
-rw-r--r--  net/wireguard/version.h                                 |   2
25 files changed, 263 insertions, 193 deletions
diff --git a/net/wireguard/compat/Kbuild.include b/net/wireguard/compat/Kbuild.include
index 53a8bde2d4c6..e93767120b54 100644
--- a/net/wireguard/compat/Kbuild.include
+++ b/net/wireguard/compat/Kbuild.include
@@ -52,12 +52,8 @@ ccflags-y += -DCOMPAT_CANNOT_USE_DEV_CNF
endif
ifdef CONFIG_HZ
-ifeq ($(wildcard $(srctree)/include/generated/timeconst.h),)
-HZ_IFDEF := $(shell echo 'define gcd(a,b){auto t;while(b){t=b;b=a%b;a=t;};return a;};hz=$(CONFIG_HZ);cd=gcd(hz,1000000);print "-DHZ_TO_USEC_NUM=",1000000/cd," -DHZ_TO_USEC_DEN=",hz/cd;halt;' | bc -q)
-ifeq ($(HZ_IFDEF),)
-$(error bc(1) is required for building)
-endif
-ccflags-y += $(HZ_IFDEF)
+ifeq ($(wildcard $(CURDIR)/include/generated/timeconst.h),)
+ccflags-y += $(shell bash -c '((a=$(CONFIG_HZ), b=1000000)); while ((b > 0)); do ((t=b, b=a%b, a=t)); done; echo "-DHZ_TO_USEC_NUM=$$((1000000/a)) -DHZ_TO_USEC_DEN=$$(($(CONFIG_HZ)/a))";')
endif
endif
@@ -100,3 +96,7 @@ ifeq ($(CONFIG_X86_64),y)
asflags-y += $(adx_instr)
endif
endif
+
+ifneq ($(shell grep -s -F "\#define LINUX_PACKAGE_ID \" Debian " "$(CURDIR)/include/generated/package.h"),)
+ccflags-y += -DISDEBIAN
+endif
diff --git a/net/wireguard/compat/compat.h b/net/wireguard/compat/compat.h
index f4cc5148eb67..901b60142a2d 100644
--- a/net/wireguard/compat/compat.h
+++ b/net/wireguard/compat/compat.h
@@ -16,18 +16,15 @@
#define ISRHEL7
#elif RHEL_MAJOR == 8
#define ISRHEL8
-#ifdef RHEL_MINOR
-#if RHEL_MINOR == 2
-#define ISRHEL82
-#endif
-#endif
#endif
#endif
#ifdef UTS_UBUNTU_RELEASE_ABI
#if LINUX_VERSION_CODE == KERNEL_VERSION(3, 13, 11)
#define ISUBUNTU1404
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
#define ISUBUNTU1604
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+#define ISUBUNTU1910
#endif
#endif
#ifdef CONFIG_SUSE_KERNEL
@@ -96,16 +93,10 @@
#define ipv6_dst_lookup(a, b, c, d) ipv6_dst_lookup(b, c, d)
#endif
-#if (LINUX_VERSION_CODE == KERNEL_VERSION(4, 4, 0) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(4, 1, 17) && LINUX_VERSION_CODE > KERNEL_VERSION(3, 19, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 27) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 16, 8) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 40) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0)) || \
- (LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 54))) && !defined(ISUBUNTU1404) && !defined(ISRHEL7)
-#include <linux/if.h>
-#include <net/ip_tunnels.h>
-#define IP6_ECN_set_ce(a, b) IP6_ECN_set_ce(b)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 83)
+#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup_flow(b, c, d)
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || (!defined(ISHREL8) && !defined(ISDEBIAN) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224)
+#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISRHEL7)
@@ -121,7 +112,6 @@ static const struct ipv6_stub_type ipv6_stub_impl = {
static const struct ipv6_stub_type *ipv6_stub = &ipv6_stub_impl;
#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 8, 0) && IS_ENABLED(CONFIG_IPV6) && !defined(ISOPENSUSE42) && !defined(ISRHEL7)
#include <net/addrconf.h>
static inline bool ipv6_mod_enabled(void)
@@ -431,7 +421,8 @@ static inline u64 ktime_get_coarse_boottime_ns(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
return ktime_to_ns(ktime_get_boottime());
-#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || (!defined(ISHREL8) && !defined(ISDEBIAN) && LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 119) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 15, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 14, 181) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 224) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 224) return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT));
+#elif (LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 12) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 20, 0)) || LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 53)
+ return ktime_to_ns(ktime_mono_to_any(ns_to_ktime(jiffies64_to_nsecs(get_jiffies_64())), TK_OFFS_BOOT));
#else
return ktime_to_ns(ktime_get_coarse_boottime());
#endif
@@ -769,7 +760,7 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
#define hlist_add_behind(a, b) hlist_add_after(b, a)
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 0, 0)
#define totalram_pages() totalram_pages
#endif
@@ -794,7 +785,7 @@ struct __kernel_timespec {
#endif
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL82)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8)
#include <linux/skbuff.h>
#define skb_probe_transport_header(a) skb_probe_transport_header(a, 0)
#endif
@@ -803,7 +794,7 @@ struct __kernel_timespec {
#define ignore_df local_df
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL82)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0) && !defined(ISRHEL8)
/* Note that all intentional uses of the non-_bh variety need to explicitly
* undef these, conditionalized on COMPAT_CANNOT_DEPRECIATE_BH_RCU.
*/
@@ -845,7 +836,7 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0) && !defined(ISRHEL8)
#define NLA_EXACT_LEN NLA_UNSPEC
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL82)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0) && !defined(ISRHEL8)
#define NLA_MIN_LEN NLA_UNSPEC
#define COMPAT_CANNOT_INDIVIDUAL_NETLINK_OPS_POLICY
#endif
@@ -872,10 +863,6 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
})
#endif
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 5) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 18) && !defined(ISRHEL82))
-#define ipv6_dst_lookup_flow(a, b, c, d) ipv6_dst_lookup(a, b, &dst, c) + (void *)0 ?: dst
-#endif
-
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
#include <linux/skbuff.h>
#ifndef skb_list_walk_safe
@@ -958,7 +945,7 @@ static inline int skb_ensure_writable(struct sk_buff *skb, int write_len)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 1, 0)
#include <net/netfilter/nf_nat_core.h>
#endif
-static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+static inline void __compat_icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct sk_buff *cloned_skb = NULL;
enum ip_conntrack_info ctinfo;
@@ -987,7 +974,7 @@ static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __b
out:
consume_skb(cloned_skb);
}
-static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+static inline void __compat_icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
{
struct sk_buff *cloned_skb = NULL;
enum ip_conntrack_info ctinfo;
@@ -1017,17 +1004,20 @@ out:
consume_skb(cloned_skb);
}
#else
-#define icmp_ndo_send icmp_send
-#define icmpv6_ndo_send icmpv6_send
+#define __compat_icmp_ndo_send icmp_send
+#define __compat_icmpv6_ndo_send icmpv6_send
#endif
+#define icmp_ndo_send __compat_icmp_ndo_send
+#define icmpv6_ndo_send __compat_icmpv6_ndo_send
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
#define COMPAT_CANNOT_USE_MAX_MTU
#endif
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) || (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 14) && LINUX_VERSION_CODE >= KERNEL_VERSION(5, 5, 0)) || (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 29) && !defined(ISUBUNTU1910))
#include <linux/skbuff.h>
+#include <net/sch_generic.h>
static inline void skb_reset_redirect(struct sk_buff *skb)
{
#ifdef CONFIG_NET_SCHED
@@ -1036,6 +1026,19 @@ static inline void skb_reset_redirect(struct sk_buff *skb)
}
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+#define skb_get_hash skb_get_rxhash
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
+#define hash rxhash
+#define l4_hash l4_rxhash
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+#define sw_hash ignore_df = 0; skb->nf_trace = skb->ooo_okay
+#endif
+
#if defined(ISUBUNTU1604) || defined(ISRHEL7)
#include <linux/siphash.h>
#ifndef _WG_LINUX_SIPHASH_H
diff --git a/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h
index 37b4bb2545b3..9d514bac1388 100644
--- a/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h
+++ b/net/wireguard/compat/ptr_ring/include/linux/ptr_ring.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Definitions for the 'struct ptr_ring' datastructure.
*
@@ -6,11 +7,6 @@
*
* Copyright (C) 2016 Red Hat, Inc.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
* This is a limited-size FIFO maintaining pointers in FIFO order, with
* one CPU producing entries and another consuming entries from a FIFO.
*
@@ -26,8 +22,8 @@
#include <linux/cache.h>
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/cache.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <asm/errno.h>
#endif
@@ -45,9 +41,10 @@ struct ptr_ring {
};
/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). If ring is ever resized, callers must hold
- * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold
- * producer_lock, the next call to __ptr_ring_produce may fail.
+ * for example cpu_relax().
+ *
+ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock:
+ * see e.g. ptr_ring_full.
*/
static inline bool __ptr_ring_full(struct ptr_ring *r)
{
@@ -101,13 +98,19 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r)
/* Note: callers invoking this in a loop must use a compiler barrier,
* for example cpu_relax(). Callers must hold producer_lock.
+ * Callers are responsible for making sure pointer that is being queued
+ * points to a valid data.
*/
static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
{
if (unlikely(!r->size) || r->queue[r->producer])
return -ENOSPC;
- r->queue[r->producer++] = ptr;
+ /* Make sure the pointer we are storing points to a valid data. */
+ /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
+ smp_wmb();
+
+ WRITE_ONCE(r->queue[r->producer++], ptr);
if (unlikely(r->producer >= r->size))
r->producer = 0;
return 0;
@@ -163,26 +166,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
return ret;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL.
- * If ring is never resized, and if the pointer is merely
- * tested, there's no need to take the lock - see e.g. __ptr_ring_empty.
- */
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer_head];
+ return READ_ONCE(r->queue[r->consumer_head]);
return NULL;
}
-/* Note: callers invoking this in a loop must use a compiler barrier,
- * for example cpu_relax(). Callers must take consumer_lock
- * if the ring is ever resized - see e.g. ptr_ring_empty.
+/*
+ * Test ring empty status without taking any locks.
+ *
+ * NB: This is only safe to call if ring is never resized.
+ *
+ * However, if some other CPU consumes ring entries at the same time, the value
+ * returned is not guaranteed to be correct.
+ *
+ * In this case - to avoid incorrectly detecting the ring
+ * as empty - the CPU consuming the ring entries is responsible
+ * for either consuming all ring entries until the ring is empty,
+ * or synchronizing with some other CPU and causing it to
+ * re-test __ptr_ring_empty and/or consume the ring enteries
+ * after the synchronization point.
+ *
+ * Note: callers invoking this in a loop must use a compiler barrier,
+ * for example cpu_relax().
*/
static inline bool __ptr_ring_empty(struct ptr_ring *r)
{
- return !__ptr_ring_peek(r);
+ if (likely(r->size))
+ return !r->queue[READ_ONCE(r->consumer_head)];
+ return true;
}
static inline bool ptr_ring_empty(struct ptr_ring *r)
@@ -236,22 +249,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
/* Fundamentally, what we want to do is update consumer
* index and zero out the entry so producer can reuse it.
* Doing it naively at each consume would be as simple as:
- * r->queue[r->consumer++] = NULL;
- * if (unlikely(r->consumer >= r->size))
- * r->consumer = 0;
+ * consumer = r->consumer;
+ * r->queue[consumer++] = NULL;
+ * if (unlikely(consumer >= r->size))
+ * consumer = 0;
+ * r->consumer = consumer;
* but that is suboptimal when the ring is full as producer is writing
* out new entries in the same cache line. Defer these updates until a
* batch of entries has been consumed.
*/
- int head = r->consumer_head++;
+ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty
+ * to work correctly.
+ */
+ int consumer_head = r->consumer_head;
+ int head = consumer_head++;
/* Once we have processed enough entries invalidate them in
* the ring all at once so producer can reuse their space in the ring.
* We also do this when we reach end of the ring - not mandatory
* but helps keep the implementation simple.
*/
- if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
- r->consumer_head >= r->size)) {
+ if (unlikely(consumer_head - r->consumer_tail >= r->batch ||
+ consumer_head >= r->size)) {
/* Zero out entries in the reverse order: this way we touch the
* cache line that producer might currently be reading the last;
* producer won't make progress and touch other cache lines
@@ -259,12 +278,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r)
*/
while (likely(head >= r->consumer_tail))
r->queue[head--] = NULL;
- r->consumer_tail = r->consumer_head;
+ r->consumer_tail = consumer_head;
}
- if (unlikely(r->consumer_head >= r->size)) {
- r->consumer_head = 0;
+ if (unlikely(consumer_head >= r->size)) {
+ consumer_head = 0;
r->consumer_tail = 0;
}
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, consumer_head);
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
@@ -274,6 +295,10 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r)
ptr = __ptr_ring_peek(r);
if (ptr)
__ptr_ring_discard_one(r);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 15, 0)
+ /* The READ_ONCE in __ptr_ring_peek doesn't imply a barrier on old kernels. */
+ smp_read_barrier_depends();
+#endif
return ptr;
}
@@ -436,9 +461,14 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
__PTR_RING_PEEK_CALL_v; \
})
+/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See
+ * documentation for vmalloc for which of them are legal.
+ */
static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kcalloc(size, sizeof(void *), gfp);
+ if (size > KMALLOC_MAX_SIZE / sizeof(void *))
+ return NULL;
+ return kvmalloc(size * sizeof(void *), gfp | __GFP_ZERO);
}
static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
@@ -512,7 +542,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n,
goto done;
}
r->queue[head] = batch[--n];
- r->consumer_tail = r->consumer_head = head;
+ r->consumer_tail = head;
+ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */
+ WRITE_ONCE(r->consumer_head, head);
}
done:
@@ -537,6 +569,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
+ if (producer >= size)
+ producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
@@ -571,7 +605,7 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
spin_unlock(&(r)->producer_lock);
spin_unlock_irqrestore(&(r)->consumer_lock, flags);
- kfree(old);
+ kvfree(old);
return 0;
}
@@ -611,7 +645,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
}
for (i = 0; i < nrings; ++i)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -619,7 +653,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
nomem:
while (--i >= 0)
- kfree(queues[i]);
+ kvfree(queues[i]);
kfree(queues);
@@ -634,7 +668,7 @@ static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *))
if (destroy)
while ((ptr = ptr_ring_consume(r)))
destroy(ptr);
- kfree(r->queue);
+ kvfree(r->queue);
}
#endif /* _LINUX_PTR_RING_H */
diff --git a/net/wireguard/crypto/zinc/blake2s/blake2s.c b/net/wireguard/crypto/zinc/blake2s/blake2s.c
index e9e0bdaffc82..a9b8a8d95998 100644
--- a/net/wireguard/crypto/zinc/blake2s/blake2s.c
+++ b/net/wireguard/crypto/zinc/blake2s/blake2s.c
@@ -66,7 +66,6 @@ void blake2s_init(struct blake2s_state *state, const size_t outlen)
blake2s_init_param(state, 0x01010000 | outlen);
state->outlen = outlen;
}
-EXPORT_SYMBOL(blake2s_init);
void blake2s_init_key(struct blake2s_state *state, const size_t outlen,
const void *key, const size_t keylen)
@@ -81,7 +80,6 @@ void blake2s_init_key(struct blake2s_state *state, const size_t outlen,
blake2s_update(state, block, BLAKE2S_BLOCK_SIZE);
memzero_explicit(block, BLAKE2S_BLOCK_SIZE);
}
-EXPORT_SYMBOL(blake2s_init_key);
#if defined(CONFIG_ZINC_ARCH_X86_64)
#include "blake2s-x86_64-glue.c"
@@ -192,7 +190,6 @@ void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
memcpy(state->buf + state->buflen, in, inlen);
state->buflen += inlen;
}
-EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
@@ -205,7 +202,6 @@ void blake2s_final(struct blake2s_state *state, u8 *out)
memcpy(out, state->h, state->outlen);
memzero_explicit(state, sizeof(*state));
}
-EXPORT_SYMBOL(blake2s_final);
void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen,
const size_t inlen, const size_t keylen)
@@ -242,7 +238,6 @@ void blake2s_hmac(u8 *out, const u8 *in, const u8 *key, const size_t outlen,
memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
}
-EXPORT_SYMBOL(blake2s_hmac);
#include "../selftest/blake2s.c"
diff --git a/net/wireguard/crypto/zinc/chacha20/chacha20.c b/net/wireguard/crypto/zinc/chacha20/chacha20.c
index b4763c81e082..f4ca8b09d01e 100644
--- a/net/wireguard/crypto/zinc/chacha20/chacha20.c
+++ b/net/wireguard/crypto/zinc/chacha20/chacha20.c
@@ -121,7 +121,6 @@ void chacha20(struct chacha20_ctx *ctx, u8 *dst, const u8 *src, u32 len,
if (!chacha20_arch(ctx, dst, src, len, simd_context))
chacha20_generic(ctx, dst, src, len);
}
-EXPORT_SYMBOL(chacha20);
static void hchacha20_generic(u32 derived_key[CHACHA20_KEY_WORDS],
const u8 nonce[HCHACHA20_NONCE_SIZE],
@@ -159,7 +158,6 @@ void hchacha20(u32 derived_key[CHACHA20_KEY_WORDS],
if (!hchacha20_arch(derived_key, nonce, key, simd_context))
hchacha20_generic(derived_key, nonce, key);
}
-EXPORT_SYMBOL(hchacha20);
#include "../selftest/chacha20.c"
diff --git a/net/wireguard/crypto/zinc/chacha20poly1305.c b/net/wireguard/crypto/zinc/chacha20poly1305.c
index ff54bc4a255a..cee29db01bc0 100644
--- a/net/wireguard/crypto/zinc/chacha20poly1305.c
+++ b/net/wireguard/crypto/zinc/chacha20poly1305.c
@@ -71,7 +71,6 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
&simd_context);
simd_put(&simd_context);
}
-EXPORT_SYMBOL(chacha20poly1305_encrypt);
bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src,
const size_t src_len,
@@ -165,7 +164,6 @@ bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src,
memzero_explicit(&b, sizeof(b));
return true;
}
-EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace);
static inline bool
__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
@@ -230,7 +228,6 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
simd_put(&simd_context);
return ret;
}
-EXPORT_SYMBOL(chacha20poly1305_decrypt);
bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src,
size_t src_len,
@@ -335,7 +332,6 @@ bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src,
memzero_explicit(&b, sizeof(b));
return ret;
}
-EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace);
void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
@@ -354,7 +350,6 @@ void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
memzero_explicit(derived_key, CHACHA20POLY1305_KEY_SIZE);
simd_put(&simd_context);
}
-EXPORT_SYMBOL(xchacha20poly1305_encrypt);
bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
const u8 *ad, const size_t ad_len,
@@ -375,7 +370,6 @@ bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
simd_put(&simd_context);
return ret;
}
-EXPORT_SYMBOL(xchacha20poly1305_decrypt);
#include "selftest/chacha20poly1305.c"
diff --git a/net/wireguard/crypto/zinc/curve25519/curve25519.c b/net/wireguard/crypto/zinc/curve25519/curve25519.c
index 82c669e4123c..d4066fe148af 100644
--- a/net/wireguard/crypto/zinc/curve25519/curve25519.c
+++ b/net/wireguard/crypto/zinc/curve25519/curve25519.c
@@ -58,7 +58,6 @@ bool curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
curve25519_generic(mypublic, secret, basepoint);
return crypto_memneq(mypublic, null_point, CURVE25519_KEY_SIZE);
}
-EXPORT_SYMBOL(curve25519);
bool curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
const u8 secret[CURVE25519_KEY_SIZE])
@@ -72,14 +71,12 @@ bool curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
return crypto_memneq(pub, null_point, CURVE25519_KEY_SIZE);
return curve25519(pub, secret, basepoint);
}
-EXPORT_SYMBOL(curve25519_generate_public);
void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE])
{
get_random_bytes_wait(secret, CURVE25519_KEY_SIZE);
curve25519_clamp_secret(secret);
}
-EXPORT_SYMBOL(curve25519_generate_secret);
#include "../selftest/curve25519.c"
diff --git a/net/wireguard/crypto/zinc/poly1305/poly1305.c b/net/wireguard/crypto/zinc/poly1305/poly1305.c
index 7d373b90e011..a54bc3309cf2 100644
--- a/net/wireguard/crypto/zinc/poly1305/poly1305.c
+++ b/net/wireguard/crypto/zinc/poly1305/poly1305.c
@@ -64,7 +64,6 @@ void poly1305_init(struct poly1305_ctx *ctx, const u8 key[POLY1305_KEY_SIZE])
ctx->num = 0;
}
-EXPORT_SYMBOL(poly1305_init);
static inline void poly1305_blocks(void *ctx, const u8 *input, const size_t len,
const u32 padbit,
@@ -115,7 +114,6 @@ void poly1305_update(struct poly1305_ctx *ctx, const u8 *input, size_t len,
ctx->num = rem;
}
-EXPORT_SYMBOL(poly1305_update);
void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE],
simd_context_t *simd_context)
@@ -134,7 +132,6 @@ void poly1305_final(struct poly1305_ctx *ctx, u8 mac[POLY1305_MAC_SIZE],
memzero_explicit(ctx, sizeof(*ctx));
}
-EXPORT_SYMBOL(poly1305_final);
#include "../selftest/poly1305.c"
diff --git a/net/wireguard/dkms.conf b/net/wireguard/dkms.conf
index 73067217be13..27f0beafb068 100644
--- a/net/wireguard/dkms.conf
+++ b/net/wireguard/dkms.conf
@@ -1,5 +1,5 @@
PACKAGE_NAME="wireguard"
-PACKAGE_VERSION="1.0.20200413"
+PACKAGE_VERSION="1.0.20200506"
AUTOINSTALL=yes
BUILT_MODULE_NAME="wireguard"
diff --git a/net/wireguard/main.c b/net/wireguard/main.c
index 9ca4195bc985..543501158891 100644
--- a/net/wireguard/main.c
+++ b/net/wireguard/main.c
@@ -66,3 +66,4 @@ MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
MODULE_VERSION(WIREGUARD_VERSION);
MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME);
MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME);
+MODULE_INFO(intree, "Y");
diff --git a/net/wireguard/messages.h b/net/wireguard/messages.h
index f415cdd2e90a..1d1ed18f11f8 100644
--- a/net/wireguard/messages.h
+++ b/net/wireguard/messages.h
@@ -32,7 +32,7 @@ enum cookie_values {
};
enum counter_values {
- COUNTER_BITS_TOTAL = 2048,
+ COUNTER_BITS_TOTAL = 8192,
COUNTER_REDUNDANT_BITS = BITS_PER_LONG,
COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS
};
diff --git a/net/wireguard/noise.c b/net/wireguard/noise.c
index 32172252e01e..d0987cdd5aab 100644
--- a/net/wireguard/noise.c
+++ b/net/wireguard/noise.c
@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_create(struct wg_peer *peer)
if (unlikely(!keypair))
return NULL;
+ spin_lock_init(&keypair->receiving_counter.lock);
keypair->internal_id = atomic64_inc_return(&keypair_counter);
keypair->entry.type = INDEX_HASHTABLE_KEYPAIR;
keypair->entry.peer = peer;
@@ -360,25 +361,16 @@ out:
memzero_explicit(output, BLAKE2S_HASH_SIZE + 1);
}
-static void symmetric_key_init(struct noise_symmetric_key *key)
-{
- spin_lock_init(&key->counter.receive.lock);
- atomic64_set(&key->counter.counter, 0);
- memset(key->counter.receive.backtrack, 0,
- sizeof(key->counter.receive.backtrack));
- key->birthdate = ktime_get_coarse_boottime_ns();
- key->is_valid = true;
-}
-
static void derive_keys(struct noise_symmetric_key *first_dst,
struct noise_symmetric_key *second_dst,
const u8 chaining_key[NOISE_HASH_LEN])
{
+ u64 birthdate = ktime_get_coarse_boottime_ns();
kdf(first_dst->key, second_dst->key, NULL, NULL,
NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0,
chaining_key);
- symmetric_key_init(first_dst);
- symmetric_key_init(second_dst);
+ first_dst->birthdate = second_dst->birthdate = birthdate;
+ first_dst->is_valid = second_dst->is_valid = true;
}
static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN],
@@ -717,6 +709,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
u8 e[NOISE_PUBLIC_KEY_LEN];
u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN];
u8 static_private[NOISE_PUBLIC_KEY_LEN];
+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN];
down_read(&wg->static_identity.lock);
@@ -735,6 +728,8 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN);
memcpy(ephemeral_private, handshake->ephemeral_private,
NOISE_PUBLIC_KEY_LEN);
+ memcpy(preshared_key, handshake->preshared_key,
+ NOISE_SYMMETRIC_KEY_LEN);
up_read(&handshake->lock);
if (state != HANDSHAKE_CREATED_INITIATION)
@@ -752,7 +747,7 @@ wg_noise_handshake_consume_response(struct message_handshake_response *src,
goto fail;
/* psk */
- mix_psk(chaining_key, hash, key, handshake->preshared_key);
+ mix_psk(chaining_key, hash, key, preshared_key);
/* {} */
if (!message_decrypt(NULL, src->encrypted_nothing,
@@ -785,6 +780,7 @@ out:
memzero_explicit(chaining_key, NOISE_HASH_LEN);
memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN);
memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN);
+ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN);
up_read(&wg->static_identity.lock);
return ret_peer;
}
diff --git a/net/wireguard/noise.h b/net/wireguard/noise.h
index f532d59d3f19..c527253dba80 100644
--- a/net/wireguard/noise.h
+++ b/net/wireguard/noise.h
@@ -15,18 +15,14 @@
#include <linux/mutex.h>
#include <linux/kref.h>
-union noise_counter {
- struct {
- u64 counter;
- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
- spinlock_t lock;
- } receive;
- atomic64_t counter;
+struct noise_replay_counter {
+ u64 counter;
+ spinlock_t lock;
+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG];
};
struct noise_symmetric_key {
u8 key[NOISE_SYMMETRIC_KEY_LEN];
- union noise_counter counter;
u64 birthdate;
bool is_valid;
};
@@ -34,7 +30,9 @@ struct noise_symmetric_key {
struct noise_keypair {
struct index_hashtable_entry entry;
struct noise_symmetric_key sending;
+ atomic64_t sending_counter;
struct noise_symmetric_key receiving;
+ struct noise_replay_counter receiving_counter;
__le32 remote_index;
bool i_am_the_initiator;
struct kref refcount;
diff --git a/net/wireguard/queueing.c b/net/wireguard/queueing.c
index 5c964fcb994e..71b8e80b58e1 100644
--- a/net/wireguard/queueing.c
+++ b/net/wireguard/queueing.c
@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function,
if (multicore) {
queue->worker = wg_packet_percpu_multicore_worker_alloc(
function, queue);
- if (!queue->worker)
+ if (!queue->worker) {
+ ptr_ring_cleanup(&queue->ring, NULL);
return -ENOMEM;
+ }
} else {
INIT_WORK(&queue->work, function);
}
diff --git a/net/wireguard/queueing.h b/net/wireguard/queueing.h
index fe6e5c952748..e88eb93af9d8 100644
--- a/net/wireguard/queueing.h
+++ b/net/wireguard/queueing.h
@@ -87,15 +87,23 @@ static inline bool wg_check_packet_protocol(struct sk_buff *skb)
return real_protocol && skb->protocol == real_protocol;
}
-static inline void wg_reset_packet(struct sk_buff *skb)
+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating)
{
const int pfmemalloc = skb->pfmemalloc;
+ u32 hash = skb->hash;
+ u8 l4_hash = skb->l4_hash;
+ u8 sw_hash = skb->sw_hash;
skb_scrub_packet(skb, true);
memset(&skb->headers_start, 0,
offsetof(struct sk_buff, headers_end) -
offsetof(struct sk_buff, headers_start));
skb->pfmemalloc = pfmemalloc;
+ if (encapsulating) {
+ skb->hash = hash;
+ skb->l4_hash = l4_hash;
+ skb->sw_hash = sw_hash;
+ }
skb->queue_mapping = 0;
skb->nohdr = 0;
skb->peeked = 0;
diff --git a/net/wireguard/receive.c b/net/wireguard/receive.c
index c63b7a93ac91..055d3dd58077 100644
--- a/net/wireguard/receive.c
+++ b/net/wireguard/receive.c
@@ -227,27 +227,26 @@ void wg_packet_handshake_receive_worker(struct work_struct *work)
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
- bool send = false;
+ bool send;
if (peer->sent_lastminute_handshake)
return;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
- keypair->i_am_the_initiator &&
- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
- REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT)))
- send = true;
+ send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+ keypair->i_am_the_initiator &&
+ wg_birthdate_has_expired(keypair->sending.birthdate,
+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT);
rcu_read_unlock_bh();
- if (send) {
+ if (unlikely(send)) {
peer->sent_lastminute_handshake = true;
wg_packet_send_queued_handshake_initiation(peer, false);
}
}
-static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key,
+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair,
simd_context_t *simd_context)
{
struct scatterlist sg[MAX_SKB_FRAGS + 8];
@@ -255,13 +254,13 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key,
unsigned int offset;
int num_frags;
- if (unlikely(!key))
+ if (unlikely(!keypair))
return false;
- if (unlikely(!READ_ONCE(key->is_valid) ||
- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) ||
- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) {
- WRITE_ONCE(key->is_valid, false);
+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
+ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -285,7 +284,8 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key,
return false;
if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0,
- PACKET_CB(skb)->nonce, key->key,
+ PACKET_CB(skb)->nonce,
+ keypair->receiving.key,
simd_context))
return false;
@@ -301,41 +301,41 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key,
}
/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */
-static bool counter_validate(union noise_counter *counter, u64 their_counter)
+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter)
{
unsigned long index, index_current, top, i;
bool ret = false;
- spin_lock_bh(&counter->receive.lock);
+ spin_lock_bh(&counter->lock);
- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 ||
+ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 ||
their_counter >= REJECT_AFTER_MESSAGES))
goto out;
++their_counter;
if (unlikely((COUNTER_WINDOW_SIZE + their_counter) <
- counter->receive.counter))
+ counter->counter))
goto out;
index = their_counter >> ilog2(BITS_PER_LONG);
- if (likely(their_counter > counter->receive.counter)) {
- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG);
+ if (likely(their_counter > counter->counter)) {
+ index_current = counter->counter >> ilog2(BITS_PER_LONG);
top = min_t(unsigned long, index - index_current,
COUNTER_BITS_TOTAL / BITS_PER_LONG);
for (i = 1; i <= top; ++i)
- counter->receive.backtrack[(i + index_current) &
+ counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->receive.counter = their_counter;
+ counter->counter = their_counter;
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1),
- &counter->receive.backtrack[index]);
+ &counter->backtrack[index]);
out:
- spin_unlock_bh(&counter->receive.lock);
+ spin_unlock_bh(&counter->lock);
return ret;
}
@@ -397,13 +397,11 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
goto dishonest_packet_size;
- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
- IP_ECN_set_ce(ip_hdr(skb));
+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos);
} else if (skb->protocol == htons(ETH_P_IPV6)) {
len = ntohs(ipv6_hdr(skb)->payload_len) +
sizeof(struct ipv6hdr);
- if (INET_ECN_is_ce(PACKET_CB(skb)->ds))
- IP6_ECN_set_ce(skb, ipv6_hdr(skb));
+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb)));
} else {
goto dishonest_packet_type;
}
@@ -479,19 +477,19 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
if (unlikely(state != PACKET_STATE_CRYPTED))
goto next;
- if (unlikely(!counter_validate(&keypair->receiving.counter,
+ if (unlikely(!counter_validate(&keypair->receiving_counter,
PACKET_CB(skb)->nonce))) {
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving.counter.receive.counter);
+ keypair->receiving_counter.counter);
goto next;
}
if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb)))
goto next;
- wg_reset_packet(skb);
+ wg_reset_packet(skb, false);
wg_packet_consume_data_done(peer, skb, &endpoint);
free = false;
@@ -520,9 +518,9 @@ void wg_packet_decrypt_worker(struct work_struct *work)
simd_get(&simd_context);
while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) {
- enum packet_state state = likely(decrypt_packet(skb,
- &PACKET_CB(skb)->keypair->receiving,
- &simd_context)) ?
+ enum packet_state state =
+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair,
+ &simd_context)) ?
PACKET_STATE_CRYPTED : PACKET_STATE_DEAD;
wg_queue_enqueue_per_peer_napi(skb, state);
simd_relax(&simd_context);
diff --git a/net/wireguard/selftest/counter.c b/net/wireguard/selftest/counter.c
index f4fbb9072ed7..ec3c156bf91b 100644
--- a/net/wireguard/selftest/counter.c
+++ b/net/wireguard/selftest/counter.c
@@ -6,18 +6,24 @@
#ifdef DEBUG
bool __init wg_packet_counter_selftest(void)
{
+ struct noise_replay_counter *counter;
unsigned int test_num = 0, i;
- union noise_counter counter;
bool success = true;
-#define T_INIT do { \
- memset(&counter, 0, sizeof(union noise_counter)); \
- spin_lock_init(&counter.receive.lock); \
+ counter = kmalloc(sizeof(*counter), GFP_KERNEL);
+ if (unlikely(!counter)) {
+ pr_err("nonce counter self-test malloc: FAIL\n");
+ return false;
+ }
+
+#define T_INIT do { \
+ memset(counter, 0, sizeof(*counter)); \
+ spin_lock_init(&counter->lock); \
} while (0)
#define T_LIM (COUNTER_WINDOW_SIZE + 1)
#define T(n, v) do { \
++test_num; \
- if (counter_validate(&counter, n) != (v)) { \
+ if (counter_validate(counter, n) != (v)) { \
pr_err("nonce counter self-test %u: FAIL\n", \
test_num); \
success = false; \
@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(void)
if (success)
pr_info("nonce counter self-tests: pass\n");
+ kfree(counter);
return success;
}
#endif
diff --git a/net/wireguard/selftest/ratelimiter.c b/net/wireguard/selftest/ratelimiter.c
index bcd6462e4540..007cd4457c5f 100644
--- a/net/wireguard/selftest/ratelimiter.c
+++ b/net/wireguard/selftest/ratelimiter.c
@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void)
enum { TRIALS_BEFORE_GIVING_UP = 5000 };
bool success = false;
int test = 0, trials;
- struct sk_buff *skb4, *skb6;
+ struct sk_buff *skb4, *skb6 = NULL;
struct iphdr *hdr4;
- struct ipv6hdr *hdr6;
+ struct ipv6hdr *hdr6 = NULL;
if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN))
return true;
diff --git a/net/wireguard/send.c b/net/wireguard/send.c
index 9290bb9d5b04..828b086abe31 100644
--- a/net/wireguard/send.c
+++ b/net/wireguard/send.c
@@ -125,20 +125,17 @@ void wg_packet_send_handshake_cookie(struct wg_device *wg,
static void keep_key_fresh(struct wg_peer *peer)
{
struct noise_keypair *keypair;
- bool send = false;
+ bool send;
rcu_read_lock_bh();
keypair = rcu_dereference_bh(peer->keypairs.current_keypair);
- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) &&
- (unlikely(atomic64_read(&keypair->sending.counter.counter) >
- REKEY_AFTER_MESSAGES) ||
- (keypair->i_am_the_initiator &&
- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
- REKEY_AFTER_TIME)))))
- send = true;
+ send = keypair && READ_ONCE(keypair->sending.is_valid) &&
+ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES ||
+ (keypair->i_am_the_initiator &&
+ wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME)));
rcu_read_unlock_bh();
- if (send)
+ if (unlikely(send))
wg_packet_send_queued_handshake_initiation(peer, false);
}
@@ -172,6 +169,11 @@ static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair,
struct sk_buff *trailer;
int num_frags;
+ /* Force hash calculation before encryption so that flow analysis is
+ * consistent over the inner packet.
+ */
+ skb_get_hash(skb);
+
/* Calculate lengths. */
padding_len = calculate_skb_padding(skb);
trailer_len = padding_len + noise_encrypted_len(0);
@@ -284,6 +286,8 @@ void wg_packet_tx_worker(struct work_struct *work)
wg_noise_keypair_put(keypair, false);
wg_peer_put(peer);
+ if (need_resched())
+ cond_resched();
}
}
@@ -302,7 +306,7 @@ void wg_packet_encrypt_worker(struct work_struct *work)
if (likely(encrypt_packet(skb,
PACKET_CB(first)->keypair,
&simd_context))) {
- wg_reset_packet(skb);
+ wg_reset_packet(skb, true);
} else {
state = PACKET_STATE_DEAD;
break;
@@ -352,7 +356,6 @@ void wg_packet_purge_staged_packets(struct wg_peer *peer)
void wg_packet_send_staged_packets(struct wg_peer *peer)
{
- struct noise_symmetric_key *key;
struct noise_keypair *keypair;
struct sk_buff_head packets;
struct sk_buff *skb;
@@ -372,10 +375,9 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
rcu_read_unlock_bh();
if (unlikely(!keypair))
goto out_nokey;
- key = &keypair->sending;
- if (unlikely(!READ_ONCE(key->is_valid)))
+ if (unlikely(!READ_ONCE(keypair->sending.is_valid)))
goto out_nokey;
- if (unlikely(wg_birthdate_has_expired(key->birthdate,
+ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate,
REJECT_AFTER_TIME)))
goto out_invalid;
@@ -390,7 +392,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
*/
PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb);
PACKET_CB(skb)->nonce =
- atomic64_inc_return(&key->counter.counter) - 1;
+ atomic64_inc_return(&keypair->sending_counter) - 1;
if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES))
goto out_invalid;
}
@@ -402,7 +404,7 @@ void wg_packet_send_staged_packets(struct wg_peer *peer)
return;
out_invalid:
- WRITE_ONCE(key->is_valid, false);
+ WRITE_ONCE(keypair->sending.is_valid, false);
out_nokey:
wg_noise_keypair_put(keypair, false);
diff --git a/net/wireguard/socket.c b/net/wireguard/socket.c
index b0d6541582d3..f9018027fc13 100644
--- a/net/wireguard/socket.c
+++ b/net/wireguard/socket.c
@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, struct sk_buff *skb,
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
- } else if (unlikely(rt->dst.dev == skb->dev)) {
- ip_rt_put(rt);
- ret = -ELOOP;
- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
- wg->dev->name, &endpoint->addr);
- goto err;
}
if (cache)
dst_cache_set_ip4(cache, &rt->dst, fl.saddr);
@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, struct sk_buff *skb,
net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n",
wg->dev->name, &endpoint->addr, ret);
goto err;
- } else if (unlikely(dst->dev == skb->dev)) {
- dst_release(dst);
- ret = -ELOOP;
- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n",
- wg->dev->name, &endpoint->addr);
- goto err;
}
if (cache)
dst_cache_set_ip6(cache, dst, &fl.saddr);
diff --git a/net/wireguard/tests/netns.sh b/net/wireguard/tests/netns.sh
index a5fd8a099993..e4dc485b6e43 100755
--- a/net/wireguard/tests/netns.sh
+++ b/net/wireguard/tests/netns.sh
@@ -48,8 +48,11 @@ cleanup() {
exec 2>/dev/null
printf "$orig_message_cost" > /proc/sys/net/core/message_cost
ip0 link del dev wg0
+ ip0 link del dev wg1
ip1 link del dev wg0
+ ip1 link del dev wg1
ip2 link del dev wg0
+ ip2 link del dev wg1
local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)"
[[ -n $to_kill ]] && kill $to_kill
pp ip netns del $netns1
@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2
key1="$(pp wg genkey)"
key2="$(pp wg genkey)"
key3="$(pp wg genkey)"
+key4="$(pp wg genkey)"
pub1="$(pp wg pubkey <<<"$key1")"
pub2="$(pp wg pubkey <<<"$key2")"
pub3="$(pp wg pubkey <<<"$key3")"
+pub4="$(pp wg pubkey <<<"$key4")"
psk="$(pp wg genpsk)"
[[ -n $key1 && -n $key2 && -n $psk ]]
configure_peers() {
ip1 addr add 192.168.241.1/24 dev wg0
- ip1 addr add fd00::1/24 dev wg0
+ ip1 addr add fd00::1/112 dev wg0
ip2 addr add 192.168.241.2/24 dev wg0
- ip2 addr add fd00::2/24 dev wg0
+ ip2 addr add fd00::2/112 dev wg0
n1 wg set wg0 \
private-key <(echo "$key1") \
@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2
n1 wg set wg0 private-key <(echo "$key3")
n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove
n1 ping -W 1 -c 1 192.168.241.2
+n2 wg set wg0 peer "$pub3" remove
+
+# Test that we can route wg through wg
+ip1 addr flush dev wg0
+ip2 addr flush dev wg0
+ip1 addr add fd00::5:1/112 dev wg0
+ip2 addr add fd00::5:2/112 dev wg0
+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2
+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998
+ip1 link add wg1 type wireguard
+ip2 link add wg1 type wireguard
+ip1 addr add 192.168.241.1/24 dev wg1
+ip1 addr add fd00::1/112 dev wg1
+ip2 addr add 192.168.241.2/24 dev wg1
+ip2 addr add fd00::2/112 dev wg1
+ip1 link set mtu 1340 up dev wg1
+ip2 link set mtu 1340 up dev wg1
+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5
+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5
+tests
+# Try to set up a routing loop between the two namespaces
+ip1 link set netns $netns0 dev wg1
+ip0 addr add 192.168.241.1/24 dev wg1
+ip0 link set up dev wg1
+n0 ping -W 1 -c 1 192.168.241.2
+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7
+ip2 link del wg0
+ip2 link del wg1
+! n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel
+ip0 link del wg1
ip1 link del wg0
-ip2 link del wg0
# Test using NAT. We now change the topology to this:
# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐
@@ -282,6 +316,20 @@ pp sleep 3
n2 ping -W 1 -c 1 192.168.241.1
n1 wg set wg0 peer "$pub2" persistent-keepalive 0
+# Test that onion routing works, even when it loops
+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5
+ip1 addr add 192.168.242.1/24 dev wg0
+ip2 link add wg1 type wireguard
+ip2 addr add 192.168.242.2/24 dev wg1
+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32
+ip2 link set wg1 up
+n1 ping -W 1 -c 1 192.168.242.2
+ip2 link del wg1
+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5
+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel
+n1 wg set wg0 peer "$pub3" remove
+ip1 addr del 192.168.242.1/24 dev wg0
+
# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs.
ip1 -6 addr add fc00::9/96 dev vethc
ip1 -6 route add default via fc00::1
diff --git a/net/wireguard/tests/qemu/Makefile b/net/wireguard/tests/qemu/Makefile
index 62b1f3db50fb..a3b4a8134797 100644
--- a/net/wireguard/tests/qemu/Makefile
+++ b/net/wireguard/tests/qemu/Makefile
@@ -54,7 +54,7 @@ $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/re
$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/))
$(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/))
$(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/))
-$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/))
+$(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/))
$(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/))
$(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/))
$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#))
@@ -350,7 +350,7 @@ $(IPUTILS_PATH)/.installed: $(IPUTILS_TAR)
touch $@
$(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS)
- $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping
+ $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no CFLAGS="-fcommon $(CFLAGS)" ping
$(STRIP) -s $@
$(BASH_PATH)/.installed: $(BASH_TAR)
diff --git a/net/wireguard/tests/qemu/arch/powerpc64le.config b/net/wireguard/tests/qemu/arch/powerpc64le.config
index 990c510a9cfa..f52f1e2bc7f6 100644
--- a/net/wireguard/tests/qemu/arch/powerpc64le.config
+++ b/net/wireguard/tests/qemu/arch/powerpc64le.config
@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="console=hvc0 wg.success=hvc1"
CONFIG_SECTION_MISMATCH_WARN_ONLY=y
CONFIG_FRAME_WARN=1280
+CONFIG_THREAD_SHIFT=14
diff --git a/net/wireguard/tests/qemu/init.c b/net/wireguard/tests/qemu/init.c
index 90bc9813cadc..3e2a237e6ca6 100644
--- a/net/wireguard/tests/qemu/init.c
+++ b/net/wireguard/tests/qemu/init.c
@@ -18,6 +18,7 @@
#include <sys/types.h>
#include <sys/io.h>
#include <sys/ioctl.h>
+#include <sys/random.h>
#include <sys/reboot.h>
#include <sys/utsname.h>
#include <sys/sendfile.h>
@@ -73,7 +74,9 @@ static void seed_rng(void)
fd = open("/dev/urandom", O_WRONLY);
if (fd < 0)
panic("open(urandom)");
- for (int i = 0; i < 256; ++i) {
+ for (;;) {
+ if (getrandom(entropy.buffer, sizeof(entropy.buffer), GRND_NONBLOCK) != -1 || errno != EAGAIN)
+ break;
if (ioctl(fd, RNDADDENTROPY, &entropy) < 0)
panic("ioctl(urandom)");
}
diff --git a/net/wireguard/version.h b/net/wireguard/version.h
index 6d40efeb229e..89f181cc8d1a 100644
--- a/net/wireguard/version.h
+++ b/net/wireguard/version.h
@@ -1,3 +1,3 @@
#ifndef WIREGUARD_VERSION
-#define WIREGUARD_VERSION "1.0.20200413"
+#define WIREGUARD_VERSION "1.0.20200506"
#endif