tcp: md5: do not use alloc_percpu()
Author:     Eric Dumazet <edumazet@google.com>
AuthorDate: Thu, 23 Oct 2014 19:58:58 +0000 (12:58 -0700)
Commit:     Ben Hutchings <ben@decadent.org.uk>
CommitDate: Thu, 1 Jan 2015 01:27:52 +0000 (01:27 +0000)
commit 349ce993ac706869d553a1816426d3a4bfda02b1 upstream.

percpu tcp_md5sig_pool contains memory blobs that ultimately
go through sg_set_buf().

-> sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));

This requires that the whole area is in a physically contiguous portion
of memory, and that @buf is not backed by vmalloc().
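
For reference, sg_set_buf() is a thin wrapper (paraphrased below from
include/linux/scatterlist.h; the exact definition varies by kernel
version), and virt_to_page()/offset_in_page() are only meaningful for
addresses in the kernel's linear mapping, not for vmalloc()-backed memory:

	static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
				      unsigned int buflen)
	{
		/* only valid if @buf sits in the linear mapping */
		sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
	}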

Given that alloc_percpu() can use vmalloc() areas, this does not
fit the requirements.

Replace alloc_percpu() with a static DEFINE_PER_CPU(): tcp_md5sig_pool
is small anyway, so there is no gain in allocating it dynamically.
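
For illustration only (not part of the patch): alloc_percpu() hands back
a chunk that may live in vmalloc space, while a DEFINE_PER_CPU() variable
lives in the static per-cpu area, and the accessors differ accordingly.
A sketch with a hypothetical struct foo:

	struct foo { int field; };			/* hypothetical payload */

	static struct foo __percpu *dyn_pool;		/* dyn_pool = alloc_percpu(struct foo); */
	static DEFINE_PER_CPU(struct foo, static_pool);	/* static per-cpu area */

	static void example(int cpu)
	{
		/* dynamic pool: dereference through the __percpu pointer */
		per_cpu_ptr(dyn_pool, cpu)->field = 1;

		/* static pool: name the variable directly */
		per_cpu(static_pool, cpu).field = 1;
		this_cpu_ptr(&static_pool)->field = 1;
	}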

Signed-off-by: Eric Dumazet <edumazet@google.com>
Fixes: 765cf9976e93 ("tcp: md5: remove one indirection level in tcp_md5sig_pool")
Reported-by: Crestez Dan Leonard <cdleonard@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
[bwh: Backported to 3.2: the deleted code differs slightly due to API changes]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 573d786..32c9e83 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2863,61 +2863,42 @@ int tcp_gro_complete(struct sk_buff *skb)
 EXPORT_SYMBOL(tcp_gro_complete);
 
 #ifdef CONFIG_TCP_MD5SIG
-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
+static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
 static DEFINE_MUTEX(tcp_md5sig_mutex);
-
-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
-{
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
-
-               if (p->md5_desc.tfm)
-                       crypto_free_hash(p->md5_desc.tfm);
-       }
-       free_percpu(pool);
-}
+static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
        int cpu;
-       struct tcp_md5sig_pool __percpu *pool;
-
-       pool = alloc_percpu(struct tcp_md5sig_pool);
-       if (!pool)
-               return;
 
        for_each_possible_cpu(cpu) {
-               struct crypto_hash *hash;
-
-               hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-               if (!hash || IS_ERR(hash))
-                       goto out_free;
+               if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
+                       struct crypto_hash *hash;
 
-               per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
+                       hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+                       if (IS_ERR_OR_NULL(hash))
+                               return;
+                       per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
+               }
        }
-       /* before setting tcp_md5sig_pool, we must commit all writes
-        * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
+       /* before setting tcp_md5sig_pool_populated, we must commit all writes
+        * to memory. See smp_rmb() in tcp_get_md5sig_pool()
         */
        smp_wmb();
-       tcp_md5sig_pool = pool;
-       return;
-out_free:
-       __tcp_free_md5sig_pool(pool);
+       tcp_md5sig_pool_populated = true;
 }
 
 bool tcp_alloc_md5sig_pool(void)
 {
-       if (unlikely(!tcp_md5sig_pool)) {
+       if (unlikely(!tcp_md5sig_pool_populated)) {
                mutex_lock(&tcp_md5sig_mutex);
 
-               if (!tcp_md5sig_pool)
+               if (!tcp_md5sig_pool_populated)
                        __tcp_alloc_md5sig_pool();
 
                mutex_unlock(&tcp_md5sig_mutex);
        }
-       return tcp_md5sig_pool != NULL;
+       return tcp_md5sig_pool_populated;
 }
 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
 
@@ -2931,13 +2912,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
  */
 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 {
-       struct tcp_md5sig_pool __percpu *p;
-
        local_bh_disable();
-       p = ACCESS_ONCE(tcp_md5sig_pool);
-       if (p)
-               return __this_cpu_ptr(p);
 
+       if (tcp_md5sig_pool_populated) {
+               /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
+               smp_rmb();
+               return this_cpu_ptr(&tcp_md5sig_pool);
+       }
        local_bh_enable();
        return NULL;
 }
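
For context, callers keep using the pool through the unchanged
tcp_get_md5sig_pool()/tcp_put_md5sig_pool() pair. A sketch of that
pattern, loosely following tcp_v4_md5_hash_skb() (the labels and the
md5_hash buffer are placeholders, not part of the patch):

	struct tcp_md5sig_pool *hp;

	hp = tcp_get_md5sig_pool();	/* BH disabled on success */
	if (!hp)
		goto clear_hash_noput;	/* pool was never populated */

	if (crypto_hash_init(&hp->md5_desc))
		goto clear_hash;
	/* ... sg_set_buf() + crypto_hash_update() on key and segment data ... */
	if (crypto_hash_final(&hp->md5_desc, md5_hash))
		goto clear_hash;

	tcp_put_md5sig_pool();		/* re-enables BH */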