Merge branch 'rhashtable-next'
[pandora-kernel.git] / lib / rhashtable.c
/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Code partially derived from nft_hash
 * Rewritten with rehash code from br_multicast plus single list
 * pointer as suggested by Josh Triplett
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>
#include <linux/err.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4U
#define BUCKET_LOCKS_PER_CPU	128UL

static u32 head_hashfn(struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_head_hashfn(ht, tbl, he, ht->p);
}

#ifdef CONFIG_PROVE_LOCKING
#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))

int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = rht_bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#else
#define ASSERT_RHT_MUTEX(HT)
#endif


static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl,
			      gfp_t gfp)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than 0.5 locks per bucket */
	size = min_t(unsigned int, size, tbl->size >> 1);

	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE &&
		    gfp == GFP_KERNEL)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   gfp);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}
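
/*
 * Note on lock granularity: buckets share the locks allocated above.  The
 * mapping used by rht_bucket_lock() (in <linux/rhashtable.h>) is roughly
 *
 *	&tbl->locks[hash & tbl->locks_mask]
 *
 * so with a power-of-two number of locks, all buckets whose hashes agree
 * in the low-order bits are serialised by the same spinlock.
 */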

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static void bucket_table_free_rcu(struct rcu_head *head)
{
	bucket_table_free(container_of(head, struct bucket_table, rcu));
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets,
					       gfp_t gfp)
{
	struct bucket_table *tbl = NULL;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER) ||
	    gfp != GFP_KERNEL)
		tbl = kzalloc(size, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (tbl == NULL && gfp == GFP_KERNEL)
		tbl = vzalloc(size);
	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl, gfp) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	INIT_LIST_HEAD(&tbl->walkers);

	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
						  struct bucket_table *tbl)
{
	struct bucket_table *new_tbl;

	do {
		new_tbl = tbl;
		tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	} while (tbl);

	return new_tbl;
}
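
/*
 * While a resize is in flight the tables form a chain through future_tbl:
 *
 *	ht->tbl -> future_tbl -> ... -> newest table (future_tbl == NULL)
 *
 * rhashtable_last_table() walks to the end of that chain, so that callers
 * such as the insert path always operate on the newest table.
 */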

static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl = rhashtable_last_table(ht,
		rht_dereference_rcu(old_tbl->future_tbl, ht));
	struct rhash_head __rcu **pprev = &old_tbl->buckets[old_hash];
	int err = -ENOENT;
	struct rhash_head *head, *next, *entry;
	spinlock_t *new_bucket_lock;
	unsigned new_hash;

	rht_for_each(entry, old_tbl, old_hash) {
		err = 0;
		next = rht_dereference_bucket(entry->next, old_tbl, old_hash);

		if (rht_is_a_nulls(next))
			break;

		pprev = &entry->next;
	}

	if (err)
		goto out;

	new_hash = head_hashfn(ht, new_tbl, entry);

	new_bucket_lock = rht_bucket_lock(new_tbl, new_hash);

	spin_lock_nested(new_bucket_lock, SINGLE_DEPTH_NESTING);
	head = rht_dereference_bucket(new_tbl->buckets[new_hash],
				      new_tbl, new_hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(entry->next, ht, new_hash);
	else
		RCU_INIT_POINTER(entry->next, head);

	rcu_assign_pointer(new_tbl->buckets[new_hash], entry);
	spin_unlock(new_bucket_lock);

	rcu_assign_pointer(*pprev, next);

out:
	return err;
}

static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *old_bucket_lock;

	old_bucket_lock = rht_bucket_lock(old_tbl, old_hash);

	spin_lock_bh(old_bucket_lock);
	while (!rhashtable_rehash_one(ht, old_hash))
		;
	old_tbl->rehash++;
	spin_unlock_bh(old_bucket_lock);
}

static int rhashtable_rehash_attach(struct rhashtable *ht,
				    struct bucket_table *old_tbl,
				    struct bucket_table *new_tbl)
{
	/* Protect future_tbl using the first bucket lock. */
	spin_lock_bh(old_tbl->locks);

	/* Did somebody beat us to it? */
	if (rcu_access_pointer(old_tbl->future_tbl)) {
		spin_unlock_bh(old_tbl->locks);
		return -EEXIST;
	}

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 */
	rcu_assign_pointer(old_tbl->future_tbl, new_tbl);

	/* Ensure the new table is visible to readers. */
	smp_wmb();

	spin_unlock_bh(old_tbl->locks);

	return 0;
}

static int rhashtable_rehash_table(struct rhashtable *ht)
{
	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
	struct bucket_table *new_tbl;
	struct rhashtable_walker *walker;
	unsigned old_hash;

	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
	if (!new_tbl)
		return 0;

	for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
		rhashtable_rehash_chain(ht, old_hash);

	/* Publish the new table pointer. */
	rcu_assign_pointer(ht->tbl, new_tbl);

	list_for_each_entry(walker, &old_tbl->walkers, list)
		walker->tbl = NULL;

	/* Wait for readers. All new readers will see the new
	 * table, and thus no references to the old table will
	 * remain.
	 */
	call_rcu(&old_tbl->rcu, bucket_table_free_rcu);

	return rht_dereference(new_tbl->future_tbl, ht) ? -EAGAIN : 0;
}
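
/*
 * Taken together, a full rehash is:
 *
 *	rhashtable_rehash_attach()	publish new_tbl as old_tbl->future_tbl
 *	rhashtable_rehash_chain()	relink each old bucket's entries into
 *					new_tbl, one at a time, under both
 *					bucket locks
 *	rcu_assign_pointer(ht->tbl)	make the new table the primary one
 *	call_rcu()			free the old table once readers that
 *					may still see it have finished
 *
 * Lookups never block: until the old table is freed they may be attempted
 * in either table under RCU, as noted in rhashtable_rehash_attach().
 */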

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within a rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	int err;

	ASSERT_RHT_MUTEX(ht);

	old_tbl = rhashtable_last_table(ht, old_tbl);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function shrinks the hash table to fit, i.e. to the smallest size
 * that would not cause it to expand again right away.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * The caller must ensure that no concurrent table mutations take place.
 * It is however valid to have concurrent lookups if they are RCU protected.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
static int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
	int err;

	ASSERT_RHT_MUTEX(ht);

	if (size < ht->p.min_size)
		size = ht->p.min_size;

	if (old_tbl->size <= size)
		return 0;

	if (rht_dereference(old_tbl->future_tbl, ht))
		return -EEXIST;

	new_tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, old_tbl, new_tbl);
	if (err)
		bucket_table_free(new_tbl);

	return err;
}

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;
	int err = 0;

	ht = container_of(work, struct rhashtable, run_work);
	mutex_lock(&ht->mutex);
	if (ht->being_destroyed)
		goto unlock;

	tbl = rht_dereference(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, tbl);

	if (rht_grow_above_75(ht, tbl))
		rhashtable_expand(ht);
	else if (rht_shrink_below_30(ht, tbl))
		rhashtable_shrink(ht);

	err = rhashtable_rehash_table(ht);

unlock:
	mutex_unlock(&ht->mutex);

	if (err)
		schedule_work(&ht->run_work);
}

static bool rhashtable_check_elasticity(struct rhashtable *ht,
					struct bucket_table *tbl,
					unsigned hash)
{
	unsigned elasticity = ht->elasticity;
	struct rhash_head *head;

	rht_for_each(head, tbl, hash)
		if (!--elasticity)
			return true;

	return false;
}

int rhashtable_insert_rehash(struct rhashtable *ht)
{
	struct bucket_table *old_tbl;
	struct bucket_table *new_tbl;
	struct bucket_table *tbl;
	unsigned int size;
	int err;

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rhashtable_last_table(ht, old_tbl);

	size = tbl->size;

	if (rht_grow_above_75(ht, tbl))
		size *= 2;
	/* More than two rehashes (not resizes) detected. */
	else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
		return -EBUSY;

	new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
	if (new_tbl == NULL)
		return -ENOMEM;

	err = rhashtable_rehash_attach(ht, tbl, new_tbl);
	if (err) {
		bucket_table_free(new_tbl);
		if (err == -EEXIST)
			err = 0;
	} else
		schedule_work(&ht->run_work);

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_rehash);

int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
			   struct rhash_head *obj,
			   struct bucket_table *tbl)
{
	struct rhash_head *head;
	unsigned hash;
	int err;

	tbl = rhashtable_last_table(ht, tbl);
	hash = head_hashfn(ht, tbl, obj);
	spin_lock_nested(rht_bucket_lock(tbl, hash), SINGLE_DEPTH_NESTING);

	err = -EEXIST;
	if (key && rhashtable_lookup_fast(ht, key, ht->p))
		goto exit;

	err = -EAGAIN;
	if (rhashtable_check_elasticity(ht, tbl, hash) ||
	    rht_grow_above_100(ht, tbl))
		goto exit;

	err = 0;

	head = rht_dereference_bucket(tbl->buckets[hash], tbl, hash);

	RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

exit:
	spin_unlock(rht_bucket_lock(tbl, hash));

	return err;
}
EXPORT_SYMBOL_GPL(rhashtable_insert_slow);

/**
 * rhashtable_walk_init - Initialise an iterator
 * @ht:		Table to walk over
 * @iter:	Hash table Iterator
 *
 * This function prepares a hash table walk.
 *
 * Note that if you restart a walk after rhashtable_walk_stop you
 * may see the same object twice.  Also, you may miss objects if
 * there are removals in between rhashtable_walk_stop and the next
 * call to rhashtable_walk_start.
 *
 * For a completely stable walk you should construct your own data
 * structure outside the hash table.
 *
 * This function may sleep so you must not call it from interrupt
 * context or with spin locks held.
 *
 * You must call rhashtable_walk_exit if this function returns
 * successfully.
 */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
{
	iter->ht = ht;
	iter->p = NULL;
	iter->slot = 0;
	iter->skip = 0;

	iter->walker = kmalloc(sizeof(*iter->walker), GFP_KERNEL);
	if (!iter->walker)
		return -ENOMEM;

	mutex_lock(&ht->mutex);
	iter->walker->tbl = rht_dereference(ht->tbl, ht);
	list_add(&iter->walker->list, &iter->walker->tbl->walkers);
	mutex_unlock(&ht->mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_init);

/**
 * rhashtable_walk_exit - Free an iterator
 * @iter:	Hash table Iterator
 *
 * This function frees resources allocated by rhashtable_walk_init.
 */
void rhashtable_walk_exit(struct rhashtable_iter *iter)
{
	mutex_lock(&iter->ht->mutex);
	if (iter->walker->tbl)
		list_del(&iter->walker->list);
	mutex_unlock(&iter->ht->mutex);
	kfree(iter->walker);
}
EXPORT_SYMBOL_GPL(rhashtable_walk_exit);

/**
 * rhashtable_walk_start - Start a hash table walk
 * @iter:	Hash table iterator
 *
 * Start a hash table walk.  Note that we take the RCU lock in all
 * cases including when we return an error.  So you must always call
 * rhashtable_walk_stop to clean up.
 *
 * Returns zero if successful.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may use it immediately
 * by calling rhashtable_walk_next.
 */
int rhashtable_walk_start(struct rhashtable_iter *iter)
	__acquires(RCU)
{
	struct rhashtable *ht = iter->ht;

	mutex_lock(&ht->mutex);

	if (iter->walker->tbl)
		list_del(&iter->walker->list);

	rcu_read_lock();

	mutex_unlock(&ht->mutex);

	if (!iter->walker->tbl) {
		iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht);
		return -EAGAIN;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_start);

/**
 * rhashtable_walk_next - Return the next object and advance the iterator
 * @iter:	Hash table iterator
 *
 * Note that you must call rhashtable_walk_stop when you are finished
 * with the walk.
 *
 * Returns the next object or NULL when the end of the table is reached.
 *
 * Returns -EAGAIN if a resize event occurred.  Note that the iterator
 * will rewind back to the beginning and you may continue to use it.
 */
void *rhashtable_walk_next(struct rhashtable_iter *iter)
{
	struct bucket_table *tbl = iter->walker->tbl;
	struct rhashtable *ht = iter->ht;
	struct rhash_head *p = iter->p;
	void *obj = NULL;

	if (p) {
		p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot);
		goto next;
	}

	for (; iter->slot < tbl->size; iter->slot++) {
		int skip = iter->skip;

		rht_for_each_rcu(p, tbl, iter->slot) {
			if (!skip)
				break;
			skip--;
		}

next:
		if (!rht_is_a_nulls(p)) {
			iter->skip++;
			iter->p = p;
			obj = rht_obj(ht, p);
			goto out;
		}

		iter->skip = 0;
	}

	/* Ensure we see any new tables. */
	smp_rmb();

	iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht);
	if (iter->walker->tbl) {
		iter->slot = 0;
		iter->skip = 0;
		return ERR_PTR(-EAGAIN);
	}

	iter->p = NULL;

out:

	return obj;
}
EXPORT_SYMBOL_GPL(rhashtable_walk_next);

/**
 * rhashtable_walk_stop - Finish a hash table walk
 * @iter:	Hash table iterator
 *
 * Finish a hash table walk.
 */
void rhashtable_walk_stop(struct rhashtable_iter *iter)
	__releases(RCU)
{
	struct rhashtable *ht;
	struct bucket_table *tbl = iter->walker->tbl;

	if (!tbl)
		goto out;

	ht = iter->ht;

	mutex_lock(&ht->mutex);
	if (tbl->rehash < tbl->size)
		list_add(&iter->walker->list, &tbl->walkers);
	else
		iter->walker->tbl = NULL;
	mutex_unlock(&ht->mutex);

	iter->p = NULL;

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
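
/*
 * Illustrative walker usage (a sketch in the style of the configuration
 * examples below; "struct test_obj" and "my_ht" are placeholders for the
 * caller's own object type and table, not symbols defined in this file):
 *
 *	struct rhashtable_iter iter;
 *	struct test_obj *obj;
 *	int err;
 *
 *	err = rhashtable_walk_init(&my_ht, &iter);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_walk_start(&iter);
 *	if (err && err != -EAGAIN)
 *		goto stop;
 *
 *	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
 *		if (IS_ERR(obj)) {
 *			if (PTR_ERR(obj) == -EAGAIN)
 *				continue;	(resize seen, iterator rewound)
 *			break;
 *		}
 *		[... use obj; entries may be seen twice across a resize ...]
 *	}
 *
 * stop:
 *	rhashtable_walk_stop(&iter);
 *	rhashtable_walk_exit(&iter);
 */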

static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   (unsigned long)params->min_size);
}

static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
{
	return jhash2(key, length, seed);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht,
		    const struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((!params->key_len && !params->obj_hashfn) ||
	    (params->obj_hashfn && !params->obj_cmpfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->min_size)
		ht->p.min_size = roundup_pow_of_two(params->min_size);

	if (params->max_size)
		ht->p.max_size = rounddown_pow_of_two(params->max_size);

	ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);

	if (!params->insecure_elasticity)
		ht->elasticity = 16;

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	ht->key_len = ht->p.key_len;
	if (!params->hashfn) {
		ht->p.hashfn = jhash;

		if (!(ht->key_len & (sizeof(u32) - 1))) {
			ht->key_len /= sizeof(u32);
			ht->p.hashfn = rhashtable_jhash2;
		}
	}

	tbl = bucket_table_alloc(ht, size, GFP_KERNEL);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->nelems, 0);

	RCU_INIT_POINTER(ht->tbl, tbl);

	INIT_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
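
/*
 * End-to-end usage sketch (illustrative only; "test_obj", "params" and
 * "my_ht" are the placeholders from the examples above, and the _fast
 * helpers are assumed to be the inline fast-path wrappers declared in
 * <linux/rhashtable.h>):
 *
 *	struct rhashtable my_ht;
 *	struct test_obj *obj;
 *	int key = 1, err;
 *
 *	err = rhashtable_init(&my_ht, &params);
 *	if (err)
 *		return err;
 *
 *	err = rhashtable_insert_fast(&my_ht, &obj->node, params);
 *
 *	obj = rhashtable_lookup_fast(&my_ht, &key, params);
 *
 *	err = rhashtable_remove_fast(&my_ht, &obj->node, params);
 *
 *	rhashtable_destroy(&my_ht);
 */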

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe; the caller has to
 * make sure that no resizing can happen, by unpublishing the hash table and
 * waiting for an RCU quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	cancel_work_sync(&ht->run_work);

	mutex_lock(&ht->mutex);
	bucket_table_free(rht_dereference(ht->tbl, ht));
	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);