+static void inetpeer_gc_worker(struct work_struct *work)
+{
+	struct inet_peer *p, *n;
+	LIST_HEAD(list);
+
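+	/* Detach the pending peers under gc_lock so the walk below
+	 * runs without the lock held.
+	 */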
+	spin_lock_bh(&gc_lock);
+	list_replace_init(&gc_list, &list);
+	spin_unlock_bh(&gc_lock);
+
+	if (list_empty(&list))
+		return;
+
+	list_for_each_entry_safe(p, n, &list, gc_list) {
+
+		cond_resched();
+
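+		/* Unlink the AVL children and queue them at the tail of
+		 * the work list so this walk visits them as well.
+		 */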
+		if (p->avl_left != peer_avl_empty) {
+			list_add_tail(&p->avl_left->gc_list, &list);
+			p->avl_left = peer_avl_empty;
+		}
+
+		if (p->avl_right != peer_avl_empty) {
+			list_add_tail(&p->avl_right->gc_list, &list);
+			p->avl_right = peer_avl_empty;
+		}
+
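+		/* The insertions above may have queued a child directly
+		 * after p, behind the n prefetched by the iterator;
+		 * re-read the cursor so those children are not skipped.
+		 */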
+		n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
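+		/* Free peers nobody references any more; busy ones stay
+		 * on the list for another pass.
+		 */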
+		if (!atomic_read(&p->refcnt)) {
+			list_del(&p->gc_list);
+			kmem_cache_free(peer_cachep, p);
+		}
+	}
+
+	if (list_empty(&list))
+		return;
+
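+	/* Some peers are still referenced: put them back on the global
+	 * gc_list and try again after gc_delay.
+	 */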
+	spin_lock_bh(&gc_lock);
+	list_splice(&list, &gc_list);
+	spin_unlock_bh(&gc_lock);
+
+	schedule_delayed_work(&gc_work, gc_delay);
+}
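
Not shown in this hunk are the file-scope objects the worker uses. peer_cachep
and peer_avl_empty are the pre-existing inet_peer kmem cache and AVL leaf
sentinel in inetpeer.c; the rest would need to be introduced alongside this
function. A minimal sketch of those declarations, assuming a 60-second retry
interval (the actual gc_delay value is not visible in this hunk):

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	static void inetpeer_gc_worker(struct work_struct *work);

	static LIST_HEAD(gc_list);		/* peers queued for reclaim */
	static DEFINE_SPINLOCK(gc_lock);	/* protects gc_list */
	static DECLARE_DELAYED_WORK(gc_work, inetpeer_gc_worker);
	static const int gc_delay = 60 * HZ;	/* assumed retry interval */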