/*
 * security/tomoyo/gc.c
 *
 * Implementation of the Domain-Based Mandatory Access Control.
 *
 * Copyright (C) 2005-2010  NTT DATA CORPORATION
 *
 */

#include "common.h"
#include <linux/kthread.h>
#include <linux/slab.h>

/* The list for "struct tomoyo_io_buffer". */
static LIST_HEAD(tomoyo_io_buffer_list);
/* Lock for protecting tomoyo_io_buffer_list. */
static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);

/* Size of an element. */
static const u8 tomoyo_element_size[TOMOYO_MAX_POLICY] = {
        [TOMOYO_ID_GROUP] = sizeof(struct tomoyo_group),
        [TOMOYO_ID_PATH_GROUP] = sizeof(struct tomoyo_path_group),
        [TOMOYO_ID_NUMBER_GROUP] = sizeof(struct tomoyo_number_group),
        [TOMOYO_ID_AGGREGATOR] = sizeof(struct tomoyo_aggregator),
        [TOMOYO_ID_TRANSITION_CONTROL] =
        sizeof(struct tomoyo_transition_control),
        [TOMOYO_ID_MANAGER] = sizeof(struct tomoyo_manager),
        /* [TOMOYO_ID_NAME] = "struct tomoyo_name"->size, */
        /* [TOMOYO_ID_ACL] =
           tomoyo_acl_size["struct tomoyo_acl_info"->type], */
        [TOMOYO_ID_DOMAIN] = sizeof(struct tomoyo_domain_info),
};

/* Size of a domain ACL element. */
static const u8 tomoyo_acl_size[] = {
        [TOMOYO_TYPE_PATH_ACL] = sizeof(struct tomoyo_path_acl),
        [TOMOYO_TYPE_PATH2_ACL] = sizeof(struct tomoyo_path2_acl),
        [TOMOYO_TYPE_PATH_NUMBER_ACL] = sizeof(struct tomoyo_path_number_acl),
        [TOMOYO_TYPE_MKDEV_ACL] = sizeof(struct tomoyo_mkdev_acl),
        [TOMOYO_TYPE_MOUNT_ACL] = sizeof(struct tomoyo_mount_acl),
};
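/*
 * The sizes above are recorded into "struct tomoyo_gc"->size by
 * tomoyo_add_to_gc() so that tomoyo_element_linked_by_gc() can check
 * whether a queued element still points into another queued element.
 */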

/**
 * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
 * false otherwise.
 */
static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
{
        struct tomoyo_io_buffer *head;
        bool in_use = false;

        spin_lock(&tomoyo_io_buffer_list_lock);
        list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
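                /*
                 * Pin this "struct tomoyo_io_buffer" by raising head->users
                 * before dropping the spinlock, so that it cannot be
                 * kfree()d while we sleep on head->io_sem below.
                 */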
                head->users++;
                spin_unlock(&tomoyo_io_buffer_list_lock);
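                /*
                 * If io_sem cannot be taken, conservatively treat @element
                 * as still in use; a later garbage collector pass will
                 * retry.
                 */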
                if (mutex_lock_interruptible(&head->io_sem)) {
                        in_use = true;
                        goto out;
                }
                if (head->r.domain == element || head->r.group == element ||
                    head->r.acl == element || &head->w.domain->list == element)
                        in_use = true;
                mutex_unlock(&head->io_sem);
out:
                spin_lock(&tomoyo_io_buffer_list_lock);
                head->users--;
                if (in_use)
                        break;
        }
        spin_unlock(&tomoyo_io_buffer_list_lock);
        return in_use;
}

/**
 * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
 *
 * @string: String to check.
 * @size:   Size of memory allocated for @string.
 *
 * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
 * false otherwise.
 */
static bool tomoyo_name_used_by_io_buffer(const char *string,
                                          const size_t size)
{
        struct tomoyo_io_buffer *head;
        bool in_use = false;

        spin_lock(&tomoyo_io_buffer_list_lock);
        list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
                int i;
                head->users++;
                spin_unlock(&tomoyo_io_buffer_list_lock);
                if (mutex_lock_interruptible(&head->io_sem)) {
                        in_use = true;
                        goto out;
                }
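                /*
                 * head->r.w[] holds pointers to strings which are queued for
                 * printing. If any of them points into the memory block
                 * [string, string + size], that block is still in use by
                 * this reader.
                 */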
                for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
                        const char *w = head->r.w[i];
                        if (w < string || w > string + size)
                                continue;
                        in_use = true;
                        break;
                }
                mutex_unlock(&head->io_sem);
out:
                spin_lock(&tomoyo_io_buffer_list_lock);
                head->users--;
                if (in_use)
                        break;
        }
        spin_unlock(&tomoyo_io_buffer_list_lock);
        return in_use;
}

/* Structure for garbage collection. */
struct tomoyo_gc {
        struct list_head list;
        enum tomoyo_policy_id type;
        size_t size;
        struct list_head *element;
};
/* List of entries to be deleted. */
static LIST_HEAD(tomoyo_gc_list);
/* Length of tomoyo_gc_list. */
static int tomoyo_gc_list_len;

/**
 * tomoyo_add_to_gc - Add an entry to the to-be-deleted list.
 *
 * @type:    One of values in "enum tomoyo_policy_id".
 * @element: Pointer to "struct list_head".
 *
 * Returns true if the caller may keep adding entries, false if memory
 * allocation failed or the per-scan quota was reached.
 *
 * Caller holds tomoyo_policy_lock mutex.
 *
 * Adding an entry needs kmalloc(). Thus, if we try to add thousands of
 * entries at once, it will take too long. Therefore, do not add more than 128
 * entries per scan. But to be able to handle the worst case where all entries
 * are in use, we accept one more entry per scan.
 *
 * If we used a singly linked list via "struct list_head"->prev (which is
 * LIST_POISON2 after list_del_rcu()), we could avoid the kmalloc().
 */
static bool tomoyo_add_to_gc(const int type, struct list_head *element)
{
        struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return false;
        entry->type = type;
        if (type == TOMOYO_ID_ACL)
                entry->size = tomoyo_acl_size[
                              container_of(element,
                                           typeof(struct tomoyo_acl_info),
                                           list)->type];
        else if (type == TOMOYO_ID_NAME)
                entry->size = strlen(container_of(element,
                                                  typeof(struct tomoyo_name),
                                                  head.list)->entry.name) + 1;
        else
                entry->size = tomoyo_element_size[type];
        entry->element = element;
        list_add(&entry->list, &tomoyo_gc_list);
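        /*
         * list_del_rcu() unlinks @element from the list it was on while
         * leaving @element->next intact, so readers inside an SRCU section
         * can still walk past it until tomoyo_gc_thread() has called
         * synchronize_srcu().
         */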
        list_del_rcu(element);
        return tomoyo_gc_list_len++ < 128;
}

/**
 * tomoyo_element_linked_by_gc - Validate next element of an entry.
 *
 * @element: Pointer to an element.
 * @size:    Size of @element in bytes.
 *
 * Returns true if @element is linked by other elements in the garbage
 * collector's queue, false otherwise.
 */
static bool tomoyo_element_linked_by_gc(const u8 *element, const size_t size)
{
        struct tomoyo_gc *p;
        list_for_each_entry(p, &tomoyo_gc_list, list) {
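                /*
                 * Because list_del_rcu() keeps ->next of unlinked entries
                 * intact, p->element->next may still point into the block
                 * [element, element + size]. If it does, that block must not
                 * be released yet.
                 */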
                const u8 *ptr = (const u8 *) p->element->next;
                if (ptr < element || element + size < ptr)
                        continue;
                return true;
        }
        return false;
}

/**
 * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_transition_control(struct list_head *element)
{
        struct tomoyo_transition_control *ptr =
                container_of(element, typeof(*ptr), head.list);
        tomoyo_put_name(ptr->domainname);
        tomoyo_put_name(ptr->program);
}

/**
 * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_aggregator(struct list_head *element)
{
        struct tomoyo_aggregator *ptr =
                container_of(element, typeof(*ptr), head.list);
        tomoyo_put_name(ptr->original_name);
        tomoyo_put_name(ptr->aggregated_name);
}

/**
 * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_manager(struct list_head *element)
{
        struct tomoyo_manager *ptr =
                container_of(element, typeof(*ptr), head.list);
        tomoyo_put_name(ptr->manager);
}

/**
 * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_acl(struct list_head *element)
{
        struct tomoyo_acl_info *acl =
                container_of(element, typeof(*acl), list);
        switch (acl->type) {
        case TOMOYO_TYPE_PATH_ACL:
                {
                        struct tomoyo_path_acl *entry
                                = container_of(acl, typeof(*entry), head);
                        tomoyo_put_name_union(&entry->name);
                }
                break;
        case TOMOYO_TYPE_PATH2_ACL:
                {
                        struct tomoyo_path2_acl *entry
                                = container_of(acl, typeof(*entry), head);
                        tomoyo_put_name_union(&entry->name1);
                        tomoyo_put_name_union(&entry->name2);
                }
                break;
        case TOMOYO_TYPE_PATH_NUMBER_ACL:
                {
                        struct tomoyo_path_number_acl *entry
                                = container_of(acl, typeof(*entry), head);
                        tomoyo_put_name_union(&entry->name);
                        tomoyo_put_number_union(&entry->number);
                }
                break;
        case TOMOYO_TYPE_MKDEV_ACL:
                {
                        struct tomoyo_mkdev_acl *entry
                                = container_of(acl, typeof(*entry), head);
                        tomoyo_put_name_union(&entry->name);
                        tomoyo_put_number_union(&entry->mode);
                        tomoyo_put_number_union(&entry->major);
                        tomoyo_put_number_union(&entry->minor);
                }
                break;
        case TOMOYO_TYPE_MOUNT_ACL:
                {
                        struct tomoyo_mount_acl *entry
                                = container_of(acl, typeof(*entry), head);
                        tomoyo_put_name_union(&entry->dev_name);
                        tomoyo_put_name_union(&entry->dir_name);
                        tomoyo_put_name_union(&entry->fs_type);
                        tomoyo_put_number_union(&entry->flags);
                }
                break;
        }
}

/**
 * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns true if deleted, false otherwise.
 */
static bool tomoyo_del_domain(struct list_head *element)
{
        struct tomoyo_domain_info *domain =
                container_of(element, typeof(*domain), list);
        struct tomoyo_acl_info *acl;
        struct tomoyo_acl_info *tmp;
        /*
         * Since we don't protect the whole execve() operation using SRCU,
         * we need to recheck domain->users at this point.
         *
         * (1) Reader starts an SRCU section upon execve().
         * (2) Reader traverses tomoyo_domain_list and finds this domain.
         * (3) Writer marks this domain as deleted.
         * (4) Garbage collector removes this domain from tomoyo_domain_list
         *     because this domain is marked as deleted and used by nobody.
         * (5) Reader saves a reference to this domain into
         *     "struct linux_binprm"->cred->security.
         * (6) Reader finishes the SRCU section, although the execve()
         *     operation has not finished yet.
         * (7) Garbage collector waits for SRCU synchronization.
         * (8) Garbage collector kfree()s this domain because this domain is
         *     used by nobody.
         * (9) Reader finishes the execve() operation and restores this
         *     domain from "struct linux_binprm"->cred->security.
         *
         * By updating domain->users at (5), we can detect this race and
         * avoid it by rechecking domain->users at (8).
         */
        if (atomic_read(&domain->users))
                return false;
        list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
                tomoyo_del_acl(&acl->list);
                tomoyo_memory_free(acl);
        }
        tomoyo_put_name(domain->domainname);
        return true;
}

/**
 * tomoyo_del_name - Delete members in "struct tomoyo_name".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_name(struct list_head *element)
{
        /*
         * Nothing to do here. The string is stored in the same memory block
         * as "struct tomoyo_name" and is released together with it by the
         * tomoyo_memory_free() call in tomoyo_kfree_entry().
         */
}

/**
 * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_path_group(struct list_head *element)
{
        struct tomoyo_path_group *member =
                container_of(element, typeof(*member), head.list);
        tomoyo_put_name(member->member_name);
}

/**
 * tomoyo_del_group - Delete "struct tomoyo_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_group(struct list_head *element)
{
        struct tomoyo_group *group =
                container_of(element, typeof(*group), head.list);
        tomoyo_put_name(group->group_name);
}

/**
 * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
 *
 * @element: Pointer to "struct list_head".
 *
 * Returns nothing.
 */
static void tomoyo_del_number_group(struct list_head *element)
{
        /* Nothing to do. */
}

/**
 * tomoyo_collect_member - Collect deleted elements with "struct tomoyo_acl_head".
 *
 * @id:          One of values in "enum tomoyo_policy_id".
 * @member_list: Pointer to "struct list_head".
 *
 * Returns true if scanning may continue, false if the to-be-deleted list
 * could not take more entries.
 */
static bool tomoyo_collect_member(const enum tomoyo_policy_id id,
                                  struct list_head *member_list)
{
        struct tomoyo_acl_head *member;
        list_for_each_entry(member, member_list, list) {
                if (!member->is_deleted)
                        continue;
                if (!tomoyo_add_to_gc(id, &member->list))
                        return false;
        }
        return true;
}

/**
 * tomoyo_collect_acl - Collect deleted elements in a list of "struct tomoyo_acl_info".
 *
 * @list: Pointer to "struct list_head".
 *
 * Returns true if scanning may continue, false if the to-be-deleted list
 * could not take more entries.
 */
static bool tomoyo_collect_acl(struct list_head *list)
{
        struct tomoyo_acl_info *acl;
        list_for_each_entry(acl, list, list) {
                if (!acl->is_deleted)
                        continue;
                if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list))
                        return false;
        }
        return true;
}

/**
 * tomoyo_collect_entry - Scan lists for deleted elements.
 *
 * Returns nothing.
 */
static void tomoyo_collect_entry(void)
{
        int i;
        enum tomoyo_policy_id id;
        struct tomoyo_policy_namespace *ns;
        int idx;
        if (mutex_lock_interruptible(&tomoyo_policy_lock))
                return;
        idx = tomoyo_read_lock();
        {
                struct tomoyo_domain_info *domain;
                list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
                        if (!tomoyo_collect_acl(&domain->acl_info_list))
                                goto unlock;
                        if (!domain->is_deleted || atomic_read(&domain->users))
                                continue;
                        /*
                         * Nobody is referring to this domain, but somebody
                         * may start referring to it via a successful
                         * execve(). We recheck domain->users after SRCU
                         * synchronization.
                         */
                        if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list))
                                goto unlock;
                }
        }
        list_for_each_entry_rcu(ns, &tomoyo_namespace_list, namespace_list) {
                for (id = 0; id < TOMOYO_MAX_POLICY; id++)
                        if (!tomoyo_collect_member(id, &ns->policy_list[id]))
                                goto unlock;
                for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
                        if (!tomoyo_collect_acl(&ns->acl_group[i]))
                                goto unlock;
                for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
                        struct list_head *list = &ns->group_list[i];
                        struct tomoyo_group *group;
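                        /*
                         * Map the group_list[] index to the policy id of its
                         * members: index 0 holds path groups, the remaining
                         * indexes hold number groups.
                         */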
                        switch (i) {
                        case 0:
                                id = TOMOYO_ID_PATH_GROUP;
                                break;
                        default:
                                id = TOMOYO_ID_NUMBER_GROUP;
                                break;
                        }
                        list_for_each_entry(group, list, head.list) {
                                if (!tomoyo_collect_member
                                    (id, &group->member_list))
                                        goto unlock;
                                if (!list_empty(&group->member_list) ||
                                    atomic_read(&group->head.users))
                                        continue;
                                if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP,
                                                      &group->head.list))
                                        goto unlock;
                        }
                }
        }
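        /*
         * Strings ("struct tomoyo_name") are reference counted rather than
         * marked as deleted; collect the ones whose reference count has
         * dropped to zero.
         */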
        for (i = 0; i < TOMOYO_MAX_HASH; i++) {
                struct list_head *list = &tomoyo_name_list[i];
                struct tomoyo_shared_acl_head *ptr;
                list_for_each_entry(ptr, list, list) {
                        if (atomic_read(&ptr->users))
                                continue;
                        if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->list))
                                goto unlock;
                }
        }
unlock:
        tomoyo_read_unlock(idx);
        mutex_unlock(&tomoyo_policy_lock);
}

/**
 * tomoyo_kfree_entry - Delete entries in tomoyo_gc_list.
 *
 * Returns true if some entries were kfree()d, false otherwise.
 */
static bool tomoyo_kfree_entry(void)
{
        struct tomoyo_gc *p;
        struct tomoyo_gc *tmp;
        bool result = false;

        list_for_each_entry_safe(p, tmp, &tomoyo_gc_list, list) {
                struct list_head *element = p->element;

                /*
                 * list_del_rcu() in tomoyo_add_to_gc() guarantees that the
                 * list element is no longer reachable from the list which
                 * the element was originally on (e.g. tomoyo_domain_list).
                 * Also, synchronize_srcu() in tomoyo_gc_thread() guarantees
                 * that the list element is no longer referenced by syscall
                 * users.
                 *
                 * However, there are three users which may still be using
                 * the list element. We need to defer until all of these
                 * users forget the list element.
                 *
                 * Firstly, defer until "struct tomoyo_io_buffer"->r.{domain,
                 * group,acl} and "struct tomoyo_io_buffer"->w.domain forget
                 * the list element.
                 */
                if (tomoyo_struct_used_by_io_buffer(element))
                        continue;
                /*
                 * Secondly, defer until all other elements in
                 * tomoyo_gc_list forget the list element.
                 */
                if (tomoyo_element_linked_by_gc((const u8 *) element, p->size))
                        continue;
                switch (p->type) {
                case TOMOYO_ID_TRANSITION_CONTROL:
                        tomoyo_del_transition_control(element);
                        break;
                case TOMOYO_ID_AGGREGATOR:
                        tomoyo_del_aggregator(element);
                        break;
                case TOMOYO_ID_MANAGER:
                        tomoyo_del_manager(element);
                        break;
                case TOMOYO_ID_NAME:
                        /*
                         * Thirdly, defer until all "struct tomoyo_io_buffer"
                         * ->r.w[] forget the list element.
                         */
                        if (tomoyo_name_used_by_io_buffer(
                            container_of(element, typeof(struct tomoyo_name),
                                         head.list)->entry.name, p->size))
                                continue;
                        tomoyo_del_name(element);
                        break;
                case TOMOYO_ID_ACL:
                        tomoyo_del_acl(element);
                        break;
                case TOMOYO_ID_DOMAIN:
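                        /*
                         * tomoyo_del_domain() returns false if this domain
                         * has gained users since it was queued (see the
                         * comment in that function); leave the entry queued
                         * and retry on a later pass.
                         */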
                        if (!tomoyo_del_domain(element))
                                continue;
                        break;
                case TOMOYO_ID_PATH_GROUP:
                        tomoyo_del_path_group(element);
                        break;
                case TOMOYO_ID_GROUP:
                        tomoyo_del_group(element);
                        break;
                case TOMOYO_ID_NUMBER_GROUP:
                        tomoyo_del_number_group(element);
                        break;
                case TOMOYO_MAX_POLICY:
                        break;
                }
                tomoyo_memory_free(element);
                list_del(&p->list);
                kfree(p);
                tomoyo_gc_list_len--;
                result = true;
        }
        return result;
}

/**
 * tomoyo_gc_thread - Garbage collector thread function.
 *
 * @unused: Unused.
 *
 * In case the OOM killer chooses this thread for termination, we create it
 * as a short-lived thread whenever the /sys/kernel/security/tomoyo/
 * interface is close()d.
 *
 * Returns 0.
 */
static int tomoyo_gc_thread(void *unused)
{
        /* Garbage collector thread is exclusive. */
        static DEFINE_MUTEX(tomoyo_gc_mutex);
        if (!mutex_trylock(&tomoyo_gc_mutex))
                goto out;
        daemonize("GC for TOMOYO");
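        /*
         * Repeat "collect deleted entries, wait for an SRCU grace period,
         * then try to free them" until no further progress is made.
         */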
        do {
                tomoyo_collect_entry();
                if (list_empty(&tomoyo_gc_list))
                        break;
                synchronize_srcu(&tomoyo_ss);
        } while (tomoyo_kfree_entry());
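        /*
         * Release "struct tomoyo_io_buffer" entries which were close()d
         * while the garbage collector was temporarily holding a reference
         * to them (head->users) and therefore could not be kfree()d from
         * tomoyo_notify_gc().
         */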
        {
                struct tomoyo_io_buffer *head;
                struct tomoyo_io_buffer *tmp;

                spin_lock(&tomoyo_io_buffer_list_lock);
                list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
                                         list) {
                        if (head->users)
                                continue;
                        list_del(&head->list);
                        kfree(head->read_buf);
                        kfree(head->write_buf);
                        kfree(head);
                }
                spin_unlock(&tomoyo_io_buffer_list_lock);
        }
        mutex_unlock(&tomoyo_gc_mutex);
out:
        /* This acts as do_exit(0). */
        return 0;
}

/**
 * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
 *
 * @head:        Pointer to "struct tomoyo_io_buffer".
 * @is_register: True if register, false if unregister.
 *
 * Returns nothing.
 */
void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
{
        bool is_write = false;

        spin_lock(&tomoyo_io_buffer_list_lock);
        if (is_register) {
                head->users = 1;
                list_add(&head->list, &tomoyo_io_buffer_list);
        } else {
                is_write = head->write_buf != NULL;
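                /*
                 * Only interfaces opened for writing can delete entries, so
                 * a garbage collection pass is started only when such an
                 * interface is released.
                 */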
                if (!--head->users) {
                        list_del(&head->list);
                        kfree(head->read_buf);
                        kfree(head->write_buf);
                        kfree(head);
                }
        }
        spin_unlock(&tomoyo_io_buffer_list_lock);
        if (is_write) {
                struct task_struct *task = kthread_create(tomoyo_gc_thread,
                                                          NULL,
                                                          "GC for TOMOYO");
                if (!IS_ERR(task))
                        wake_up_process(task);
        }
}
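
/*
 * Usage sketch (illustrative; the actual callers live in
 * security/tomoyo/common.c): the /sys/kernel/security/tomoyo/ interface is
 * expected to register its "struct tomoyo_io_buffer" on open() via
 * tomoyo_notify_gc(head, true) and to unregister it on close() via
 * tomoyo_notify_gc(head, false), which in turn may spawn the garbage
 * collector thread above.
 */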