drivers/misc/sgi-xp: replace partid_t with a short
drivers/misc/sgi-xp/xpc_main.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *      XPC provides a message passing capability that crosses partition
 *      boundaries. This module is made up of two parts:
 *
 *          partition   This part detects the presence/absence of other
 *                      partitions. It provides a heartbeat and monitors
 *                      the heartbeats of other partitions.
 *
 *          channel     This part manages the channels and sends/receives
 *                      messages across them to/from other partitions.
 *
 *      There are a couple of additional functions residing in XP, which
 *      provide an interface to XPC for its users.
 *
 *
 *      Caveats:
 *
 *        . We currently have no way to determine which nasid an IPI came
 *          from. Thus, xpc_IPI_send() does a remote AMO write followed by
 *          an IPI. The AMO indicates where data is to be pulled from, so
 *          after the IPI arrives, the remote partition checks the AMO word.
 *          The IPI can actually arrive before the AMO, however, so other code
 *          must periodically check for this case. Also, remote AMO operations
 *          do not reliably time out. Thus we do a remote PIO read solely to
 *          know whether the remote partition is down and whether we should
 *          stop sending IPIs to it. This remote PIO read operation is set up
 *          in a special nofault region so SAL knows to ignore (and clean up)
 *          any errors due to the remote AMO write, PIO read, and/or PIO
 *          write operations.
 *
 *          If/when new hardware solves this IPI problem, we should abandon
 *          the current approach.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/completion.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
        .name = "xpc"
};

struct device xpc_part_dbg_subname = {
        .bus_id = {0},          /* set to "part" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
        .bus_id = {0},          /* set to "chan" at xpc_init() time */
        .driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_request_timelimit = XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT;
static int xpc_disengage_request_min_timelimit; /* = 0 */
static int xpc_disengage_request_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_interval",
         .data = &xpc_hb_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_min_interval,
         .extra2 = &xpc_hb_max_interval},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb_check_interval",
         .data = &xpc_hb_check_interval,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_hb_check_min_interval,
         .extra2 = &xpc_hb_check_max_interval},
        {}
};
static ctl_table xpc_sys_xpc_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "hb",
         .mode = 0555,
         .child = xpc_sys_xpc_hb_dir},
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "disengage_request_timelimit",
         .data = &xpc_disengage_request_timelimit,
         .maxlen = sizeof(int),
         .mode = 0644,
         .proc_handler = &proc_dointvec_minmax,
         .strategy = &sysctl_intvec,
         .extra1 = &xpc_disengage_request_min_timelimit,
         .extra2 = &xpc_disengage_request_max_timelimit},
        {}
};
static ctl_table xpc_sys_dir[] = {
        {
         .ctl_name = CTL_UNNUMBERED,
         .procname = "xpc",
         .mode = 0555,
         .child = xpc_sys_xpc_dir},
        {}
};
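
/*
 * Illustration (not part of the original source): once xpc_sys_dir is
 * registered via register_sysctl_table(), the tunables above appear under
 * /proc/sys, and proc_dointvec_minmax() clamps writes to the extra1/extra2
 * bounds, e.g. from a shell:
 *
 *      # cat /proc/sys/xpc/hb/hb_interval                      (1..10)
 *      # echo 5 > /proc/sys/xpc/hb/hb_interval
 *      # echo 60 > /proc/sys/xpc/disengage_request_timelimit   (0..120)
 *
 * A write outside the min/max range is rejected with -EINVAL.
 */
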
static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage request timed out */
int xpc_disengage_request_timedout;

/* #of IRQs received */
static atomic_t xpc_act_IRQ_rcvd;

/* IRQ handler notifies this wait queue on receipt of an IRQ */
static DECLARE_WAIT_QUEUE_HEAD(xpc_act_IRQ_wq);

static unsigned long xpc_hb_check_timeout;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static struct timer_list xpc_hb_timer;

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
        .notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
        .notifier_call = xpc_system_die,
};

/*
 * Timer function to enforce the timelimit on the partition disengage request.
 */
static void
xpc_timeout_partition_disengage_request(unsigned long data)
{
        struct xpc_partition *part = (struct xpc_partition *)data;

        DBUG_ON(time_before(jiffies, part->disengage_request_timeout));

        (void)xpc_partition_disengaged(part);

        DBUG_ON(part->disengage_request_timeout != 0);
        DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
}

/*
 * Notify the heartbeat check thread that an IRQ has been received.
 */
static irqreturn_t
xpc_act_IRQ_handler(int irq, void *dev_id)
{
        atomic_inc(&xpc_act_IRQ_rcvd);
        wake_up_interruptible(&xpc_act_IRQ_wq);
        return IRQ_HANDLED;
}

/*
 * Timer to produce the heartbeat.  The timer structure's function pointer
 * is already set when this is initially called.  A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
        xpc_vars->heartbeat++;

        if (time_after_eq(jiffies, xpc_hb_check_timeout))
                wake_up_interruptible(&xpc_act_IRQ_wq);

        xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
        add_timer(&xpc_hb_timer);
}
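
/*
 * A minimal sketch (illustration only, not part of the original source) of
 * the self-rearming timer pattern xpc_hb_beater() uses, written against the
 * same legacy timer API (init_timer/add_timer with a jiffies expiry).  The
 * example_* names are hypothetical.
 */
#if 0
static struct timer_list example_timer;

static void
example_beat(unsigned long dummy)
{
        /* ... do the periodic work ... then re-arm the timer */
        example_timer.expires = jiffies + (2 * HZ);     /* 2 seconds out */
        add_timer(&example_timer);
}

static void
example_start(void)
{
        init_timer(&example_timer);
        example_timer.function = example_beat;
        example_beat(0);        /* first beat also arms the timer */
}
#endif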

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
        int last_IRQ_count = 0;
        int new_IRQ_count;
        int force_IRQ = 0;

        /* this thread was marked active by xpc_hb_init() */

        set_cpus_allowed(current, cpumask_of_cpu(XPC_HB_CHECK_CPU));

        /* set our heartbeating to other partitions into motion */
        xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
        xpc_hb_beater(0);

        while (!xpc_exiting) {

                dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
                        "been received\n",
                        (int)(xpc_hb_check_timeout - jiffies),
                        atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);

                /* checking of remote heartbeats is skewed by IRQ handling */
                if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
                        dev_dbg(xpc_part, "checking remote heartbeats\n");
                        xpc_check_remote_hb();

                        /*
                         * We need to periodically recheck to ensure no
                         * IPI/AMO pairs have been missed.  That check
                         * must always reset xpc_hb_check_timeout.
                         */
                        force_IRQ = 1;
                }

                /* check for outstanding IRQs */
                new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
                if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
                        force_IRQ = 0;

                        dev_dbg(xpc_part, "found an IRQ to process; will be "
                                "resetting xpc_hb_check_timeout\n");

                        last_IRQ_count += xpc_identify_act_IRQ_sender();
                        if (last_IRQ_count < new_IRQ_count) {
                                /* retry once to help avoid missing AMO */
                                (void)xpc_identify_act_IRQ_sender();
                        }
                        last_IRQ_count = new_IRQ_count;

                        xpc_hb_check_timeout = jiffies +
                            (xpc_hb_check_interval * HZ);
                }

                /* wait for IRQ or timeout */
                (void)wait_event_interruptible(xpc_act_IRQ_wq,
                                               (last_IRQ_count <
                                                atomic_read(&xpc_act_IRQ_rcvd)
                                                || time_after_eq(jiffies,
                                                        xpc_hb_check_timeout) ||
                                                xpc_exiting));
        }

        dev_dbg(xpc_part, "heartbeat checker is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_hb_checker_exited);
        return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
        xpc_discovery();

        dev_dbg(xpc_part, "discovery thread is exiting\n");

        /* mark this thread as having exited */
        complete(&xpc_discovery_exited);
        return 0;
}

/*
 * Establish first contact with the remote partition. This involves pulling
 * the XPC per partition variables from the remote partition and waiting for
 * the remote partition to pull ours.
 */
static enum xp_retval
xpc_make_first_contact(struct xpc_partition *part)
{
        enum xp_retval ret;

        while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) {
                if (ret != xpRetry) {
                        XPC_DEACTIVATE_PARTITION(part, ret);
                        return ret;
                }

                dev_dbg(xpc_chan, "waiting to make first contact with "
                        "partition %d\n", XPC_PARTID(part));

                /* wait 1/4 of a second or so */
                (void)msleep_interruptible(250);

                if (part->act_state == XPC_P_DEACTIVATING)
                        return part->reason;
        }

        return xpc_mark_partition_active(part);
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
 * that kthread until the partition is brought down, at which time the kthread
 * returns to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
        while (part->act_state != XPC_P_DEACTIVATING ||
               atomic_read(&part->nchannels_active) > 0 ||
               !xpc_partition_disengaged(part)) {

                xpc_process_channel_activity(part);

                /*
                 * Wait until we've been requested to activate kthreads or
                 * all of the channel's message queues have been torn down or
                 * a signal is pending.
                 *
                 * The channel_mgr_requests count is set to 1 after the
                 * channel manager is awakened. This prevents it from making
                 * one pass through the loop for each request, since it will
                 * service all the requests in one pass. The reason it's set
                 * to 1 instead of 0 is so that other kthreads will know that
                 * the channel manager is running and won't bother trying to
                 * wake it up.
                 */
                atomic_dec(&part->channel_mgr_requests);
                (void)wait_event_interruptible(part->channel_mgr_wq,
                                (atomic_read(&part->channel_mgr_requests) > 0 ||
                                 part->local_IPI_amo != 0 ||
                                 (part->act_state == XPC_P_DEACTIVATING &&
                                 atomic_read(&part->nchannels_active) == 0 &&
                                 xpc_partition_disengaged(part))));
                atomic_set(&part->channel_mgr_requests, 1);
        }
}
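
/*
 * A generic sketch (illustration only; the names here are hypothetical, and
 * the producer side paraphrases what the xpc_wakeup_channel_mgr() caller
 * presumably does) of the wakeup-coalescing pattern described above.  The
 * consumer parks the counter at 1 while it runs, so producers can tell it is
 * awake, and it services every queued request in a single pass:
 */
#if 0
static atomic_t requests = ATOMIC_INIT(1);
static DECLARE_WAIT_QUEUE_HEAD(wq);

/* producer: only the transition from "idle" needs an actual wakeup */
if (atomic_inc_return(&requests) == 1)
        wake_up(&wq);

/* consumer loop body */
atomic_dec(&requests);
(void)wait_event_interruptible(wq, atomic_read(&requests) > 0);
atomic_set(&requests, 1);       /* "running"; new requests get batched */
#endif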

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition until the partition
 * goes down, at which time the kthread will tear down the XPC infrastructure
 * and then exit.
 *
 * XPC HB will put the remote partition's XPC per partition specific variables
 * physical address into xpc_partitions[partid].remote_vars_part_pa prior to
 * calling xpc_partition_up().
 */
static void
xpc_partition_up(struct xpc_partition *part)
{
        DBUG_ON(part->channels != NULL);

        dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part));

        if (xpc_setup_infrastructure(part) != xpSuccess)
                return;

        /*
         * The kthread that XPC HB called us with will become the
         * channel manager for this partition. It will not return
         * to XPC HB until the partition's XPC infrastructure
         * has been dismantled.
         */

        (void)xpc_part_ref(part);       /* this will always succeed */

        if (xpc_make_first_contact(part) == xpSuccess)
                xpc_channel_mgr(part);

        xpc_part_deref(part);

        xpc_teardown_infrastructure(part);
}

static int
xpc_activating(void *__partid)
{
        short partid = (u64)__partid;
        struct xpc_partition *part = &xpc_partitions[partid];
        unsigned long irq_flags;

        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

        spin_lock_irqsave(&part->act_lock, irq_flags);

        if (part->act_state == XPC_P_DEACTIVATING) {
                part->act_state = XPC_P_INACTIVE;
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                part->remote_rp_pa = 0;
                return 0;
        }

        /* indicate the thread is activating */
        DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
        part->act_state = XPC_P_ACTIVATING;

        XPC_SET_REASON(part, 0, 0);
        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        dev_dbg(xpc_part, "bringing partition %d up\n", partid);

        /*
         * Register the remote partition's AMOs with SAL so it can handle
         * and clean up errors within that address range should the remote
         * partition go down. We don't unregister this range because it is
         * difficult to tell when outstanding writes to the remote partition
         * are finished and thus when it is safe to unregister. This should
         * not result in wasted space in the SAL xp_addr_region table because
         * we should get the same page for remote_amos_page_pa after module
         * reloads and system reboots.
         */
        if (sn_register_xp_addr_region(part->remote_amos_page_pa,
                                       PAGE_SIZE, 1) < 0) {
                dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
                         "xp_addr region\n", partid);

                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
                part->remote_rp_pa = 0;
                return 0;
        }

        xpc_allow_hb(partid, xpc_vars);
        xpc_IPI_send_activated(part);

        /*
         * xpc_partition_up() holds this thread and marks this partition as
         * XPC_P_ACTIVE by way of xpc_mark_partition_active(), which is
         * called from xpc_make_first_contact().
         */
        xpc_partition_up(part);

        xpc_disallow_hb(partid, xpc_vars);
        xpc_mark_partition_inactive(part);

        if (part->reason == xpReactivating) {
                /* interrupting ourselves results in activating partition */
                xpc_IPI_send_reactivate(part);
        }

        return 0;
}

void
xpc_activate_partition(struct xpc_partition *part)
{
        short partid = XPC_PARTID(part);
        unsigned long irq_flags;
        struct task_struct *kthread;

        spin_lock_irqsave(&part->act_lock, irq_flags);

        DBUG_ON(part->act_state != XPC_P_INACTIVE);

        part->act_state = XPC_P_ACTIVATION_REQ;
        XPC_SET_REASON(part, xpCloneKThread, __LINE__);

        spin_unlock_irqrestore(&part->act_lock, irq_flags);

        kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
                              partid);
        if (IS_ERR(kthread)) {
                spin_lock_irqsave(&part->act_lock, irq_flags);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
                spin_unlock_irqrestore(&part->act_lock, irq_flags);
        }
}

/*
 * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
 * than one partition, we use an AMO_t structure per partition to indicate
 * whether a partition has sent an IPI or not.  If it has, then wake up the
 * associated kthread to handle it.
 *
 * All SGI_XPC_NOTIFY IRQs received by XPC are the result of IPIs sent by XPC
 * running on other partitions.
 *
 * Noteworthy Arguments:
 *
 *      irq - Interrupt ReQuest number. NOT USED.
 *
 *      dev_id - partid of IPI's potential sender.
 */
irqreturn_t
xpc_notify_IRQ_handler(int irq, void *dev_id)
{
        short partid = (short)(u64)dev_id;
        struct xpc_partition *part = &xpc_partitions[partid];

        DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);

        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                xpc_part_deref(part);
        }
        return IRQ_HANDLED;
}
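
/*
 * Illustration (not part of the original source): because partid is a small
 * integer, XPC smuggles it through the opaque dev_id pointer instead of
 * pointing dev_id at real data; the handler above simply casts it back.  A
 * hedged sketch of what the registration side would look like:
 */
#if 0
ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
                  "xpc notify", (void *)(u64)partid);
#endif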

/*
 * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
 * was received.
 */
void
xpc_dropped_IPI_check(struct xpc_partition *part)
{
        if (xpc_part_ref(part)) {
                xpc_check_for_channel_activity(part);

                part->dropped_IPI_timer.expires = jiffies +
                    XPC_P_DROPPED_IPI_WAIT;
                add_timer(&part->dropped_IPI_timer);
                xpc_part_deref(part);
        }
}

void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
        int idle = atomic_read(&ch->kthreads_idle);
        int assigned = atomic_read(&ch->kthreads_assigned);
        int wakeup;

        DBUG_ON(needed <= 0);

        if (idle > 0) {
                wakeup = (needed > idle) ? idle : needed;
                needed -= wakeup;

                dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
                        "channel=%d\n", wakeup, ch->partid, ch->number);

                /* only wakeup the requested number of kthreads */
                wake_up_nr(&ch->idle_wq, wakeup);
        }

        if (needed <= 0)
                return;

        if (needed + assigned > ch->kthreads_assigned_limit) {
                needed = ch->kthreads_assigned_limit - assigned;
                if (needed <= 0)
                        return;
        }

        dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
                needed, ch->partid, ch->number);

        xpc_create_kthreads(ch, needed, 0);
}
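
/*
 * Style note (illustrative, not in the original source): the open-coded
 * clamp in xpc_activate_kthreads() above could also be written with the
 * type-checked min() helper from <linux/kernel.h>:
 *
 *      wakeup = min(needed, idle);
 */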

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
        do {
                /* deliver messages to their intended recipients */

                while (ch->w_local_GP.get < ch->w_remote_GP.put &&
                       !(ch->flags & XPC_C_DISCONNECTING)) {
                        xpc_deliver_msg(ch);
                }

                if (atomic_inc_return(&ch->kthreads_idle) >
                    ch->kthreads_idle_limit) {
                        /* too many idle kthreads on this channel */
                        atomic_dec(&ch->kthreads_idle);
                        break;
                }

                dev_dbg(xpc_chan, "idle kthread calling "
                        "wait_event_interruptible_exclusive()\n");

                (void)wait_event_interruptible_exclusive(ch->idle_wq,
                                (ch->w_local_GP.get < ch->w_remote_GP.put ||
                                 (ch->flags & XPC_C_DISCONNECTING)));

                atomic_dec(&ch->kthreads_idle);

        } while (!(ch->flags & XPC_C_DISCONNECTING));
}

static int
xpc_kthread_start(void *args)
{
        short partid = XPC_UNPACK_ARG1(args);
        u16 ch_number = XPC_UNPACK_ARG2(args);
        struct xpc_partition *part = &xpc_partitions[partid];
        struct xpc_channel *ch;
        int n_needed;
        unsigned long irq_flags;

        dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
                partid, ch_number);

        ch = &part->channels[ch_number];

        if (!(ch->flags & XPC_C_DISCONNECTING)) {

                /* let registerer know that connection has been established */

                spin_lock_irqsave(&ch->lock, irq_flags);
                if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
                        ch->flags |= XPC_C_CONNECTEDCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        xpc_connected_callout(ch);

                        spin_lock_irqsave(&ch->lock, irq_flags);
                        ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);

                        /*
                         * It is possible that, while the callout was being
                         * made, the remote partition sent some messages.
                         * If that is the case, we may need to activate
                         * additional kthreads to help deliver them. We only
                         * need one less than the total #of messages to
                         * deliver.
                         */
                        n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
                        if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
                                xpc_activate_kthreads(ch, n_needed);

                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }

                xpc_kthread_waitmsgs(part, ch);
        }

        /* let registerer know that connection is disconnecting */

        spin_lock_irqsave(&ch->lock, irq_flags);
        if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
            !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                xpc_disconnect_callout(ch, xpDisconnecting);

                spin_lock_irqsave(&ch->lock, irq_flags);
                ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
        }
        spin_unlock_irqrestore(&ch->lock, irq_flags);

        if (atomic_dec_return(&ch->kthreads_assigned) == 0) {
                if (atomic_dec_return(&part->nchannels_engaged) == 0) {
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        xpc_msgqueue_deref(ch);

        dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
                partid, ch_number);

        xpc_part_deref(part);
        return 0;
}
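
/*
 * Illustration (not part of the original source): XPC_PACK_ARGS() and
 * XPC_UNPACK_ARGn() live in xpc.h; presumably they pack the partid and
 * channel number into the two halves of a u64 so the pair can travel
 * through kthread_run()'s single void * argument without any allocation.
 * A hedged sketch with hypothetical EXAMPLE_* names:
 */
#if 0
#define EXAMPLE_PACK_ARGS(a1, a2) \
        ((((u64)(a1)) & 0xffffffffUL) | ((((u64)(a2)) & 0xffffffffUL) << 32))
#define EXAMPLE_UNPACK_ARG1(args)       (((u64)(args)) & 0xffffffffUL)
#define EXAMPLE_UNPACK_ARG2(args)       ((((u64)(args)) >> 32) & 0xffffffffUL)
#endif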

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
                    int ignore_disconnecting)
{
        unsigned long irq_flags;
        u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
        struct xpc_partition *part = &xpc_partitions[ch->partid];
        struct task_struct *kthread;

        while (needed-- > 0) {

                /*
                 * The following is done on behalf of the newly created
                 * kthread. That kthread is responsible for undoing it
                 * before it exits.
                 */
                if (ignore_disconnecting) {
                        if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
                                /* kthreads assigned had gone to zero */
                                BUG_ON(!(ch->flags &
                                         XPC_C_DISCONNECTINGCALLOUT_MADE));
                                break;
                        }

                } else if (ch->flags & XPC_C_DISCONNECTING) {
                        break;

                } else if (atomic_inc_return(&ch->kthreads_assigned) == 1) {
                        if (atomic_inc_return(&part->nchannels_engaged) == 1)
                                xpc_mark_partition_engaged(part);
                }
                (void)xpc_part_ref(part);
                xpc_msgqueue_ref(ch);

                kthread = kthread_run(xpc_kthread_start, (void *)args,
                                      "xpc%02dc%d", ch->partid, ch->number);
                if (IS_ERR(kthread)) {
                        /* the fork failed */

                        /*
                         * NOTE: if (ignore_disconnecting &&
                         * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
                         * then we'll deadlock if all other kthreads assigned
                         * to this channel are blocked in the channel's
                         * registerer, because the only thing that will unblock
                         * them is the xpDisconnecting callout that this
                         * failed kthread_run() would have made.
                         */

                        if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
                            atomic_dec_return(&part->nchannels_engaged) == 0) {
                                xpc_mark_partition_disengaged(part);
                                xpc_IPI_send_disengage(part);
                        }
                        xpc_msgqueue_deref(ch);
                        xpc_part_deref(part);

                        if (atomic_read(&ch->kthreads_assigned) <
                            ch->kthreads_idle_limit) {
                                /*
                                 * Flag this as an error only if we have an
                                 * insufficient #of kthreads for the channel
                                 * to function.
                                 */
                                spin_lock_irqsave(&ch->lock, irq_flags);
                                XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
                                                       &irq_flags);
                                spin_unlock_irqrestore(&ch->lock, irq_flags);
                        }
                        break;
                }
        }
}

void
xpc_disconnect_wait(int ch_number)
{
        unsigned long irq_flags;
        short partid;
        struct xpc_partition *part;
        struct xpc_channel *ch;
        int wakeup_channel_mgr;

        /* now wait for all callouts to the caller's function to cease */
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];

                if (!xpc_part_ref(part))
                        continue;

                ch = &part->channels[ch_number];

                if (!(ch->flags & XPC_C_WDISCONNECT)) {
                        xpc_part_deref(part);
                        continue;
                }

                wait_for_completion(&ch->wdisconnect_wait);

                spin_lock_irqsave(&ch->lock, irq_flags);
                DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
                wakeup_channel_mgr = 0;

                if (ch->delayed_IPI_flags) {
                        if (part->act_state != XPC_P_DEACTIVATING) {
                                spin_lock(&part->IPI_lock);
                                XPC_SET_IPI_FLAGS(part->local_IPI_amo,
                                                  ch->number,
                                                  ch->delayed_IPI_flags);
                                spin_unlock(&part->IPI_lock);
                                wakeup_channel_mgr = 1;
                        }
                        ch->delayed_IPI_flags = 0;
                }

                ch->flags &= ~XPC_C_WDISCONNECT;
                spin_unlock_irqrestore(&ch->lock, irq_flags);

                if (wakeup_channel_mgr)
                        xpc_wakeup_channel_mgr(part);

                xpc_part_deref(part);
        }
}

static void
xpc_do_exit(enum xp_retval reason)
{
        short partid;
        int active_part_count, printed_waiting_msg = 0;
        struct xpc_partition *part;
        unsigned long printmsg_time, disengage_request_timeout = 0;

        /* an 'rmmod XPC' and a 'reboot' cannot both end up here together */
        DBUG_ON(xpc_exiting == 1);

        /*
         * Let the heartbeat checker thread and the discovery thread
         * (if one is running) know that they should exit. Also wake up
         * the heartbeat checker thread in case it's sleeping.
         */
        xpc_exiting = 1;
        wake_up_interruptible(&xpc_act_IRQ_wq);

        /* ignore all incoming interrupts */
        free_irq(SGI_XPC_ACTIVATE, NULL);

        /* wait for the discovery thread to exit */
        wait_for_completion(&xpc_discovery_exited);

        /* wait for the heartbeat checker thread to exit */
        wait_for_completion(&xpc_hb_checker_exited);

        /* sleep for 1/3 of a second or so */
        (void)msleep_interruptible(300);

        /* wait for all partitions to become inactive */

        printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
        xpc_disengage_request_timedout = 0;

        do {
                active_part_count = 0;

                for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                        part = &xpc_partitions[partid];

                        if (xpc_partition_disengaged(part) &&
                            part->act_state == XPC_P_INACTIVE) {
                                continue;
                        }

                        active_part_count++;

                        XPC_DEACTIVATE_PARTITION(part, reason);

                        if (part->disengage_request_timeout >
                            disengage_request_timeout) {
                                disengage_request_timeout =
                                    part->disengage_request_timeout;
                        }
                }

                if (xpc_partition_engaged(-1UL)) {
                        if (time_after(jiffies, printmsg_time)) {
                                dev_info(xpc_part, "waiting for remote "
                                         "partitions to disengage, timeout in "
                                         "%ld seconds\n",
                                         (disengage_request_timeout - jiffies)
                                         / HZ);
                                printmsg_time = jiffies +
                                    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
                                printed_waiting_msg = 1;
                        }

                } else if (active_part_count > 0) {
                        if (printed_waiting_msg) {
                                dev_info(xpc_part, "waiting for local partition"
                                         " to disengage\n");
                                printed_waiting_msg = 0;
                        }

                } else {
                        if (!xpc_disengage_request_timedout) {
                                dev_info(xpc_part, "all partitions have "
                                         "disengaged\n");
                        }
                        break;
                }

                /* sleep for 1/3 of a second or so */
                (void)msleep_interruptible(300);

        } while (1);

        DBUG_ON(xpc_partition_engaged(-1UL));

        /* indicate to others that our reserved page is uninitialized */
        xpc_rsvd_page->vars_pa = 0;

        /* now it's time to eliminate our heartbeat */
        del_timer_sync(&xpc_hb_timer);
        DBUG_ON(xpc_vars->heartbeating_to_mask != 0);

        if (reason == xpUnloading) {
                /* take ourselves off of the reboot_notifier_list */
                (void)unregister_reboot_notifier(&xpc_reboot_notifier);

                /* take ourselves off of the die_notifier list */
                (void)unregister_die_notifier(&xpc_die_notifier);
        }

        /* close down protections for IPI operations */
        xpc_restrict_IPI_ops();

        /* clear the interface to XPC's functions */
        xpc_clear_interface();

        if (xpc_sysctl)
                unregister_sysctl_table(xpc_sysctl);

        kfree(xpc_remote_copy_buffer_base);
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
        enum xp_retval reason;

        switch (event) {
        case SYS_RESTART:
                reason = xpSystemReboot;
                break;
        case SYS_HALT:
                reason = xpSystemHalt;
                break;
        case SYS_POWER_OFF:
                reason = xpSystemPoweroff;
                break;
        default:
                reason = xpSystemGoingDown;
        }

        xpc_do_exit(reason);
        return NOTIFY_DONE;
}

/*
 * Notify other partitions to disengage from all references to our memory.
 */
static void
xpc_die_disengage(void)
{
        struct xpc_partition *part;
        short partid;
        unsigned long engaged;
        long time, printmsg_time, disengage_request_timeout;

        /* keep xpc_hb_checker thread from doing anything (just in case) */
        xpc_exiting = 1;

        xpc_vars->heartbeating_to_mask = 0;     /* indicate we're deactivated */

        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];

                if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
                    remote_vars_version)) {

                        /* just in case it was left set by an earlier XPC */
                        xpc_clear_partition_engaged(1UL << partid);
                        continue;
                }

                if (xpc_partition_engaged(1UL << partid) ||
                    part->act_state != XPC_P_INACTIVE) {
                        xpc_request_partition_disengage(part);
                        xpc_mark_partition_disengaged(part);
                        xpc_IPI_send_disengage(part);
                }
        }

        time = rtc_time();
        printmsg_time = time +
            (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
        disengage_request_timeout = time +
            (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);

        /* wait for all other partitions to disengage from us */

        while (1) {
                engaged = xpc_partition_engaged(-1UL);
                if (!engaged) {
                        dev_info(xpc_part, "all partitions have disengaged\n");
                        break;
                }

                time = rtc_time();
                if (time >= disengage_request_timeout) {
                        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                                if (engaged & (1UL << partid)) {
                                        dev_info(xpc_part, "disengage from "
                                                 "remote partition %d timed "
                                                 "out\n", partid);
                                }
                        }
                        break;
                }

                if (time >= printmsg_time) {
                        dev_info(xpc_part, "waiting for remote partitions to "
                                 "disengage, timeout in %ld seconds\n",
                                 (disengage_request_timeout - time) /
                                 sn_rtc_cycles_per_second);
                        printmsg_time = time +
                            (XPC_DISENGAGE_PRINTMSG_INTERVAL *
                             sn_rtc_cycles_per_second);
                }
        }
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case, we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater may be offline
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
        switch (event) {
        case DIE_MACHINE_RESTART:
        case DIE_MACHINE_HALT:
                xpc_die_disengage();
                break;

        case DIE_KDEBUG_ENTER:
                /* Should lack of heartbeat be ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
                xpc_vars->heartbeat++;
                xpc_vars->heartbeat_offline = 1;
                break;

        case DIE_KDEBUG_LEAVE:
                /* Is lack of heartbeat being ignored by other partitions? */
                if (!xpc_kdebug_ignore)
                        break;

                /* fall through */
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
                xpc_vars->heartbeat++;
                xpc_vars->heartbeat_offline = 0;
                break;
        }

        return NOTIFY_DONE;
}

int __init
xpc_init(void)
{
        int ret;
        short partid;
        struct xpc_partition *part;
        struct task_struct *kthread;
        size_t buf_size;

        if (!ia64_platform_is("sn2"))
                return -ENODEV;

        buf_size = max(XPC_RP_VARS_SIZE,
                       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
        xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
                                                               GFP_KERNEL,
                                                  &xpc_remote_copy_buffer_base);
        if (xpc_remote_copy_buffer == NULL)
                return -ENOMEM;

        snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
        snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

        xpc_sysctl = register_sysctl_table(xpc_sys_dir);

        /*
         * The first few fields of each entry of xpc_partitions[] need to
         * be initialized now so that calls to xpc_connect() and
         * xpc_disconnect() can be made prior to the activation of any remote
         * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
         * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
         * PARTITION HAS BEEN ACTIVATED.
         */
        for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
                part = &xpc_partitions[partid];

                DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

                part->act_IRQ_rcvd = 0;
                spin_lock_init(&part->act_lock);
                part->act_state = XPC_P_INACTIVE;
                XPC_SET_REASON(part, 0, 0);

                init_timer(&part->disengage_request_timer);
                part->disengage_request_timer.function =
                    xpc_timeout_partition_disengage_request;
                part->disengage_request_timer.data = (unsigned long)part;

                part->setup_state = XPC_P_UNSET;
                init_waitqueue_head(&part->teardown_wq);
                atomic_set(&part->references, 0);
        }

        /*
         * Open up protections for IPI operations (and AMO operations on
         * Shub 1.1 systems).
         */
        xpc_allow_IPI_ops();

        /*
         * Interrupts being processed will increment this atomic variable and
         * awaken the heartbeat thread which will process the interrupts.
         */
        atomic_set(&xpc_act_IRQ_rcvd, 0);

        /*
         * This is safe to do before the xpc_hb_checker thread has started
         * because the handler merely wakes a wait queue.  If an interrupt is
         * received before the thread is waiting, the thread will not go to
         * sleep, but rather immediately process the interrupt.
         */
        ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
                          "xpc hb", NULL);
        if (ret != 0) {
                dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
                        "errno=%d\n", -ret);

                xpc_restrict_IPI_ops();

                if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);

                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
        }

        /*
         * Fill the partition reserved page with the information needed by
         * other partitions to discover we are alive and establish initial
         * communications.
         */
        xpc_rsvd_page = xpc_rsvd_page_init();
        if (xpc_rsvd_page == NULL) {
                dev_err(xpc_part, "could not setup our reserved page\n");

                free_irq(SGI_XPC_ACTIVATE, NULL);
                xpc_restrict_IPI_ops();

                if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);

                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
        }

        /* add ourselves to the reboot_notifier_list */
        ret = register_reboot_notifier(&xpc_reboot_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register reboot notifier\n");

        /* add ourselves to the die_notifier list */
        ret = register_die_notifier(&xpc_die_notifier);
        if (ret != 0)
                dev_warn(xpc_part, "can't register die notifier\n");

        init_timer(&xpc_hb_timer);
        xpc_hb_timer.function = xpc_hb_beater;

        /*
         * The real workhorse behind xpc.  This processes incoming
         * interrupts and monitors remote heartbeats.
         */
        kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking hb check thread\n");

                /* indicate to others that our reserved page is uninitialized */
                xpc_rsvd_page->vars_pa = 0;

                /* take ourselves off of the reboot_notifier_list */
                (void)unregister_reboot_notifier(&xpc_reboot_notifier);

                /* take ourselves off of the die_notifier list */
                (void)unregister_die_notifier(&xpc_die_notifier);

                del_timer_sync(&xpc_hb_timer);
                free_irq(SGI_XPC_ACTIVATE, NULL);
                xpc_restrict_IPI_ops();

                if (xpc_sysctl)
                        unregister_sysctl_table(xpc_sysctl);

                kfree(xpc_remote_copy_buffer_base);
                return -EBUSY;
        }

        /*
         * Startup a thread that will attempt to discover other partitions to
         * activate based on info provided by SAL. This new thread is short
         * lived and will exit once discovery is complete.
         */
        kthread = kthread_run(xpc_initiate_discovery, NULL,
                              XPC_DISCOVERY_THREAD_NAME);
        if (IS_ERR(kthread)) {
                dev_err(xpc_part, "failed while forking discovery thread\n");

                /* mark this new thread as a non-starter */
                complete(&xpc_discovery_exited);

                xpc_do_exit(xpUnloading);
                return -EBUSY;
        }

        /* set the interface to point at XPC's functions */
        xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
                          xpc_initiate_allocate, xpc_initiate_send,
                          xpc_initiate_send_notify, xpc_initiate_received,
                          xpc_initiate_partid_to_nasids);

        return 0;
}
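
/*
 * Style note (illustration only; setup_a()/undo_a() and friends are
 * hypothetical): the repeated unwind blocks in xpc_init() above are more
 * commonly written with the kernel's goto-based cleanup idiom, which keeps
 * each teardown step in exactly one place:
 */
#if 0
static int __init
example_init(void)
{
        int ret;

        ret = setup_a();
        if (ret != 0)
                return ret;

        ret = setup_b();
        if (ret != 0)
                goto out_undo_a;

        return 0;

out_undo_a:
        undo_a();
        return ret;
}
#endif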

module_init(xpc_init);

void __exit
xpc_exit(void)
{
        xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
                 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
                 "heartbeat checks.");

module_param(xpc_disengage_request_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
                 "for disengage request to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
                 "other partitions when dropping into kdebug.");
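
/*
 * Example (illustrative; assumes the module is built as "xpc"): the
 * parameters above can be supplied at load time, e.g.
 *
 *      # modprobe xpc xpc_hb_interval=5 xpc_hb_check_interval=20 \
 *                     xpc_kdebug_ignore=1
 *
 * The permission argument of 0 in module_param() means the values are not
 * exposed under /sys/module/xpc/parameters/ after loading; the hb and
 * disengage tunables remain adjustable via the /proc/sys/xpc entries
 * registered in xpc_init().
 */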