arch/x86/platform/uv/tlb_uv.c
1 /*
2  *      SGI UltraViolet TLB flush routines.
3  *
4  *      (c) 2008-2011 Cliff Wickman <cpw@sgi.com>, SGI.
5  *
6  *      This code is released under the GNU General Public License version 2 or
7  *      later.
8  */
9 #include <linux/seq_file.h>
10 #include <linux/proc_fs.h>
11 #include <linux/debugfs.h>
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/delay.h>
15
16 #include <asm/mmu_context.h>
17 #include <asm/uv/uv.h>
18 #include <asm/uv/uv_mmrs.h>
19 #include <asm/uv/uv_hub.h>
20 #include <asm/uv/uv_bau.h>
21 #include <asm/apic.h>
22 #include <asm/idle.h>
23 #include <asm/tsc.h>
24 #include <asm/irq_vectors.h>
25 #include <asm/timer.h>
26
27 /* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
28 static int timeout_base_ns[] = {
29                 20,
30                 160,
31                 1280,
32                 10240,
33                 81920,
34                 655360,
35                 5242880,
36                 167772160
37 };
38
39 static int timeout_us;
40 static int nobau;
41 static int baudisabled;
42 static spinlock_t disable_lock;
43 static cycles_t congested_cycles;
44
45 /* tunables: */
46 static int max_concurr          = MAX_BAU_CONCURRENT;
47 static int max_concurr_const    = MAX_BAU_CONCURRENT;
48 static int plugged_delay        = PLUGGED_DELAY;
49 static int plugsb4reset         = PLUGSB4RESET;
50 static int timeoutsb4reset      = TIMEOUTSB4RESET;
51 static int ipi_reset_limit      = IPI_RESET_LIMIT;
52 static int complete_threshold   = COMPLETE_THRESHOLD;
53 static int congested_respns_us  = CONGESTED_RESPONSE_US;
54 static int congested_reps       = CONGESTED_REPS;
55 static int congested_period     = CONGESTED_PERIOD;
56
57 static struct tunables tunables[] = {
58         {&max_concurr, MAX_BAU_CONCURRENT}, /* must be [0] */
59         {&plugged_delay, PLUGGED_DELAY},
60         {&plugsb4reset, PLUGSB4RESET},
61         {&timeoutsb4reset, TIMEOUTSB4RESET},
62         {&ipi_reset_limit, IPI_RESET_LIMIT},
63         {&complete_threshold, COMPLETE_THRESHOLD},
64         {&congested_respns_us, CONGESTED_RESPONSE_US},
65         {&congested_reps, CONGESTED_REPS},
66         {&congested_period, CONGESTED_PERIOD}
67 };
68
69 static struct dentry *tunables_dir;
70 static struct dentry *tunables_file;
71
72 /* these correspond to the statistics printed by ptc_seq_show() */
73 static char *stat_description[] = {
74         "sent:     number of shootdown messages sent",
75         "stime:    time spent sending messages",
76         "numuvhubs: number of hubs targeted with shootdown",
77         "numuvhubs16: number times 16 or more hubs targeted",
78         "numuvhubs8: number times 8 or more hubs targeted",
79         "numuvhubs4: number times 4 or more hubs targeted",
80         "numuvhubs2: number times 2 or more hubs targeted",
81         "numuvhubs1: number times 1 hub targeted",
82         "numcpus:  number of cpus targeted with shootdown",
83         "dto:      number of destination timeouts",
84         "retries:  destination timeout retries sent",
85         "rok:      destination timeouts successfully retried",
86         "resetp:   ipi-style resource resets for plugs",
87         "resett:   ipi-style resource resets for timeouts",
88         "giveup:   fall-backs to ipi-style shootdowns",
89         "sto:      number of source timeouts",
90         "bz:       number of stay-busy's",
91         "throt:    number times spun in throttle",
92         "swack:   image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE",
93         "recv:     shootdown messages received",
94         "rtime:    time spent processing messages",
95         "all:      shootdown all-tlb messages",
96         "one:      shootdown one-tlb messages",
97         "mult:     interrupts that found multiple messages",
98         "none:     interrupts that found no messages",
99         "retry:    number of retry messages processed",
100         "canc:     number messages canceled by retries",
101         "nocan:    number retries that found nothing to cancel",
102         "reset:    number of ipi-style reset requests processed",
103         "rcan:     number messages canceled by reset requests",
104         "disable:  number times use of the BAU was disabled",
105         "enable:   number times use of the BAU was re-enabled"
106 };
107
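/*
 * Boot-time handler for the 'nobau' kernel parameter.  With nobau set,
 * uv_flush_tlb_others() returns the caller's cpumask untouched, so TLB
 * shootdowns fall back to the generic IPI path.
 */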
108 static int __init
109 setup_nobau(char *arg)
110 {
111         nobau = 1;
112         return 0;
113 }
114 early_param("nobau", setup_nobau);
115
116 /* base pnode in this partition */
117 static int uv_base_pnode __read_mostly;
118 /* position of pnode (which is nasid>>1): */
119 static int uv_nshift __read_mostly;
120 static unsigned long uv_mmask __read_mostly;
121
122 static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
123 static DEFINE_PER_CPU(struct bau_control, bau_control);
124 static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
125
126 /*
127  * Determine the first node on a uvhub. 'Nodes' are used for kernel
128  * memory allocation.
129  */
130 static int __init uvhub_to_first_node(int uvhub)
131 {
132         int node, b;
133
134         for_each_online_node(node) {
135                 b = uv_node_to_blade_id(node);
136                 if (uvhub == b)
137                         return node;
138         }
139         return -1;
140 }
141
142 /*
143  * Determine the apicid of the first cpu on a uvhub.
144  */
145 static int __init uvhub_to_first_apicid(int uvhub)
146 {
147         int cpu;
148
149         for_each_present_cpu(cpu)
150                 if (uvhub == uv_cpu_to_blade_id(cpu))
151                         return per_cpu(x86_cpu_to_apicid, cpu);
152         return -1;
153 }
154
155 /*
156  * Free a software acknowledge hardware resource by clearing its Pending
157  * bit. This will return a reply to the sender.
158  * If the message has timed out, a reply has already been sent by the
159  * hardware but the resource has not been released. In that case our
160  * clear of the Timeout bit (as well) will free the resource. No reply will
161  * be sent (the hardware will only do one reply per message).
162  */
163 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
164 {
165         unsigned long dw;
166         struct bau_pq_entry *msg;
167
168         msg = mdp->msg;
169         if (!msg->canceled) {
170                 dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
171                 write_mmr_sw_ack(dw);
172         }
173         msg->replied_to = 1;
174         msg->swack_vec = 0;
175 }
176
177 /*
178  * Process the receipt of a RETRY message
179  */
180 static void bau_process_retry_msg(struct msg_desc *mdp,
181                                         struct bau_control *bcp)
182 {
183         int i;
184         int cancel_count = 0;
185         unsigned long msg_res;
186         unsigned long mmr = 0;
187         struct bau_pq_entry *msg = mdp->msg;
188         struct bau_pq_entry *msg2;
189         struct ptc_stats *stat = bcp->statp;
190
191         stat->d_retries++;
192         /*
193          * cancel any message from msg+1 to the retry itself
194          */
195         for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
196                 if (msg2 > mdp->queue_last)
197                         msg2 = mdp->queue_first;
198                 if (msg2 == msg)
199                         break;
200
201                 /* same conditions for cancellation as do_reset */
202                 if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
203                     (msg2->swack_vec) && ((msg2->swack_vec &
204                         msg->swack_vec) == 0) &&
205                     (msg2->sending_cpu == msg->sending_cpu) &&
206                     (msg2->msg_type != MSG_NOOP)) {
207                         mmr = read_mmr_sw_ack();
208                         msg_res = msg2->swack_vec;
209                         /*
210                          * This is a message retry; clear the resources held
211                          * by the previous message only if they timed out.
212                          * If it has not timed out we have an unexpected
213                          * situation to report.
214                          */
215                         if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
216                                 unsigned long mr;
217                                 /*
218                                  * is the resource timed out?
219                                  * make everyone ignore the cancelled message.
220                                  */
221                                 msg2->canceled = 1;
222                                 stat->d_canceled++;
223                                 cancel_count++;
224                                 mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
225                                 write_mmr_sw_ack(mr);
226                         }
227                 }
228         }
229         if (!cancel_count)
230                 stat->d_nocanceled++;
231 }
232
233 /*
234  * Do all the things a cpu should do for a TLB shootdown message.
235  * Other cpus may come here at the same time for this message.
236  */
237 static void bau_process_message(struct msg_desc *mdp,
238                                         struct bau_control *bcp)
239 {
240         short socket_ack_count = 0;
241         short *sp;
242         struct atomic_short *asp;
243         struct ptc_stats *stat = bcp->statp;
244         struct bau_pq_entry *msg = mdp->msg;
245         struct bau_control *smaster = bcp->socket_master;
246
247         /*
248          * This must be a normal message, or retry of a normal message
249          */
250         if (msg->address == TLB_FLUSH_ALL) {
251                 local_flush_tlb();
252                 stat->d_alltlb++;
253         } else {
254                 __flush_tlb_one(msg->address);
255                 stat->d_onetlb++;
256         }
257         stat->d_requestee++;
258
259         /*
260          * One cpu on each uvhub has the additional job on a RETRY
261          * of releasing the resource held by the message that is
262          * being retried.  That message is identified by sending
263          * cpu number.
264          */
265         if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
266                 bau_process_retry_msg(mdp, bcp);
267
268         /*
269          * This is a swack message, so we have to reply to it.
270          * Count each responding cpu on the socket. This avoids
271          * pinging the count's cache line back and forth between
272          * the sockets.
273          */
274         sp = &smaster->socket_acknowledge_count[mdp->msg_slot];
275         asp = (struct atomic_short *)sp;
276         socket_ack_count = atom_asr(1, asp);
277         if (socket_ack_count == bcp->cpus_in_socket) {
278                 int msg_ack_count;
279                 /*
280                  * Both sockets dump their completed count total into
281                  * the message's count.
282                  */
283                 smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
284                 asp = (struct atomic_short *)&msg->acknowledge_count;
285                 msg_ack_count = atom_asr(socket_ack_count, asp);
286
287                 if (msg_ack_count == bcp->cpus_in_uvhub) {
288                         /*
289                          * All cpus in uvhub saw it; reply
290                          */
291                         reply_to_message(mdp, bcp);
292                 }
293         }
294
295         return;
296 }
297
298 /*
299  * Determine the first cpu on a pnode.
300  */
301 static int pnode_to_first_cpu(int pnode, struct bau_control *smaster)
302 {
303         int cpu;
304         struct hub_and_pnode *hpp;
305
306         for_each_present_cpu(cpu) {
307                 hpp = &smaster->thp[cpu];
308                 if (pnode == hpp->pnode)
309                         return cpu;
310         }
311         return -1;
312 }
313
314 /*
315  * Last resort when we get a large number of destination timeouts is
316  * to clear resources held by a given cpu.
317  * Do this with IPI so that all messages in the BAU message queue
318  * can be identified by their nonzero swack_vec field.
319  *
320  * This is entered for a single cpu on the uvhub.
321  * The sender wants this uvhub to free a specific message's
322  * swack resources.
323  */
324 static void do_reset(void *ptr)
325 {
326         int i;
327         struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id());
328         struct reset_args *rap = (struct reset_args *)ptr;
329         struct bau_pq_entry *msg;
330         struct ptc_stats *stat = bcp->statp;
331
332         stat->d_resets++;
333         /*
334          * We're looking for the given sender, and
335          * will free its swack resource.
336          * If all cpus finally responded after the timeout, its
337          * message 'replied_to' was set.
338          */
339         for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
340                 unsigned long msg_res;
341                 /* do_reset: same conditions for cancellation as
342                    bau_process_retry_msg() */
343                 if ((msg->replied_to == 0) &&
344                     (msg->canceled == 0) &&
345                     (msg->sending_cpu == rap->sender) &&
346                     (msg->swack_vec) &&
347                     (msg->msg_type != MSG_NOOP)) {
348                         unsigned long mmr;
349                         unsigned long mr;
350                         /*
351                          * make everyone else ignore this message
352                          */
353                         msg->canceled = 1;
354                         /*
355                          * only reset the resource if it is still pending
356                          */
357                         mmr = read_mmr_sw_ack();
358                         msg_res = msg->swack_vec;
359                         mr = (msg_res << UV_SW_ACK_NPENDING) | msg_res;
360                         if (mmr & msg_res) {
361                                 stat->d_rcanceled++;
362                                 write_mmr_sw_ack(mr);
363                         }
364                 }
365         }
366         return;
367 }
368
369 /*
370  * Use IPI to get all target uvhubs to release resources held by
371  * a given sending cpu number.
372  */
373 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
374 {
375         int pnode;
376         int apnode;
377         int maskbits;
378         int sender = bcp->cpu;
379         cpumask_t *mask = bcp->uvhub_master->cpumask;
380         struct bau_control *smaster = bcp->socket_master;
381         struct reset_args reset_args;
382
383         reset_args.sender = sender;
384         cpus_clear(*mask);
385         /* find a single cpu for each uvhub in this distribution mask */
386         maskbits = sizeof(struct pnmask) * BITSPERBYTE;
387         /* each bit is a pnode relative to the partition base pnode */
388         for (pnode = 0; pnode < maskbits; pnode++) {
389                 int cpu;
390                 if (!bau_uvhub_isset(pnode, distribution))
391                         continue;
392                 apnode = pnode + bcp->partition_base_pnode;
393                 cpu = pnode_to_first_cpu(apnode, smaster);
394                 cpu_set(cpu, *mask);
395         }
396
397         /* IPI all cpus; preemption is already disabled */
398         smp_call_function_many(mask, do_reset, (void *)&reset_args, 1);
399         return;
400 }
401
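/*
 * Convert a TSC cycle count to microseconds using this cpu's cyc2ns
 * scale factor.
 */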
402 static inline unsigned long cycles_2_us(unsigned long long cyc)
403 {
404         unsigned long long ns;
405         unsigned long us;
406         int cpu = smp_processor_id();
407
408         ns =  (cyc * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR;
409         us = ns / 1000;
410         return us;
411 }
412
413 /*
414  * wait for all cpus on this hub to finish their sends and go quiet
415  * leaves uvhub_quiesce set so that no new broadcasts are started by
416  * uv_flush_send_and_wait()
417  */
418 static inline void quiesce_local_uvhub(struct bau_control *hmaster)
419 {
420         atom_asr(1, (struct atomic_short *)&hmaster->uvhub_quiesce);
421 }
422
423 /*
424  * mark this quiet-requestor as done
425  */
426 static inline void end_uvhub_quiesce(struct bau_control *hmaster)
427 {
428         atom_asr(-1, (struct atomic_short *)&hmaster->uvhub_quiesce);
429 }
430
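/*
 * Read this cpu's 2-bit descriptor status field from a UV1 activation
 * status MMR.
 */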
431 static unsigned long uv1_read_status(unsigned long mmr_offset, int right_shift)
432 {
433         unsigned long descriptor_status;
434
435         descriptor_status = uv_read_local_mmr(mmr_offset);
436         descriptor_status >>= right_shift;
437         descriptor_status &= UV_ACT_STATUS_MASK;
438         return descriptor_status;
439 }
440
441 /*
442  * Wait for completion of a broadcast software ack message
443  * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
444  */
445 static int uv1_wait_completion(struct bau_desc *bau_desc,
446                                 unsigned long mmr_offset, int right_shift,
447                                 struct bau_control *bcp, long try)
448 {
449         unsigned long descriptor_status;
450         cycles_t ttm;
451         struct ptc_stats *stat = bcp->statp;
452
453         descriptor_status = uv1_read_status(mmr_offset, right_shift);
454         /* spin on the status MMR, waiting for it to go idle */
455         while ((descriptor_status != DS_IDLE)) {
456                 /*
457                  * Our software ack messages may be blocked because
458                  * there are no swack resources available.  As long
459                  * as none of them has timed out hardware will NACK
460                  * our message and its state will stay IDLE.
461                  */
462                 if (descriptor_status == DS_SOURCE_TIMEOUT) {
463                         stat->s_stimeout++;
464                         return FLUSH_GIVEUP;
465                 } else if (descriptor_status == DS_DESTINATION_TIMEOUT) {
466                         stat->s_dtimeout++;
467                         ttm = get_cycles();
468
469                         /*
470                          * Our retries may be blocked by all destination
471                          * swack resources being consumed, and a timeout
472                          * pending.  In that case hardware returns the
473                          * ERROR that looks like a destination timeout.
474                          */
475                         if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
476                                 bcp->conseccompletes = 0;
477                                 return FLUSH_RETRY_PLUGGED;
478                         }
479
480                         bcp->conseccompletes = 0;
481                         return FLUSH_RETRY_TIMEOUT;
482                 } else {
483                         /*
484                          * descriptor_status is still BUSY
485                          */
486                         cpu_relax();
487                 }
488                 descriptor_status = uv1_read_status(mmr_offset, right_shift);
489         }
490         bcp->conseccompletes++;
491         return FLUSH_COMPLETE;
492 }
493
494 /*
495  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
496  */
497 static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
498 {
499         unsigned long descriptor_status;
500         unsigned long descriptor_status2;
501
502         descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
503         descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
504         descriptor_status = (descriptor_status << 1) | descriptor_status2;
505         return descriptor_status;
506 }
507
508 static int uv2_wait_completion(struct bau_desc *bau_desc,
509                                 unsigned long mmr_offset, int right_shift,
510                                 struct bau_control *bcp, long try)
511 {
512         unsigned long descriptor_stat;
513         cycles_t ttm;
514         int cpu = bcp->uvhub_cpu;
515         struct ptc_stats *stat = bcp->statp;
516
517         descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
518
519         /* spin on the status MMR, waiting for it to go idle */
520         while (descriptor_stat != UV2H_DESC_IDLE) {
521                 /*
522                  * Our software ack messages may be blocked because
523                  * there are no swack resources available.  As long
524                  * as none of them has timed out hardware will NACK
525                  * our message and its state will stay IDLE.
526                  */
527                 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
528                     (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
529                     (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
530                         stat->s_stimeout++;
531                         return FLUSH_GIVEUP;
532                 } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
533                         stat->s_dtimeout++;
534                         ttm = get_cycles();
535                         /*
536                          * Our retries may be blocked by all destination
537                          * swack resources being consumed, and a timeout
538                          * pending.  In that case hardware returns the
539                          * ERROR that looks like a destination timeout.
540                          */
541                         if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
542                                 bcp->conseccompletes = 0;
543                                 return FLUSH_RETRY_PLUGGED;
544                         }
545                         bcp->conseccompletes = 0;
546                         return FLUSH_RETRY_TIMEOUT;
547                 } else {
548                         /*
549                          * descriptor_stat is still BUSY
550                          */
551                         cpu_relax();
552                 }
553                 descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
554         }
555         bcp->conseccompletes++;
556         return FLUSH_COMPLETE;
557 }
558
559 /*
560  * There are 2 status registers; each an array[32] of 2 bits. Set up for
561  * which register to read and position in that register based on cpu in
562  * current hub.
563  */
564 static int wait_completion(struct bau_desc *bau_desc,
565                                 struct bau_control *bcp, long try)
566 {
567         int right_shift;
568         unsigned long mmr_offset;
569         int cpu = bcp->uvhub_cpu;
570
571         if (cpu < UV_CPUS_PER_AS) {
572                 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
573                 right_shift = cpu * UV_ACT_STATUS_SIZE;
574         } else {
575                 mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
576                 right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
577         }
578
579         if (is_uv1_hub())
580                 return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
581                                                                 bcp, try);
582         else
583                 return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
584                                                                 bcp, try);
585 }
586
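/* Convert seconds to TSC cycles; the inverse of the cyc2ns scaling above. */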
587 static inline cycles_t sec_2_cycles(unsigned long sec)
588 {
589         unsigned long ns;
590         cycles_t cyc;
591
592         ns = sec * 1000000000;
593         cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
594         return cyc;
595 }
596
597 /*
598  * Our retries are blocked by all destination sw ack resources being
599  * in use, and a timeout is pending. In that case hardware immediately
600  * returns the ERROR that looks like a destination timeout.
601  */
602 static void destination_plugged(struct bau_desc *bau_desc,
603                         struct bau_control *bcp,
604                         struct bau_control *hmaster, struct ptc_stats *stat)
605 {
606         udelay(bcp->plugged_delay);
607         bcp->plugged_tries++;
608
609         if (bcp->plugged_tries >= bcp->plugsb4reset) {
610                 bcp->plugged_tries = 0;
611
612                 quiesce_local_uvhub(hmaster);
613
614                 spin_lock(&hmaster->queue_lock);
615                 reset_with_ipi(&bau_desc->distribution, bcp);
616                 spin_unlock(&hmaster->queue_lock);
617
618                 end_uvhub_quiesce(hmaster);
619
620                 bcp->ipi_attempts++;
621                 stat->s_resets_plug++;
622         }
623 }
624
625 static void destination_timeout(struct bau_desc *bau_desc,
626                         struct bau_control *bcp, struct bau_control *hmaster,
627                         struct ptc_stats *stat)
628 {
629         hmaster->max_concurr = 1;
630         bcp->timeout_tries++;
631         if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
632                 bcp->timeout_tries = 0;
633
634                 quiesce_local_uvhub(hmaster);
635
636                 spin_lock(&hmaster->queue_lock);
637                 reset_with_ipi(&bau_desc->distribution, bcp);
638                 spin_unlock(&hmaster->queue_lock);
639
640                 end_uvhub_quiesce(hmaster);
641
642                 bcp->ipi_attempts++;
643                 stat->s_resets_timeout++;
644         }
645 }
646
647 /*
648  * Completions are taking a very long time due to a congested numalink
649  * network.
650  */
651 static void disable_for_congestion(struct bau_control *bcp,
652                                         struct ptc_stats *stat)
653 {
654         /* let only one cpu do this disabling */
655         spin_lock(&disable_lock);
656
657         if (!baudisabled && bcp->period_requests &&
658             ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
659                 int tcpu;
660                 struct bau_control *tbcp;
661                 /* it becomes this cpu's job to turn on the use of the
662                    BAU again */
663                 baudisabled = 1;
664                 bcp->set_bau_off = 1;
665                 bcp->set_bau_on_time = get_cycles();
666                 bcp->set_bau_on_time += sec_2_cycles(bcp->cong_period);
667                 stat->s_bau_disabled++;
668                 for_each_present_cpu(tcpu) {
669                         tbcp = &per_cpu(bau_control, tcpu);
670                         tbcp->baudisabled = 1;
671                 }
672         }
673
674         spin_unlock(&disable_lock);
675 }
676
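/*
 * After enough consecutive successful completions (complete_threshold),
 * let the hub master raise its concurrency limit back toward
 * max_concurr_const.
 */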
677 static void count_max_concurr(int stat, struct bau_control *bcp,
678                                 struct bau_control *hmaster)
679 {
680         bcp->plugged_tries = 0;
681         bcp->timeout_tries = 0;
682         if (stat != FLUSH_COMPLETE)
683                 return;
684         if (bcp->conseccompletes <= bcp->complete_threshold)
685                 return;
686         if (hmaster->max_concurr >= hmaster->max_concurr_const)
687                 return;
688         hmaster->max_concurr++;
689 }
690
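/*
 * Record the elapsed time of a send on the source side; sustained slow
 * completions of first-try requests lead to disable_for_congestion().
 */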
691 static void record_send_stats(cycles_t time1, cycles_t time2,
692                 struct bau_control *bcp, struct ptc_stats *stat,
693                 int completion_status, int try)
694 {
695         cycles_t elapsed;
696
697         if (time2 > time1) {
698                 elapsed = time2 - time1;
699                 stat->s_time += elapsed;
700
701                 if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
702                         bcp->period_requests++;
703                         bcp->period_time += elapsed;
704                         if ((elapsed > congested_cycles) &&
705                             (bcp->period_requests > bcp->cong_reps))
706                                 disable_for_congestion(bcp, stat);
707                 }
708         } else
709                 stat->s_requestor--;
710
711         if (completion_status == FLUSH_COMPLETE && try > 1)
712                 stat->s_retriesok++;
713         else if (completion_status == FLUSH_GIVEUP)
714                 stat->s_giveup++;
715 }
716
717 /*
718  * Because of a uv1 hardware bug only a limited number of concurrent
719  * requests can be made.
720  */
721 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
722 {
723         spinlock_t *lock = &hmaster->uvhub_lock;
724         atomic_t *v;
725
726         v = &hmaster->active_descriptor_count;
727         if (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr)) {
728                 stat->s_throttles++;
729                 do {
730                         cpu_relax();
731                 } while (!atomic_inc_unless_ge(lock, v, hmaster->max_concurr));
732         }
733 }
734
735 /*
736  * Handle the completion status of a message send.
737  */
738 static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
739                         struct bau_control *bcp, struct bau_control *hmaster,
740                         struct ptc_stats *stat)
741 {
742         if (completion_status == FLUSH_RETRY_PLUGGED)
743                 destination_plugged(bau_desc, bcp, hmaster, stat);
744         else if (completion_status == FLUSH_RETRY_TIMEOUT)
745                 destination_timeout(bau_desc, bcp, hmaster, stat);
746 }
747
748 /*
749  * Send a broadcast and wait for it to complete.
750  *
751  * The flush_mask contains the cpus the broadcast is to be sent to including
752  * cpus that are on the local uvhub.
753  *
754  * Returns 0 if all flushing represented in the mask was done.
755  * Returns 1 if it gives up entirely and the original cpu mask is to be
756  * returned to the kernel.
757  */
758 int uv_flush_send_and_wait(struct bau_desc *bau_desc,
759                         struct cpumask *flush_mask, struct bau_control *bcp)
760 {
761         int seq_number = 0;
762         int completion_stat = 0;
763         long try = 0;
764         unsigned long index;
765         cycles_t time1;
766         cycles_t time2;
767         struct ptc_stats *stat = bcp->statp;
768         struct bau_control *hmaster = bcp->uvhub_master;
769
770         if (is_uv1_hub())
771                 uv1_throttle(hmaster, stat);
772
773         while (hmaster->uvhub_quiesce)
774                 cpu_relax();
775
776         time1 = get_cycles();
777         do {
778                 if (try == 0) {
779                         bau_desc->header.msg_type = MSG_REGULAR;
780                         seq_number = bcp->message_number++;
781                 } else {
782                         bau_desc->header.msg_type = MSG_RETRY;
783                         stat->s_retry_messages++;
784                 }
785
786                 bau_desc->header.sequence = seq_number;
787                 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
788                 bcp->send_message = get_cycles();
789
790                 write_mmr_activation(index);
791
792                 try++;
793                 completion_stat = wait_completion(bau_desc, bcp, try);
794
795                 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
796
797                 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
798                         bcp->ipi_attempts = 0;
799                         completion_stat = FLUSH_GIVEUP;
800                         break;
801                 }
802                 cpu_relax();
803         } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
804                  (completion_stat == FLUSH_RETRY_TIMEOUT));
805
806         time2 = get_cycles();
807
808         count_max_concurr(completion_stat, bcp, hmaster);
809
810         while (hmaster->uvhub_quiesce)
811                 cpu_relax();
812
813         atomic_dec(&hmaster->active_descriptor_count);
814
815         record_send_stats(time1, time2, bcp, stat, completion_stat, try);
816
817         if (completion_stat == FLUSH_GIVEUP)
818                 return 1;
819         return 0;
820 }
821
822 /*
823  * The BAU is disabled. When the disabled time period has expired, the cpu
824  * that disabled it must re-enable it.
825  * Return 0 if it is re-enabled for all cpus.
826  */
827 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
828 {
829         int tcpu;
830         struct bau_control *tbcp;
831
832         if (bcp->set_bau_off) {
833                 if (get_cycles() >= bcp->set_bau_on_time) {
834                         stat->s_bau_reenabled++;
835                         baudisabled = 0;
836                         for_each_present_cpu(tcpu) {
837                                 tbcp = &per_cpu(bau_control, tcpu);
838                                 tbcp->baudisabled = 0;
839                                 tbcp->period_requests = 0;
840                                 tbcp->period_time = 0;
841                         }
842                         return 0;
843                 }
844         }
845         return -1;
846 }
847
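/*
 * Update the sender-side counts of targeted cpus and uvhubs for one
 * shootdown request.
 */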
848 static void record_send_statistics(struct ptc_stats *stat, int locals, int hubs,
849                                 int remotes, struct bau_desc *bau_desc)
850 {
851         stat->s_requestor++;
852         stat->s_ntargcpu += remotes + locals;
853         stat->s_ntargremotes += remotes;
854         stat->s_ntarglocals += locals;
855
856         /* uvhub statistics */
857         hubs = bau_uvhub_weight(&bau_desc->distribution);
858         if (locals) {
859                 stat->s_ntarglocaluvhub++;
860                 stat->s_ntargremoteuvhub += (hubs - 1);
861         } else
862                 stat->s_ntargremoteuvhub += hubs;
863
864         stat->s_ntarguvhub += hubs;
865
866         if (hubs >= 16)
867                 stat->s_ntarguvhub16++;
868         else if (hubs >= 8)
869                 stat->s_ntarguvhub8++;
870         else if (hubs >= 4)
871                 stat->s_ntarguvhub4++;
872         else if (hubs >= 2)
873                 stat->s_ntarguvhub2++;
874         else
875                 stat->s_ntarguvhub1++;
876 }
877
878 /*
879  * Translate a cpu mask to the uvhub distribution mask in the BAU
880  * activation descriptor.
881  */
882 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp,
883                         struct bau_desc *bau_desc, int *localsp, int *remotesp)
884 {
885         int cpu;
886         int pnode;
887         int cnt = 0;
888         struct hub_and_pnode *hpp;
889
890         for_each_cpu(cpu, flush_mask) {
891                 /*
892                  * The distribution vector is a bit map of pnodes, relative
893                  * to the partition base pnode (and the partition base nasid
894                  * in the header).
895                  * Translate cpu to pnode and hub using a local memory array.
896                  */
897                 hpp = &bcp->socket_master->thp[cpu];
898                 pnode = hpp->pnode - bcp->partition_base_pnode;
899                 bau_uvhub_set(pnode, &bau_desc->distribution);
900                 cnt++;
901                 if (hpp->uvhub == bcp->uvhub)
902                         (*localsp)++;
903                 else
904                         (*remotesp)++;
905         }
906         if (!cnt)
907                 return 1;
908         return 0;
909 }
910
911 /*
912  * globally purge translation cache of a virtual address or all TLBs
913  * @cpumask: mask of all cpus in which the address is to be removed
914  * @mm: mm_struct containing virtual address range
915  * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
916  * @cpu: the current cpu
917  *
918  * This is the entry point for initiating any UV global TLB shootdown.
919  *
920  * Purges the translation caches of all specified processors of the given
921  * virtual address, or purges all TLBs on specified processors.
922  *
923  * The caller has derived the cpumask from the mm_struct.  This function
924  * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
925  *
926  * The cpumask is converted into a uvhubmask of the uvhubs containing
927  * those cpus.
928  *
929  * Note that this function should be called with preemption disabled.
930  *
931  * Returns NULL if all remote flushing was done.
932  * Returns pointer to cpumask if some remote flushing remains to be
933  * done.  The returned pointer is valid till preemption is re-enabled.
934  */
935 const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
936                                 struct mm_struct *mm, unsigned long va,
937                                 unsigned int cpu)
938 {
939         int locals = 0;
940         int remotes = 0;
941         int hubs = 0;
942         struct bau_desc *bau_desc;
943         struct cpumask *flush_mask;
944         struct ptc_stats *stat;
945         struct bau_control *bcp;
946
947         /* kernel was booted 'nobau' */
948         if (nobau)
949                 return cpumask;
950
951         bcp = &per_cpu(bau_control, cpu);
952         stat = bcp->statp;
953
954         /* bau was disabled due to slow response */
955         if (bcp->baudisabled) {
956                 if (check_enable(bcp, stat))
957                         return cpumask;
958         }
959
960         /*
961          * Each sending cpu has a per-cpu mask which it fills from the caller's
962          * cpu mask.  All cpus are converted to uvhubs and copied to the
963          * activation descriptor.
964          */
965         flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
966         /* don't actually do a shootdown of the local cpu */
967         cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
968
969         if (cpu_isset(cpu, *cpumask))
970                 stat->s_ntargself++;
971
972         bau_desc = bcp->descriptor_base;
973         bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
974         bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
975         if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
976                 return NULL;
977
978         record_send_statistics(stat, locals, hubs, remotes, bau_desc);
979
980         bau_desc->payload.address = va;
981         bau_desc->payload.sending_cpu = cpu;
982         /*
983          * uv_flush_send_and_wait returns 0 if all cpus were messaged,
984          * or 1 if it gave up and the original cpumask should be returned.
985          */
986         if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
987                 return NULL;
988         else
989                 return cpumask;
990 }
991
992 /*
993  * The BAU message interrupt comes here. (registered by set_intr_gate)
994  * See entry_64.S
995  *
996  * We received a broadcast assist message.
997  *
998  * Interrupts are disabled; this interrupt could represent
999  * the receipt of several messages.
1000  *
1001  * All cores/threads on this hub get this interrupt.
1002  * The last one to see it does the software ack.
1003  * (the resource will not be freed until noninterruptible cpus see this
1004  *  interrupt; hardware may timeout the s/w ack and reply ERROR)
1005  */
1006 void uv_bau_message_interrupt(struct pt_regs *regs)
1007 {
1008         int count = 0;
1009         cycles_t time_start;
1010         struct bau_pq_entry *msg;
1011         struct bau_control *bcp;
1012         struct ptc_stats *stat;
1013         struct msg_desc msgdesc;
1014
1015         time_start = get_cycles();
1016
1017         bcp = &per_cpu(bau_control, smp_processor_id());
1018         stat = bcp->statp;
1019
1020         msgdesc.queue_first = bcp->queue_first;
1021         msgdesc.queue_last = bcp->queue_last;
1022
1023         msg = bcp->bau_msg_head;
1024         while (msg->swack_vec) {
1025                 count++;
1026
1027                 msgdesc.msg_slot = msg - msgdesc.queue_first;
1028                 msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
1029                 msgdesc.msg = msg;
1030                 bau_process_message(&msgdesc, bcp);
1031
1032                 msg++;
1033                 if (msg > msgdesc.queue_last)
1034                         msg = msgdesc.queue_first;
1035                 bcp->bau_msg_head = msg;
1036         }
1037         stat->d_time += (get_cycles() - time_start);
1038         if (!count)
1039                 stat->d_nomsg++;
1040         else if (count > 1)
1041                 stat->d_multmsg++;
1042
1043         ack_APIC_irq();
1044 }
1045
1046 /*
1047  * Each target uvhub (i.e. a uvhub that has cpus) needs to have
1048  * shootdown message timeouts enabled.  The timeout does not cause
1049  * an interrupt, but causes an error message to be returned to
1050  * the sender.
1051  */
1052 static void __init enable_timeouts(void)
1053 {
1054         int uvhub;
1055         int nuvhubs;
1056         int pnode;
1057         unsigned long mmr_image;
1058
1059         nuvhubs = uv_num_possible_blades();
1060
1061         for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1062                 if (!uv_blade_nr_possible_cpus(uvhub))
1063                         continue;
1064
1065                 pnode = uv_blade_to_pnode(uvhub);
1066                 mmr_image = read_mmr_misc_control(pnode);
1067                 /*
1068                  * Set the timeout period and then lock it in, in three
1069                  * steps; captures and locks in the period.
1070                  *
1071                  * To program the period, the SOFT_ACK_MODE must be off.
1072                  */
1073                 mmr_image &= ~(1L << SOFTACK_MSHIFT);
1074                 write_mmr_misc_control(pnode, mmr_image);
1075                 /*
1076                  * Set the 4-bit period.
1077                  */
1078                 mmr_image &= ~((unsigned long)0xf << SOFTACK_PSHIFT);
1079                 mmr_image |= (SOFTACK_TIMEOUT_PERIOD << SOFTACK_PSHIFT);
1080                 write_mmr_misc_control(pnode, mmr_image);
1081                 /*
1082                  * UV1:
1083                  * Subsequent reversals of the timebase bit (3) cause an
1084                  * immediate timeout of one or all INTD resources as
1085                  * indicated in bits 2:0 (7 causes all of them to timeout).
1086                  */
1087                 mmr_image |= (1L << SOFTACK_MSHIFT);
1088                 if (is_uv2_hub()) {
1089                         mmr_image |= (1L << UV2_LEG_SHFT);
1090                         mmr_image |= (1L << UV2_EXT_SHFT);
1091                 }
1092                 write_mmr_misc_control(pnode, mmr_image);
1093         }
1094 }
1095
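/*
 * seq_file iterators for /proc/sgi_uv/ptc_statistics; the iterator
 * position is a cpu number.
 */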
1096 static void *ptc_seq_start(struct seq_file *file, loff_t *offset)
1097 {
1098         if (*offset < num_possible_cpus())
1099                 return offset;
1100         return NULL;
1101 }
1102
1103 static void *ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
1104 {
1105         (*offset)++;
1106         if (*offset < num_possible_cpus())
1107                 return offset;
1108         return NULL;
1109 }
1110
1111 static void ptc_seq_stop(struct seq_file *file, void *data)
1112 {
1113 }
1114
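/* Convert microseconds to TSC cycles; the inverse of cycles_2_us(). */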
1115 static inline unsigned long long usec_2_cycles(unsigned long microsec)
1116 {
1117         unsigned long ns;
1118         unsigned long long cyc;
1119
1120         ns = microsec * 1000;
1121         cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
1122         return cyc;
1123 }
1124
1125 /*
1126  * Display the statistics thru /proc/sgi_uv/ptc_statistics
1127  * 'data' points to the cpu number
1128  * Note: see the descriptions in stat_description[].
1129  */
1130 static int ptc_seq_show(struct seq_file *file, void *data)
1131 {
1132         struct ptc_stats *stat;
1133         int cpu;
1134
1135         cpu = *(loff_t *)data;
1136         if (!cpu) {
1137                 seq_printf(file,
1138                         "# cpu sent stime self locals remotes ncpus localhub ");
1139                 seq_printf(file,
1140                         "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
1141                 seq_printf(file,
1142                         "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
1143                 seq_printf(file,
1144                         "resetp resett giveup sto bz throt swack recv rtime ");
1145                 seq_printf(file,
1146                         "all one mult none retry canc nocan reset rcan ");
1147                 seq_printf(file,
1148                         "disable enable\n");
1149         }
1150         if (cpu < num_possible_cpus() && cpu_online(cpu)) {
1151                 stat = &per_cpu(ptcstats, cpu);
1152                 /* source side statistics */
1153                 seq_printf(file,
1154                         "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1155                            cpu, stat->s_requestor, cycles_2_us(stat->s_time),
1156                            stat->s_ntargself, stat->s_ntarglocals,
1157                            stat->s_ntargremotes, stat->s_ntargcpu,
1158                            stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
1159                            stat->s_ntarguvhub, stat->s_ntarguvhub16);
1160                 seq_printf(file, "%ld %ld %ld %ld %ld ",
1161                            stat->s_ntarguvhub8, stat->s_ntarguvhub4,
1162                            stat->s_ntarguvhub2, stat->s_ntarguvhub1,
1163                            stat->s_dtimeout);
1164                 seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
1165                            stat->s_retry_messages, stat->s_retriesok,
1166                            stat->s_resets_plug, stat->s_resets_timeout,
1167                            stat->s_giveup, stat->s_stimeout,
1168                            stat->s_busy, stat->s_throttles);
1169
1170                 /* destination side statistics */
1171                 seq_printf(file,
1172                            "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
1173                            read_gmmr_sw_ack(uv_cpu_to_pnode(cpu)),
1174                            stat->d_requestee, cycles_2_us(stat->d_time),
1175                            stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
1176                            stat->d_nomsg, stat->d_retries, stat->d_canceled,
1177                            stat->d_nocanceled, stat->d_resets,
1178                            stat->d_rcanceled);
1179                 seq_printf(file, "%ld %ld\n",
1180                         stat->s_bau_disabled, stat->s_bau_reenabled);
1181         }
1182         return 0;
1183 }
1184
1185 /*
1186  * Display the tunables thru debugfs
1187  */
1188 static ssize_t tunables_read(struct file *file, char __user *userbuf,
1189                                 size_t count, loff_t *ppos)
1190 {
1191         char *buf;
1192         int ret;
1193
1194         buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
1195                 "max_concur plugged_delay plugsb4reset",
1196                 "timeoutsb4reset ipi_reset_limit complete_threshold",
1197                 "congested_response_us congested_reps congested_period",
1198                 max_concurr, plugged_delay, plugsb4reset,
1199                 timeoutsb4reset, ipi_reset_limit, complete_threshold,
1200                 congested_respns_us, congested_reps, congested_period);
1201
1202         if (!buf)
1203                 return -ENOMEM;
1204
1205         ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
1206         kfree(buf);
1207         return ret;
1208 }
1209
1210 /*
1211  * handle a write to /proc/sgi_uv/ptc_statistics
1212  * -1: reset the statistics
1213  *  0: display meaning of the statistics
1214  */
1215 static ssize_t ptc_proc_write(struct file *file, const char __user *user,
1216                                 size_t count, loff_t *data)
1217 {
1218         int cpu;
1219         int i;
1220         int elements;
1221         long input_arg;
1222         char optstr[64];
1223         struct ptc_stats *stat;
1224
1225         if (count == 0 || count > sizeof(optstr))
1226                 return -EINVAL;
1227         if (copy_from_user(optstr, user, count))
1228                 return -EFAULT;
1229         optstr[count - 1] = '\0';
1230
1231         if (strict_strtol(optstr, 10, &input_arg) < 0) {
1232                 printk(KERN_DEBUG "%s is invalid\n", optstr);
1233                 return -EINVAL;
1234         }
1235
1236         if (input_arg == 0) {
1237                 elements = sizeof(stat_description)/sizeof(*stat_description);
1238                 printk(KERN_DEBUG "# cpu:      cpu number\n");
1239                 printk(KERN_DEBUG "Sender statistics:\n");
1240                 for (i = 0; i < elements; i++)
1241                         printk(KERN_DEBUG "%s\n", stat_description[i]);
1242         } else if (input_arg == -1) {
1243                 for_each_present_cpu(cpu) {
1244                         stat = &per_cpu(ptcstats, cpu);
1245                         memset(stat, 0, sizeof(struct ptc_stats));
1246                 }
1247         }
1248
1249         return count;
1250 }
1251
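/*
 * Parse a nonnegative decimal integer, stopping at the first non-digit.
 * Used on the space-separated tunables input.
 */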
1252 static int local_atoi(const char *name)
1253 {
1254         int val = 0;
1255
1256         for (;; name++) {
1257                 switch (*name) {
1258                 case '0' ... '9':
1259                         val = 10*val+(*name-'0');
1260                         break;
1261                 default:
1262                         return val;
1263                 }
1264         }
1265 }
1266
1267 /*
1268  * Parse the values written to /sys/kernel/debug/sgi_uv/bau_tunables.
1269  * Zero values reset them to defaults.
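 *
 * Nine values are expected, in the order listed by tunables_read(), e.g.
 * (illustrative values only):
 *   echo "8 10 2 2 1 5 1000 10 30" > /sys/kernel/debug/sgi_uv/bau_tunables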
1270  */
1271 static int parse_tunables_write(struct bau_control *bcp, char *instr,
1272                                 int count)
1273 {
1274         char *p;
1275         char *q;
1276         int cnt = 0;
1277         int val;
1278         int e = sizeof(tunables) / sizeof(*tunables);
1279
1280         p = instr + strspn(instr, WHITESPACE);
1281         q = p;
1282         for (; *p; p = q + strspn(q, WHITESPACE)) {
1283                 q = p + strcspn(p, WHITESPACE);
1284                 cnt++;
1285                 if (q == p)
1286                         break;
1287         }
1288         if (cnt != e) {
1289                 printk(KERN_INFO "bau tunable error: should be %d values\n", e);
1290                 return -EINVAL;
1291         }
1292
1293         p = instr + strspn(instr, WHITESPACE);
1294         q = p;
1295         for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
1296                 q = p + strcspn(p, WHITESPACE);
1297                 val = local_atoi(p);
1298                 switch (cnt) {
1299                 case 0:
1300                         if (val == 0) {
1301                                 max_concurr = MAX_BAU_CONCURRENT;
1302                                 max_concurr_const = MAX_BAU_CONCURRENT;
1303                                 continue;
1304                         }
1305                         if (val < 1 || val > bcp->cpus_in_uvhub) {
1306                                 printk(KERN_DEBUG
1307                                 "Error: BAU max concurrent %d is invalid\n",
1308                                 val);
1309                                 return -EINVAL;
1310                         }
1311                         max_concurr = val;
1312                         max_concurr_const = val;
1313                         continue;
1314                 default:
1315                         if (val == 0)
1316                                 *tunables[cnt].tunp = tunables[cnt].deflt;
1317                         else
1318                                 *tunables[cnt].tunp = val;
1319                         continue;
1320                 }
1321                 if (q == p)
1322                         break;
1323         }
1324         return 0;
1325 }
1326
1327 /*
1328  * Handle a write to debugfs. (/sys/kernel/debug/sgi_uv/bau_tunables)
1329  */
1330 static ssize_t tunables_write(struct file *file, const char __user *user,
1331                                 size_t count, loff_t *data)
1332 {
1333         int cpu;
1334         int ret;
1335         char instr[100];
1336         struct bau_control *bcp;
1337
1338         if (count == 0 || count > sizeof(instr)-1)
1339                 return -EINVAL;
1340         if (copy_from_user(instr, user, count))
1341                 return -EFAULT;
1342
1343         instr[count] = '\0';
1344
1345         cpu = get_cpu();
1346         bcp = &per_cpu(bau_control, cpu);
1347         ret = parse_tunables_write(bcp, instr, count);
1348         put_cpu();
1349         if (ret)
1350                 return ret;
1351
1352         for_each_present_cpu(cpu) {
1353                 bcp = &per_cpu(bau_control, cpu);
1354                 bcp->max_concurr =              max_concurr;
1355                 bcp->max_concurr_const =        max_concurr;
1356                 bcp->plugged_delay =            plugged_delay;
1357                 bcp->plugsb4reset =             plugsb4reset;
1358                 bcp->timeoutsb4reset =          timeoutsb4reset;
1359                 bcp->ipi_reset_limit =          ipi_reset_limit;
1360                 bcp->complete_threshold =       complete_threshold;
1361                 bcp->cong_response_us =         congested_respns_us;
1362                 bcp->cong_reps =                congested_reps;
1363                 bcp->cong_period =              congested_period;
1364         }
1365         return count;
1366 }
1367
1368 static const struct seq_operations uv_ptc_seq_ops = {
1369         .start          = ptc_seq_start,
1370         .next           = ptc_seq_next,
1371         .stop           = ptc_seq_stop,
1372         .show           = ptc_seq_show
1373 };
1374
1375 static int ptc_proc_open(struct inode *inode, struct file *file)
1376 {
1377         return seq_open(file, &uv_ptc_seq_ops);
1378 }
1379
1380 static int tunables_open(struct inode *inode, struct file *file)
1381 {
1382         return 0;
1383 }
1384
1385 static const struct file_operations proc_uv_ptc_operations = {
1386         .open           = ptc_proc_open,
1387         .read           = seq_read,
1388         .write          = ptc_proc_write,
1389         .llseek         = seq_lseek,
1390         .release        = seq_release,
1391 };
1392
1393 static const struct file_operations tunables_fops = {
1394         .open           = tunables_open,
1395         .read           = tunables_read,
1396         .write          = tunables_write,
1397         .llseek         = default_llseek,
1398 };
1399
1400 static int __init uv_ptc_init(void)
1401 {
1402         struct proc_dir_entry *proc_uv_ptc;
1403
1404         if (!is_uv_system())
1405                 return 0;
1406
1407         proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
1408                                   &proc_uv_ptc_operations);
1409         if (!proc_uv_ptc) {
1410                 printk(KERN_ERR "unable to create %s proc entry\n",
1411                        UV_PTC_BASENAME);
1412                 return -EINVAL;
1413         }
1414
1415         tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
1416         if (!tunables_dir) {
1417                 printk(KERN_ERR "unable to create debugfs directory %s\n",
1418                        UV_BAU_TUNABLES_DIR);
1419                 return -EINVAL;
1420         }
1421         tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
1422                                         tunables_dir, NULL, &tunables_fops);
1423         if (!tunables_file) {
1424                 printk(KERN_ERR "unable to create debugfs file %s\n",
1425                        UV_BAU_TUNABLES_FILE);
1426                 return -EINVAL;
1427         }
1428         return 0;
1429 }
1430
1431 /*
1432  * Initialize the sending side's sending buffers.
1433  */
1434 static void activation_descriptor_init(int node, int pnode, int base_pnode)
1435 {
1436         int i;
1437         int cpu;
1438         unsigned long pa;
1439         unsigned long m;
1440         unsigned long n;
1441         size_t dsize;
1442         struct bau_desc *bau_desc;
1443         struct bau_desc *bd2;
1444         struct bau_control *bcp;
1445
1446         /*
1447          * each bau_desc is 64 bytes; there are 8 (ITEMS_PER_DESC)
1448          * per cpu; and one per cpu on the uvhub (ADP_SZ)
1449          */
1450         dsize = sizeof(struct bau_desc) * ADP_SZ * ITEMS_PER_DESC;
1451         bau_desc = kmalloc_node(dsize, GFP_KERNEL, node);
1452         BUG_ON(!bau_desc);
1453
1454         pa = uv_gpa(bau_desc); /* need the real nasid */
1455         n = pa >> uv_nshift;
1456         m = pa & uv_mmask;
1457
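        /*
         * Split the global physical address into the node bits (n, above
         * uv_nshift) and the node offset (m, under uv_mmask) expected by
         * the descriptor base MMR written below.
         */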
1458         /* the 14-bit pnode */
1459         write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
1460         /*
1461          * Initializing all 8 (ITEMS_PER_DESC) descriptors for each
1462          * cpu even though we only use the first one; one descriptor can
1463          * describe a broadcast to 256 uv hubs.
1464          */
1465         for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
1466                 memset(bd2, 0, sizeof(struct bau_desc));
1467                 bd2->header.swack_flag =        1;
1468                 /*
1469                  * The base_dest_nasid set in the message header is the nasid
1470                  * of the first uvhub in the partition. The bit map will
1471                  * indicate destination pnode numbers relative to that base.
1472                  * They may not be consecutive if nasid striding is being used.
1473                  */
1474                 bd2->header.base_dest_nasid =   UV_PNODE_TO_NASID(base_pnode);
1475                 bd2->header.dest_subnodeid =    UV_LB_SUBNODEID;
1476                 bd2->header.command =           UV_NET_ENDPOINT_INTD;
1477                 bd2->header.int_both =          1;
1478                 /*
1479                  * all others need to be set to zero:
1480                  *   fairness, chaining, multilevel, count, replied_to
1481                  */
1482         }
1483         for_each_present_cpu(cpu) {
1484                 if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
1485                         continue;
1486                 bcp = &per_cpu(bau_control, cpu);
1487                 bcp->descriptor_base = bau_desc;
1488         }
1489 }
1490
1491 /*
1492  * initialize the destination side's receiving buffers
1493  * entered for each uvhub in the partition
1494  * - node is first node (kernel memory notion) on the uvhub
1495  * - pnode is the uvhub's physical identifier
1496  */
1497 static void pq_init(int node, int pnode)
1498 {
1499         int cpu;
1500         size_t plsize;
1501         char *cp;
1502         void *vp;
1503         unsigned long pn;
1504         unsigned long first;
1505         unsigned long pn_first;
1506         unsigned long last;
1507         struct bau_pq_entry *pqp;
1508         struct bau_control *bcp;
1509
1510         plsize = (DEST_Q_SIZE + 1) * sizeof(struct bau_pq_entry);
1511         vp = kmalloc_node(plsize, GFP_KERNEL, node);
1512         pqp = (struct bau_pq_entry *)vp;
1513         BUG_ON(!pqp);
1514
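        /*
         * Round the queue base up to a 32-byte boundary: add 31, then
         * clear the low 5 bits.  The extra (DEST_Q_SIZE + 1)th entry
         * allocated above provides the slack for this adjustment.
         */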
1515         cp = (char *)pqp + 31;
1516         pqp = (struct bau_pq_entry *)(((unsigned long)cp >> 5) << 5);
1517
1518         for_each_present_cpu(cpu) {
1519                 if (pnode != uv_cpu_to_pnode(cpu))
1520                         continue;
1521                 /* for every cpu on this pnode: */
1522                 bcp = &per_cpu(bau_control, cpu);
1523                 bcp->queue_first        = pqp;
1524                 bcp->bau_msg_head       = pqp;
1525                 bcp->queue_last         = pqp + (DEST_Q_SIZE - 1);
1526         }
1527         /*
1528          * need the pnode of where the memory was really allocated
1529          */
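        /*
         * pn_first packs that pnode above UV_PAYLOADQ_PNODE_SHIFT together
         * with the physical address of the first queue entry; the three
         * MMR writes below hand the hub the first, tail and last pointers
         * of the payload queue.
         */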
1530         pn = uv_gpa(pqp) >> uv_nshift;
1531         first = uv_physnodeaddr(pqp);
1532         pn_first = ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) | first;
1533         last = uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1));
1534         write_mmr_payload_first(pnode, pn_first);
1535         write_mmr_payload_tail(pnode, first);
1536         write_mmr_payload_last(pnode, last);
1537
1538         /* in effect, all msg_type's are set to MSG_NOOP */
1539         memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
1540 }
1541
1542 /*
1543  * Initialization of each UV hub's structures
1544  */
1545 static void __init init_uvhub(int uvhub, int vector, int base_pnode)
1546 {
1547         int node;
1548         int pnode;
1549         unsigned long apicid;
1550
1551         node = uvhub_to_first_node(uvhub);
1552         pnode = uv_blade_to_pnode(uvhub);
1553
1554         activation_descriptor_init(node, pnode, base_pnode);
1555
1556         pq_init(node, pnode);
1557         /*
1558          * This initialization cannot be done by firmware because the
1559          * messaging interrupt vector is chosen by the OS.
1560          */
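        /* the MMR takes the target apicid in its upper 32 bits, the vector below */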
1561         apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1562         write_mmr_data_config(pnode, ((apicid << 32) | vector));
1563 }
1564
1565 /*
1566  * We will set BAU_MISC_CONTROL with a timeout period.
1567  * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
1568  * So the destination timeout period has to be calculated from them.
1569  */
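/*
 * A worked example of the UV1 path, using purely hypothetical register
 * values: an urgency7 index of 3 selects base = 10240 ns from
 * timeout_base_ns[]; with mult1 = 15 and mult2 = 4 the product is
 * 10240 * 15 * 4 = 614400 ns, so the returned timeout is 614 us.
 */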
1570 static int calculate_destination_timeout(void)
1571 {
1572         unsigned long mmr_image;
1573         int mult1;
1574         int mult2;
1575         int index;
1576         int base;
1577         int ret;
1578         unsigned long ts_ns;
1579
1580         if (is_uv1_hub()) {
1581                 mult1 = SOFTACK_TIMEOUT_PERIOD & BAU_MISC_CONTROL_MULT_MASK;
1582                 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1583                 index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
1584                 mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
1585                 mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
1586                 base = timeout_base_ns[index];
1587                 ts_ns = base * mult1 * mult2;
1588                 ret = ts_ns / 1000;
1589         } else {
1590                 /* 4-bit field: a 0/1 units bit selects 10us or 80us, plus 3 bits of multiplier */
1591                 mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
1592                 mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
1593                 if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
1594                         mult1 = 80;
1595                 else
1596                         mult1 = 10;
1597                 base = mmr_image & UV2_ACK_MASK;
1598                 ret = mult1 * base;
1599         }
1600         return ret;
1601 }
1602
1603 static void __init init_per_cpu_tunables(void)
1604 {
1605         int cpu;
1606         struct bau_control *bcp;
1607
1608         for_each_present_cpu(cpu) {
1609                 bcp = &per_cpu(bau_control, cpu);
1610                 bcp->baudisabled                = 0;
1611                 bcp->statp                      = &per_cpu(ptcstats, cpu);
1612                 /* time interval to catch a hardware stay-busy bug */
1613                 bcp->timeout_interval           = usec_2_cycles(2*timeout_us);
1614                 bcp->max_concurr                = max_concurr;
1615                 bcp->max_concurr_const          = max_concurr;
1616                 bcp->plugged_delay              = plugged_delay;
1617                 bcp->plugsb4reset               = plugsb4reset;
1618                 bcp->timeoutsb4reset            = timeoutsb4reset;
1619                 bcp->ipi_reset_limit            = ipi_reset_limit;
1620                 bcp->complete_threshold         = complete_threshold;
1621                 bcp->cong_response_us           = congested_respns_us;
1622                 bcp->cong_reps                  = congested_reps;
1623                 bcp->cong_period                = congested_period;
1624         }
1625 }
1626
1627 /*
1628  * Scan all cpus to collect blade and socket summaries.
1629  */
1630 static int __init get_cpu_topology(int base_pnode,
1631                                         struct uvhub_desc *uvhub_descs,
1632                                         unsigned char *uvhub_mask)
1633 {
1634         int cpu;
1635         int pnode;
1636         int uvhub;
1637         int socket;
1638         struct bau_control *bcp;
1639         struct uvhub_desc *bdp;
1640         struct socket_desc *sdp;
1641
1642         for_each_present_cpu(cpu) {
1643                 bcp = &per_cpu(bau_control, cpu);
1644
1645                 memset(bcp, 0, sizeof(struct bau_control));
1646
1647                 pnode = uv_cpu_hub_info(cpu)->pnode;
1648                 if ((pnode - base_pnode) >= UV_DISTRIBUTION_SIZE) {
1649                         printk(KERN_EMERG
1650                                 "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
1651                                 cpu, pnode, base_pnode, UV_DISTRIBUTION_SIZE);
1652                         return 1;
1653                 }
1654
1655                 bcp->osnode = cpu_to_node(cpu);
1656                 bcp->partition_base_pnode = base_pnode;
1657
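                /*
                 * Record this cpu's hub in the packed hub bitmap:
                 * byte uvhub/8, bit uvhub%8.  summarize_uvhub_sockets()
                 * tests the same encoding.
                 */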
1658                 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1659                 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
1660                 bdp = &uvhub_descs[uvhub];
1661
1662                 bdp->num_cpus++;
1663                 bdp->uvhub = uvhub;
1664                 bdp->pnode = pnode;
1665
1666                 /* kludge: assume one node per socket, and that disabling
1667                    a socket just leaves a gap in the node numbers */
1668                 socket = bcp->osnode & 1;
1669                 bdp->socket_mask |= (1 << socket);
1670                 sdp = &bdp->socket[socket];
1671                 sdp->cpu_number[sdp->num_cpus] = cpu;
1672                 sdp->num_cpus++;
1673                 if (sdp->num_cpus > MAX_CPUS_PER_SOCKET) {
1674                         printk(KERN_EMERG "%d cpus per socket invalid\n",
1675                                 sdp->num_cpus);
1676                         return 1;
1677                 }
1678         }
1679         return 0;
1680 }
1681
1682 /*
1683  * Each socket is to get a local array of pnodes/hubs.
1684  */
1685 static void make_per_cpu_thp(struct bau_control *smaster)
1686 {
1687         int cpu;
1688         size_t hpsz = sizeof(struct hub_and_pnode) * num_possible_cpus();
1689
1690         smaster->thp = kmalloc_node(hpsz, GFP_KERNEL, smaster->osnode);
1691         memset(smaster->thp, 0, hpsz);
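        /*
         * thp[] is indexed by cpu number so that the sending side can
         * translate a cpu in a flush mask directly to its (pnode, uvhub).
         */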
1692         for_each_present_cpu(cpu) {
1693                 smaster->thp[cpu].pnode = uv_cpu_hub_info(cpu)->pnode;
1694                 smaster->thp[cpu].uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
1695         }
1696 }
1697
1698 /*
1699  * Each uvhub is to get a local cpumask.
1700  */
1701 static void make_per_hub_cpumask(struct bau_control *hmaster)
1702 {
1703         int sz = sizeof(cpumask_t);
1704
1705         hmaster->cpumask = kzalloc_node(sz, GFP_KERNEL, hmaster->osnode);
1706 }
1707
1708 /*
1709  * Initialize all the per_cpu information for the cpus on a given socket,
1710  * given what has been gathered into the socket_desc struct,
1711  * and report the chosen hub and socket masters back to the caller.
1712  */
1713 static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
1714                         struct bau_control **smasterp,
1715                         struct bau_control **hmasterp)
1716 {
1717         int i;
1718         int cpu;
1719         struct bau_control *bcp;
1720
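        /*
         * The first cpu scanned on a socket becomes the socket master;
         * the first socket master on the hub also becomes the hub master.
         */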
1721         for (i = 0; i < sdp->num_cpus; i++) {
1722                 cpu = sdp->cpu_number[i];
1723                 bcp = &per_cpu(bau_control, cpu);
1724                 bcp->cpu = cpu;
1725                 if (i == 0) {
1726                         *smasterp = bcp;
1727                         if (!(*hmasterp))
1728                                 *hmasterp = bcp;
1729                 }
1730                 bcp->cpus_in_uvhub = bdp->num_cpus;
1731                 bcp->cpus_in_socket = sdp->num_cpus;
1732                 bcp->socket_master = *smasterp;
1733                 bcp->uvhub = bdp->uvhub;
1734                 bcp->uvhub_master = *hmasterp;
1735                 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
1736                 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
1737                         printk(KERN_EMERG "%d cpus per uvhub invalid\n",
1738                                 bcp->uvhub_cpu);
1739                         return 1;
1740                 }
1741         }
1742         return 0;
1743 }
1744
1745 /*
1746  * Summarize the blade and socket topology into the per_cpu structures.
1747  */
1748 static int __init summarize_uvhub_sockets(int nuvhubs,
1749                         struct uvhub_desc *uvhub_descs,
1750                         unsigned char *uvhub_mask)
1751 {
1752         int socket;
1753         int uvhub;
1754         unsigned short socket_mask;
1755
1756         for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1757                 struct uvhub_desc *bdp;
1758                 struct bau_control *smaster = NULL;
1759                 struct bau_control *hmaster = NULL;
1760
1761                 if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
1762                         continue;
1763
1764                 bdp = &uvhub_descs[uvhub];
1765                 socket_mask = bdp->socket_mask;
1766                 socket = 0;
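                /* walk socket_mask bit by bit; each set bit is a populated socket */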
1767                 while (socket_mask) {
1768                         struct socket_desc *sdp;
1769                         if ((socket_mask & 1)) {
1770                                 sdp = &bdp->socket[socket];
1771                                 if (scan_sock(sdp, bdp, &smaster, &hmaster))
1772                                         return 1;
1773                                 make_per_cpu_thp(smaster);
1774                         }
1775                         socket++;
1776                         socket_mask = (socket_mask >> 1);
1777                 }
1778                 make_per_hub_cpumask(hmaster);
1779         }
1780         return 0;
1781 }
1782
1783 /*
1784  * initialize the bau_control structure for each cpu
1785  */
1786 static int __init init_per_cpu(int nuvhubs, int base_part_pnode)
1787 {
1788         unsigned char *uvhub_mask;
1789         void *vp;
1790         struct uvhub_desc *uvhub_descs;
1791
1792         timeout_us = calculate_destination_timeout();
1793
1794         vp = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
1795         uvhub_descs = (struct uvhub_desc *)vp;
1796         memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
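        /* one bit per possible uvhub, rounded up to whole bytes */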
1797         uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
1798
1799         if (get_cpu_topology(base_part_pnode, uvhub_descs, uvhub_mask))
1800                 goto fail;
1801
1802         if (summarize_uvhub_sockets(nuvhubs, uvhub_descs, uvhub_mask))
1803                 goto fail;
1804
1805         kfree(uvhub_descs);
1806         kfree(uvhub_mask);
1807         init_per_cpu_tunables();
1808         return 0;
1809
1810 fail:
1811         kfree(uvhub_descs);
1812         kfree(uvhub_mask);
1813         return 1;
1814 }
1815
1816 /*
1817  * Initialization of BAU-related structures
1818  */
1819 static int __init uv_bau_init(void)
1820 {
1821         int uvhub;
1822         int pnode;
1823         int nuvhubs;
1824         int cur_cpu;
1825         int cpus;
1826         int vector;
1827         cpumask_var_t *mask;
1828
1829         if (!is_uv_system())
1830                 return 0;
1831
1832         if (nobau)
1833                 return 0;
1834
1835         for_each_possible_cpu(cur_cpu) {
1836                 mask = &per_cpu(uv_flush_tlb_mask, cur_cpu);
1837                 zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cur_cpu));
1838         }
1839
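        /*
         * m_val is the width of the node-offset part of a global physical
         * address; uv_nshift and uv_mmask are used to split gpa's in
         * activation_descriptor_init() and pq_init().
         */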
1840         uv_nshift = uv_hub_info->m_val;
1841         uv_mmask = (1UL << uv_hub_info->m_val) - 1;
1842         nuvhubs = uv_num_possible_blades();
1843         spin_lock_init(&disable_lock);
1844         congested_cycles = usec_2_cycles(congested_respns_us);
1845
1846         uv_base_pnode = 0x7fffffff;
1847         for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
1848                 cpus = uv_blade_nr_possible_cpus(uvhub);
1849                 if (cpus && (uv_blade_to_pnode(uvhub) < uv_base_pnode))
1850                         uv_base_pnode = uv_blade_to_pnode(uvhub);
1851         }
1852
1853         if (init_per_cpu(nuvhubs, uv_base_pnode)) {
1854                 nobau = 1;
1855                 return 0;
1856         }
1857
1858         vector = UV_BAU_MESSAGE;
1859         for_each_possible_blade(uvhub)
1860                 if (uv_blade_nr_possible_cpus(uvhub))
1861                         init_uvhub(uvhub, vector, uv_base_pnode);
1862
1863         enable_timeouts();
1864         alloc_intr_gate(vector, uv_bau_message_intr1);
1865
1866         for_each_possible_blade(uvhub) {
1867                 if (uv_blade_nr_possible_cpus(uvhub)) {
1868                         unsigned long val;
1869                         unsigned long mmr;
1870                         pnode = uv_blade_to_pnode(uvhub);
1871                         /* INIT the bau */
1872                         val = 1L << 63;
1873                         write_gmmr_activation(pnode, val);
1874                         mmr = 1; /* should be 1 to broadcast to both sockets */
1875                         write_mmr_data_broadcast(pnode, mmr);
1876                 }
1877         }
1878
1879         return 0;
1880 }
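/*
 * uv_bau_init() runs at core_initcall time, so the per-cpu BAU structures
 * are in place before uv_ptc_init() registers the /proc and debugfs
 * interfaces at the later fs_initcall level.
 */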
1881 core_initcall(uv_bau_init);
1882 fs_initcall(uv_ptc_init);