/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * SGI UV Broadcast Assist Unit definitions
 *
 * Copyright (C) 2008-2011 Silicon Graphics, Inc. All rights reserved.
 */
11 #ifndef _ASM_X86_UV_UV_BAU_H
12 #define _ASM_X86_UV_UV_BAU_H
14 #include <linux/bitmap.h>
/*
 * Broadcast Assist Unit messaging structures
 *
 * Selective Broadcast activations are induced by software action
 * specifying a particular 8-descriptor "set" via a 6-bit index written
 * to an MMR.
 * Thus there are 64 unique 512-byte sets of SB descriptors - one set for
 * each 6-bit index value. These descriptor sets are mapped in sequence
 * starting with set 0 located at the address specified in the
 * BAU_SB_DESCRIPTOR_BASE register, set 1 is located at BASE + 512,
 * set 2 is at BASE + 2*512, set 3 at BASE + 3*512, and so on.
 *
 * We will use one set for sending BAU messages from each of the
 * cpu's on the uvhub.
 *
 * TLB shootdown will use the first of the 8 descriptors of each set.
 * Each of the descriptors is 64 bytes in size (8*64 = 512 bytes in a set).
 */
/* hardware limits and basic BAU geometry */
#define MAX_CPUS_PER_UVHUB		64
#define MAX_CPUS_PER_SOCKET		32
#define ADP_SZ				64 /* hardware-provided max. */
#define UV_CPUS_PER_AS			32 /* hardware-provided max. */
#define ITEMS_PER_DESC			8
/* the 'throttle' to prevent the hardware stay-busy bug */
#define MAX_BAU_CONCURRENT		3
#define UV_ACT_STATUS_MASK		0x3
#define UV_ACT_STATUS_SIZE		2
#define UV_DISTRIBUTION_SIZE		256
#define UV_SW_ACK_NPENDING		8
/* network endpoint of the hub's Local Block, differs between UV1 and UV2 */
#define UV1_NET_ENDPOINT_INTD		0x38
#define UV2_NET_ENDPOINT_INTD		0x28
#define UV_NET_ENDPOINT_INTD		(is_uv1_hub() ?			\
			UV1_NET_ENDPOINT_INTD : UV2_NET_ENDPOINT_INTD)
#define UV_DESC_PSHIFT			49
#define UV_PAYLOADQ_PNODE_SHIFT		49
/* procfs/debugfs names for the statistics and tunables interfaces */
#define UV_PTC_BASENAME			"sgi_uv/ptc_statistics"
#define UV_BAU_BASENAME			"sgi_uv/bau_tunables"
#define UV_BAU_TUNABLES_DIR		"sgi_uv"
#define UV_BAU_TUNABLES_FILE		"bau_tunables"
#define WHITESPACE			" \t\n"
#define uv_physnodeaddr(x)	((__pa((unsigned long)(x)) & uv_mmask))
/* test a cpu's bit in a struct bau_local_cpumask */
#define cpubit_isset(cpu, bau_local_cpumask) \
	test_bit((cpu), (bau_local_cpumask).bits)
/* [19:16] SOFT_ACK timeout period  19: 1 is urgency 7  17:16 1 is multiplier */
/*
 * UV2: Bit 19 selects between
 *  (0): 10 microsecond timebase and
 *  (1): 80 microseconds
 *  we're using 655us, similar to UV1: 65 units of 10us
 */
#define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD	(9UL)
#define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD	(65*10UL)

#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	(is_uv1_hub() ?			\
		UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD :			\
		UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD)
/* multiplier field in BAU_MISC_CONTROL soft-ack timeout period */
#define BAU_MISC_CONTROL_MULT_MASK	3

#define UVH_AGING_PRESCALE_SEL		0x000000b000UL
/* [30:28] URGENCY_7  an index into a table of times */
#define BAU_URGENCY_7_SHIFT		28
#define BAU_URGENCY_7_MASK		7

#define UVH_TRANSACTION_TIMEOUT		0x000000b200UL
/* [45:40] BAU - BAU transaction timeout select - a multiplier */
#define BAU_TRANS_SHIFT			40
#define BAU_TRANS_MASK			0x3f
/*
 * shorten some awkward names
 */
#define AS_PUSH_SHIFT UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT
#define SOFTACK_MSHIFT UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT
#define SOFTACK_PSHIFT UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define SOFTACK_TIMEOUT_PERIOD UV_INTD_SOFT_ACK_TIMEOUT_PERIOD
/* global vs local MMR read/write shorthands */
#define write_gmmr	uv_write_global_mmr64
#define write_lmmr	uv_write_local_mmr
#define read_lmmr	uv_read_local_mmr
#define read_gmmr	uv_read_global_mmr64
/*
 * bits in UVH_LB_BAU_SB_ACTIVATION_STATUS_0/1 (two bits per descriptor)
 */
#define DS_DESTINATION_TIMEOUT		2
#define DS_SOURCE_TIMEOUT		3
/*
 * bits put together from HRP_LB_BAU_SB_ACTIVATION_STATUS_0/1/2
 * values 1 and 5 will not occur
 */
#define UV2H_DESC_IDLE			0
#define UV2H_DESC_DEST_TIMEOUT		2
#define UV2H_DESC_DEST_STRONG_NACK	3
#define UV2H_DESC_BUSY			4
#define UV2H_DESC_SOURCE_TIMEOUT	6
#define UV2H_DESC_DEST_PUT_ERR		7
/*
 * delay for 'plugged' timeout retries, in microseconds
 */
#define PLUGGED_DELAY			10

/*
 * thresholds at which to use IPI to free resources
 */
/* after this # consecutive 'plugged' timeouts, use IPI to release resources */
#define PLUGSB4RESET			100
/* after this many consecutive timeouts, use IPI to release resources */
#define TIMEOUTSB4RESET			1
/* at this number of uses of IPI to release resources, give up the request */
#define IPI_RESET_LIMIT			1
/* after this # consecutive successes, bump up the throttle if it was lowered */
#define COMPLETE_THRESHOLD		5

/* subnode id of the hub's Local Block (message destination) */
#define UV_LB_SUBNODEID			0x10
/* these two are the same for UV1 and UV2: */
#define UV_SA_SHFT	UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT
#define UV_SA_MASK	UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_MASK
/* 4 bits of software ack period */
#define UV2_ACK_MASK			0x7UL
#define UV2_ACK_UNITS_SHFT		3
#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
/*
 * number of entries in the destination side payload queue
 */
#define DEST_Q_SIZE			20
/*
 * number of destination side software ack resources
 */
#define DEST_NUM_RESOURCES		8
/*
 * completion statuses for sending a TLB flush message
 */
#define FLUSH_RETRY_PLUGGED		1
#define FLUSH_RETRY_TIMEOUT		2
#define FLUSH_GIVEUP			3
#define FLUSH_COMPLETE			4

/*
 * tuning the action when the numalink network is extremely delayed
 */
#define CONGESTED_RESPONSE_US		1000	/* 'long' response time, in
						   microseconds */
#define CONGESTED_REPS			10	/* long delays averaged over
						   this many broadcasts */
#define CONGESTED_PERIOD		30	/* time for the bau to be
						   disabled, in seconds */

/* software message type (see msg_type field of the message header) */
#define MSG_REGULAR			1
177 * Distribution: 32 bytes (256 bits) (bytes 0-0x1f of descriptor)
178 * If the 'multilevel' flag in the header portion of the descriptor
179 * has been set to 0, then endpoint multi-unicast mode is selected.
180 * The distribution specification (32 bytes) is interpreted as a 256-bit
181 * distribution vector. Adjacent bits correspond to consecutive even numbered
182 * nodeIDs. The result of adding the index of a given bit to the 15-bit
183 * 'base_dest_nasid' field of the header corresponds to the
184 * destination nodeID associated with that specified bit.
186 struct bau_targ_hubmask {
187 unsigned long bits[BITS_TO_LONGS(UV_DISTRIBUTION_SIZE)];
/*
 * mask of cpu's on a uvhub
 * (during initialization we need to check that unsigned long has
 * enough bits for max. cpu's per uvhub)
 */
struct bau_local_cpumask {
	unsigned long bits;	/* one bit per cpu on this uvhub */
};
/*
 * Payload: 16 bytes (128 bits) (bytes 0x20-0x2f of descriptor)
 * only 12 bytes (96 bits) of the payload area are usable.
 * An additional 3 bytes (bits 27:4) of the header address are carried
 * to the next bytes of the destination payload queue.
 * And an additional 2 bytes of the header Suppl_A field are also
 * carried to the destination payload queue.
 * But the first byte of the Suppl_A becomes bits 127:120 (the 16th byte)
 * of the destination payload queue, which is written by the hardware
 * with the s/w ack resource bit vector.
 * [ effective message contents (16 bytes (128 bits) maximum), not counting
 *   the s/w ack bit vector  ]
 */

/*
 * The payload is software-defined for INTD transactions
 */
struct bau_msg_payload {
	unsigned long	address;		/* signifies a page or all
						   TLB's of the cpu */
	/* 64 bits */
	unsigned short	sending_cpu;		/* filled in by sender */
	/* 16 bits */
	unsigned short	acknowledge_count;	/* filled in by destination */
	/* 16 bits */
	unsigned int	reserved1:32;		/* not usable */
	/* 32 bits */
};
/*
 * Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
 * see table 4.2.3.0.1 in broadcast_assist spec.
 * NOTE(review): reconstructed from a garbled extract; bit widths below sum
 * to exactly 128 — verify field list against the original header.
 */
struct bau_msg_header {
	unsigned int	dest_subnodeid:6;	/* must be 0x10, for the LB */
	/* bits 5:0 */
	unsigned int	base_dest_nasid:15;	/* nasid of the first bit */
	/* bits 20:6 */				/* in uvhub map */
	unsigned int	command:8;		/* message type */
	/* bits 28:21 */
	/* 0x38: SN3net EndPoint Message */
	unsigned int	rsvd_1:3;		/* must be zero */
	/* bits 31:29 */
	/* int will align on 32 bits */
	unsigned int	rsvd_2:9;		/* must be zero */
	/* bits 40:32 */
	/* Suppl_A is 56-41 */
	unsigned int	sequence:16;		/* message sequence number */
	/* bits 56:41 */	/* becomes bytes 16-17 of msg */
				/* Address field (96:57) is
				   never used as an address
				   (these are address bits
				   42:3 of a byte address) */
	unsigned int	rsvd_3:1;		/* must be zero */
	/* bit 57 */
	/* address bits 27:4 are payload */
	/* these next 24  (58-81) bits become bytes 12-14 of msg */
	/* bits 65:58 land in byte 12 */
	unsigned int	replied_to:1;		/* sent as 0 by the source to
						   byte 12 */
	/* bit 58 */
	unsigned int	msg_type:3;		/* software type of the
						   message */
	/* bits 61:59 */
	unsigned int	canceled:1;		/* message canceled, resource
						   is to be freed */
	/* bit 62 */
	unsigned int	payload_1a:1;		/* not currently used */
	/* bit 63 */
	unsigned int	payload_1b:2;		/* not currently used */
	/* bits 65:64 */

	/* bits 73:66 land in byte 13 */
	unsigned int	payload_1ca:6;		/* not currently used */
	/* bits 71:66 */
	unsigned int	payload_1c:2;		/* not currently used */
	/* bits 73:72 */

	/* bits 81:74 land in byte 14 */
	unsigned int	payload_1d:6;		/* not currently used */
	/* bits 79:74 */
	unsigned int	payload_1e:2;		/* not currently used */
	/* bits 81:80 */

	unsigned int	rsvd_4:7;		/* must be zero */
	/* bits 88:82 */
	unsigned int	swack_flag:1;		/* software acknowledge flag */
	/* bit 89 */
				/* INTD transactions at
				   destination are to wait for
				   software acknowledge */
	unsigned int	rsvd_5:6;		/* must be zero */
	/* bits 95:90 */
	unsigned int	rsvd_6:5;		/* must be zero */
	/* bits 100:96 */
	unsigned int	int_both:1;		/* if 1, interrupt both sockets
						   on the uvhub */
	/* bit 101 */
	unsigned int	fairness:3;		/* usually zero */
	/* bits 104:102 */
	unsigned int	multilevel:1;		/* multi-level multicast
						   format */
	/* bit 105 */
	/* 0 for TLB: endpoint multi-unicast messages */
	unsigned int	chaining:1;		/* next descriptor is part of
						   this activation */
	/* bit 106 */
	unsigned int	rsvd_7:21;		/* must be zero */
	/* bits 127:107 */
};
312 * The activation descriptor:
313 * The format of the message to send, plus all accompanying control
317 struct bau_targ_hubmask distribution;
319 * message template, consisting of header and payload:
321 struct bau_msg_header header;
322 struct bau_msg_payload payload;
325 * -payload-- ---------header------
326 * bytes 0-11 bits 41-56 bits 58-81
329 * A/B/C are moved to:
331 * bytes 0-11 bytes 12-14 bytes 16-17 (byte 15 filled in by hw as vector)
332 * ------------payload queue-----------
/*
 * The payload queue on the destination side is an array of these.
 * With BAU_MISC_CONTROL set for software acknowledge mode, the messages
 * are 32 bytes (2 micropackets) (256 bits) in length, but contain only 17
 * bytes of usable data, including the sw ack vector in byte 15 (bits 127:120)
 * (12 bytes come from bau_msg_payload, 3 from payload_1, 2 from
 *  swack_vec and payload_2)
 * "Enabling Software Acknowledgment mode (see Section 4.3.3 Software
 *  Acknowledge Processing) also selects 32 byte (17 bytes usable) payload"
 */
struct bau_pq_entry {
	unsigned long	address;	/* signifies a page or all TLB's
					   of the cpu */
	/* 64 bits, bytes 0-7 */
	unsigned short	sending_cpu;	/* cpu that sent the message */
	/* 16 bits, bytes 8-9 */
	unsigned short	acknowledge_count; /* filled in by destination */
	/* 16 bits, bytes 10-11 */
	/* these next 3 bytes come from bits 58-81 of the message header */
	unsigned short	replied_to:1;	/* sent as 0 by the source */
	unsigned short	msg_type:3;	/* software message type */
	unsigned short	canceled:1;	/* sent as 0 by the source */
	unsigned short	unused1:3;	/* not currently using */
	/* byte 12 */
	unsigned char	unused2a;	/* not currently using */
	/* byte 13 */
	unsigned char	unused2;	/* not currently using */
	/* byte 14 */
	unsigned char	swack_vec;	/* filled in by the hardware */
	/* byte 15 (bits 127:120) */
	unsigned short	sequence;	/* message sequence number */
	/* bytes 16-17 */
	unsigned char	unused4[2];	/* not currently using bytes 18-19 */
	/* bytes 18-19 */
	int		number_of_cpus;	/* filled in at destination */
	/* 32 bits, bytes 20-23 (aligned) */
	unsigned char	unused5[8];	/* not using */
	/* bytes 24-31 */
};
/*
 * one per-cpu; describes a message in progress on the destination side
 * NOTE(review): reconstructed from a garbled extract — the struct opener,
 * slot-index members and closer were lost; verify against the original.
 */
struct msg_desc {
	struct bau_pq_entry	*msg;		/* the message being handled */
	int			msg_slot;	/* index into the payload queue */
	int			swack_slot;	/* index of its s/w ack resource */
	struct bau_pq_entry	*queue_first;	/* start of the payload queue */
	struct bau_pq_entry	*queue_last;	/* end of the payload queue */
};
/*
 * This structure is allocated per_cpu for UV TLB shootdown statistics.
 */
struct ptc_stats {
	/* sender statistics */
	unsigned long	s_giveup;		/* number of fall backs to IPI-style flushes */
	unsigned long	s_requestor;		/* number of shootdown requests */
	unsigned long	s_stimeout;		/* source side timeouts */
	unsigned long	s_dtimeout;		/* destination side timeouts */
	unsigned long	s_time;			/* time spent in sending side */
	unsigned long	s_retriesok;		/* successful retries */
	unsigned long	s_ntargcpu;		/* total number of cpu's targeted */
	unsigned long	s_ntargself;		/* times the sending cpu was targeted */
	unsigned long	s_ntarglocals;		/* targets of cpus on the local blade */
	unsigned long	s_ntargremotes;		/* targets of cpus on remote blades */
	unsigned long	s_ntarglocaluvhub;	/* targets of the local hub */
	unsigned long	s_ntargremoteuvhub;	/* remotes hubs targeted */
	unsigned long	s_ntarguvhub;		/* total number of uvhubs targeted */
	unsigned long	s_ntarguvhub16;		/* number of times target hubs >= 16 */
	unsigned long	s_ntarguvhub8;		/* number of times target hubs >= 8 */
	unsigned long	s_ntarguvhub4;		/* number of times target hubs >= 4 */
	unsigned long	s_ntarguvhub2;		/* number of times target hubs >= 2 */
	unsigned long	s_ntarguvhub1;		/* number of times target hubs == 1 */
	unsigned long	s_resets_plug;		/* ipi-style resets from plug state */
	unsigned long	s_resets_timeout;	/* ipi-style resets from timeouts */
	unsigned long	s_busy;			/* status stayed busy past s/w timer */
	unsigned long	s_throttles;		/* waits in throttle */
	unsigned long	s_retry_messages;	/* retry broadcasts */
	unsigned long	s_bau_reenabled;	/* for bau enable/disable */
	unsigned long	s_bau_disabled;		/* for bau enable/disable */
	/* destination statistics */
	unsigned long	d_alltlb;		/* times all tlb's on this cpu were flushed */
	unsigned long	d_onetlb;		/* times just one tlb on this cpu was flushed */
	unsigned long	d_multmsg;		/* interrupts with multiple messages */
	unsigned long	d_nomsg;		/* interrupts with no message */
	unsigned long	d_time;			/* time spent on destination side */
	unsigned long	d_requestee;		/* number of messages processed */
	unsigned long	d_retries;		/* number of retry messages processed */
	unsigned long	d_canceled;		/* number of messages canceled by retries */
	unsigned long	d_nocanceled;		/* retries that found nothing to cancel */
	unsigned long	d_resets;		/* number of ipi-style requests processed */
	unsigned long	d_rcanceled;		/* number of messages canceled by resets */
};
462 struct hub_and_pnode {
469 short cpu_number[MAX_CPUS_PER_SOCKET];
473 unsigned short socket_mask;
477 struct socket_desc socket[2];
481 * one per-cpu; to locate the software tables
/* NOTE(review): garbled extract — the 'struct bau_control {' opener, many
   members and the closing brace were lost here; do not edit without the
   original header. */
/* per-cpu pointers into the BAU software tables: */
484 struct bau_desc *descriptor_base;
485 struct bau_pq_entry *queue_first;
486 struct bau_pq_entry *queue_last;
487 struct bau_pq_entry *bau_msg_head;
/* hierarchy: each cpu points at its socket's and uvhub's master cpu */
488 struct bau_control *uvhub_master;
489 struct bau_control *socket_master;
490 struct ptc_stats *statp;
491 unsigned long timeout_interval;
492 unsigned long set_bau_on_time;
493 atomic_t active_descriptor_count;
504 short cpus_in_socket;
506 short partition_base_pnode;
507 unsigned short message_number;
508 unsigned short uvhub_quiesce;
509 short socket_acknowledge_count[DEST_Q_SIZE];
510 cycles_t send_message;
511 spinlock_t uvhub_lock;
512 spinlock_t queue_lock;
/* tunables — presumably mirrored from the bau_tunables file; confirm */
515 int max_concurr_const;
520 int complete_threshold;
521 int cong_response_us;
524 cycles_t period_time;
525 long period_requests;
526 struct hub_and_pnode *thp;
529 static unsigned long read_mmr_uv2_status(void)
531 return read_lmmr(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2);
534 static void write_mmr_data_broadcast(int pnode, unsigned long mmr_image)
536 write_gmmr(pnode, UVH_BAU_DATA_BROADCAST, mmr_image);
539 static void write_mmr_descriptor_base(int pnode, unsigned long mmr_image)
541 write_gmmr(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE, mmr_image);
544 static void write_mmr_activation(unsigned long index)
546 write_lmmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
549 static void write_gmmr_activation(int pnode, unsigned long mmr_image)
551 write_gmmr(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL, mmr_image);
554 static void write_mmr_payload_first(int pnode, unsigned long mmr_image)
556 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST, mmr_image);
559 static void write_mmr_payload_tail(int pnode, unsigned long mmr_image)
561 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL, mmr_image);
564 static void write_mmr_payload_last(int pnode, unsigned long mmr_image)
566 write_gmmr(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST, mmr_image);
569 static void write_mmr_misc_control(int pnode, unsigned long mmr_image)
571 write_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
574 static unsigned long read_mmr_misc_control(int pnode)
576 return read_gmmr(pnode, UVH_LB_BAU_MISC_CONTROL);
579 static void write_mmr_sw_ack(unsigned long mr)
581 uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
584 static unsigned long read_mmr_sw_ack(void)
586 return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
589 static unsigned long read_gmmr_sw_ack(int pnode)
591 return read_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
594 static void write_mmr_data_config(int pnode, unsigned long mr)
596 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, mr);
599 static inline int bau_uvhub_isset(int uvhub, struct bau_targ_hubmask *dstp)
601 return constant_test_bit(uvhub, &dstp->bits[0]);
603 static inline void bau_uvhub_set(int pnode, struct bau_targ_hubmask *dstp)
605 __set_bit(pnode, &dstp->bits[0]);
607 static inline void bau_uvhubs_clear(struct bau_targ_hubmask *dstp,
610 bitmap_zero(&dstp->bits[0], nbits);
612 static inline int bau_uvhub_weight(struct bau_targ_hubmask *dstp)
614 return bitmap_weight((unsigned long *)&dstp->bits[0],
615 UV_DISTRIBUTION_SIZE);
618 static inline void bau_cpubits_clear(struct bau_local_cpumask *dstp, int nbits)
620 bitmap_zero(&dstp->bits, nbits);
/* BAU interrupt entry points — presumably asm stubs; defined elsewhere */
extern void uv_bau_message_intr1(void);
extern void uv_bau_timeout_intr1(void);
/* a short integer supporting atomic add-and-return (see atom_asr()) */
struct atomic_short {
	short counter;
};

/**
 * atomic_read_short - read a short atomic variable
 * @v: pointer of type atomic_short
 *
 * Atomically reads the value of @v.
 */
static inline int atomic_read_short(const struct atomic_short *v)
{
	return v->counter;
}
642 * atom_asr - add and return a short int
643 * @i: short value to add
644 * @v: pointer of type atomic_short
646 * Atomically adds @i to @v and returns @i + @v
648 static inline int atom_asr(short i, struct atomic_short *v)
651 asm volatile(LOCK_PREFIX "xaddw %0, %1"
652 : "+r" (i), "+m" (v->counter)
658 * conditionally add 1 to *v, unless *v is >= u
659 * return 0 if we cannot add 1 to *v because it is >= u
660 * return 1 if we can add 1 to *v because it is < u
663 * This is close to atomic_add_unless(), but this allows the 'u' value
664 * to be lowered below the current 'v'. atomic_add_unless can only stop
667 static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
670 if (atomic_read(v) >= u) {
679 #endif /* _ASM_X86_UV_UV_BAU_H */