/*
 *	SGI UltraViolet TLB flush routines.
 *
 *	(c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>		/* x86_cpu_to_apicid */
#include <asm/tsc.h>		/* get_cycles() */
#include <asm/irq_vectors.h>
#include <asm/timer.h>
/* a message's position in the payload queue, passed to the processing code */
struct msg_desc {
        struct bau_payload_queue_entry *msg;
        int msg_slot;
        int sw_ack_slot;
        struct bau_payload_queue_entry *va_queue_first;
        struct bau_payload_queue_entry *va_queue_last;
};

#define UV_INTD_SOFT_ACK_TIMEOUT_PERIOD	0x000000000bUL
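/*
 * Note: this is a 4-bit encoded period, not a cycle or nanosecond count;
 * uv_enable_timeouts() below programs it into the INTD_SOFT_ACK_TIMEOUT_PERIOD
 * field of UVH_LB_BAU_MISC_CONTROL. The wall-clock length of the period is
 * hardware-defined (and depends on the timebase bit), so 0xb is best read as
 * an opaque tuning value rather than a number of microseconds.
 */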
static int uv_bau_max_concurrent __read_mostly;

static int nobau;
static int __init setup_nobau(char *arg)
{
        nobau = 1;
        return 0;
}
early_param("nobau", setup_nobau);
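/*
 * Booting with "nobau" on the kernel command line thus disables the BAU:
 * uv_flush_tlb_others() below returns the caller's cpumask untouched and
 * the kernel falls back to ordinary IPI-based TLB shootdowns.
 */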
/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);
/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
        int node, b;

        for_each_online_node(node) {
                b = uv_node_to_blade_id(node);
                if (uvhub == b)
                        return node;
        }
        return -1;
}
/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (uvhub == uv_cpu_to_blade_id(cpu))
                        return per_cpu(x86_cpu_to_apicid, cpu);
        return -1;
}
/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
                                       struct bau_control *bcp)
{
        unsigned long dw;
        struct bau_payload_queue_entry *msg;

        msg = mdp->msg;
        if (!msg->canceled) {
                dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
                                msg->sw_ack_vector;
                uv_write_local_mmr(
                                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
        }
        msg->replied_to = 1;
        msg->sw_ack_vector = 0;
}
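/*
 * For illustration: with UV_SW_ACK_NPENDING of 8 (the same layout as the
 * "(msg_res << 8) | msg_res" writes elsewhere in this file), a sw_ack_vector
 * of 0x3 produces dw = 0x303, so the single ALIAS write clears both the
 * Timeout bits (15:8) and the Pending bits (7:0) for those two resources.
 */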
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
                                            struct bau_control *bcp)
{
        int i;
        int slot2;
        int cancel_count = 0;
        unsigned long msg_res;
        unsigned long mmr = 0;
        struct bau_payload_queue_entry *msg;
        struct bau_payload_queue_entry *msg2;
        struct ptc_stats *stat;

        msg = mdp->msg;
        stat = &per_cpu(ptcstats, bcp->cpu);
        stat->d_retries++;
        /*
         * cancel any message from msg+1 to the retry itself
         */
        for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
                if (msg2 > mdp->va_queue_last)
                        msg2 = mdp->va_queue_first;
                if (msg2 == msg)
                        break;

                /* same conditions for cancellation as uv_do_reset */
                if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
                    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
                        msg->sw_ack_vector) == 0) &&
                    (msg2->sending_cpu == msg->sending_cpu) &&
                    (msg2->msg_type != MSG_NOOP)) {
                        slot2 = msg2 - mdp->va_queue_first;
                        mmr = uv_read_local_mmr
                                (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
                        msg_res = ((msg2->sw_ack_vector << 8) |
                                   msg2->sw_ack_vector);
                        /*
                         * This is a message retry; clear the resources held
                         * by the previous message only if they timed out.
                         * If it has not timed out we have an unexpected
                         * situation to report.
                         */
                        if (mmr & (msg_res << 8)) {
                                /*
                                 * the resource timed out;
                                 * make everyone ignore the cancelled message.
                                 */
                                msg2->canceled = 1;
                                stat->d_canceled++;
                                cancel_count++;
                                uv_write_local_mmr(
                                    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
                                        (msg_res << 8) | msg_res);
                        } else
                                printk(KERN_INFO
                                        "note bau retry: no effect\n");
                }
        }
        if (!cancel_count)
                stat->d_nocanceled++;
}
/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
                                   struct bau_control *bcp)
{
        int msg_ack_count;
        short socket_ack_count = 0;
        struct ptc_stats *stat;
        struct bau_payload_queue_entry *msg;
        struct bau_control *smaster = bcp->socket_master;

        /*
         * This must be a normal message, or retry of a normal message
         */
        msg = mdp->msg;
        stat = &per_cpu(ptcstats, bcp->cpu);
        if (msg->address == TLB_FLUSH_ALL) {
                local_flush_tlb();
                stat->d_alltlb++;
        } else {
                __flush_tlb_one(msg->address);
                stat->d_onetlb++;
        }
        stat->d_requestee++;

        /*
         * One cpu on each uvhub has the additional job on a RETRY
         * of releasing the resource held by the message that is
         * being retried. That message is identified by sending
         * cpu number.
         */
        if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
                uv_bau_process_retry_msg(mdp, bcp);

        /*
         * This is a sw_ack message, so we have to reply to it.
         * Count each responding cpu on the socket. This avoids
         * pinging the count's cache line back and forth between
         * the sockets.
         */
        socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
                        &smaster->socket_acknowledge_count[mdp->msg_slot]);
        if (socket_ack_count == bcp->cpus_in_socket) {
                /*
                 * Both sockets dump their completed count total into
                 * the message's count.
                 */
                smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
                msg_ack_count = atomic_add_short_return(socket_ack_count,
                                (struct atomic_short *)&msg->acknowledge_count);

                if (msg_ack_count == bcp->cpus_in_uvhub) {
                        /*
                         * All cpus in uvhub saw it; reply
                         */
                        uv_reply_to_message(mdp, bcp);
                }
        }
}
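/*
 * A worked example of the two-level acknowledge above: on a hub with two
 * 8-cpu sockets, the 16 responders increment only their own socket's
 * counter; just the last responder on each socket touches the message's
 * acknowledge_count, so that shared cache line is written twice per
 * message instead of 16 times.
 */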
/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
        int cpu;

        for_each_present_cpu(cpu)
                if (uvhub == uv_cpu_to_blade_id(cpu))
                        return cpu;
        return -1;
}
/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
        int i;
        int slot;
        unsigned long mmr;
        unsigned long msg_res;
        struct bau_control *bcp;
        struct reset_args *rap;
        struct bau_payload_queue_entry *msg;
        struct ptc_stats *stat;

        bcp = &per_cpu(bau_control, smp_processor_id());
        rap = (struct reset_args *)ptr;
        stat = &per_cpu(ptcstats, bcp->cpu);
        stat->d_resets++;

        /*
         * We're looking for the given sender, and
         * will free its sw_ack resource.
         * If all cpu's finally responded after the timeout, its
         * message 'replied_to' was set.
         */
        for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {

                /* uv_do_reset: same conditions for cancellation as
                   uv_bau_process_retry_msg() */
                if ((msg->replied_to == 0) &&
                    (msg->canceled == 0) &&
                    (msg->sending_cpu == rap->sender) &&
                    (msg->sw_ack_vector) &&
                    (msg->msg_type != MSG_NOOP)) {
                        /*
                         * make everyone else ignore this message
                         */
                        msg->canceled = 1;
                        slot = msg - bcp->va_queue_first;
                        /*
                         * only reset the resource if it is still pending
                         */
                        mmr = uv_read_local_mmr
                                        (UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
                        msg_res = ((msg->sw_ack_vector << 8) |
                                   msg->sw_ack_vector);
                        if (mmr & msg_res) {
                                stat->d_rcanceled++;
                                uv_write_local_mmr(
                                    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
                                    msg_res);
                        }
                }
        }
}
/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
                              int sender)
{
        int uvhub;
        int cpu;
        cpumask_t mask;
        struct reset_args reset_args;

        reset_args.sender = sender;

        cpus_clear(mask);
        /* find a single cpu for each uvhub in this distribution mask */
        for (uvhub = 0;
             uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
             uvhub++) {
                if (!bau_uvhub_isset(uvhub, distribution))
                        continue;
                /* find a cpu for this uvhub */
                cpu = uvhub_to_first_cpu(uvhub);
                cpu_set(cpu, mask);
        }
        /* IPI all cpus; Preemption is already disabled */
        smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
}
static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
        unsigned long long ns;
        unsigned long us;

        ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
                                                >> CYC2NS_SCALE_FACTOR;
        us = ns / 1000;
        return us;
}
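/*
 * The conversion above follows the kernel's fixed-point cyc2ns convention:
 * per_cpu(cyc2ns) holds (ns per cycle) << CYC2NS_SCALE_FACTOR, so
 * ns = cyc * cyc2ns >> CYC2NS_SCALE_FACTOR. E.g. at 2 GHz, cyc2ns is
 * 0.5 << 10 = 512, and 2,000,000 cycles * 512 >> 10 = 1,000,000 ns = 1 ms.
 */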
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
        atomic_add_short_return(1, (struct atomic_short *)
                &hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
        atomic_add_short_return(-1, (struct atomic_short *)
                &hmaster->uvhub_quiesce);
}
/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
        unsigned long mmr_offset, int right_shift, int this_cpu,
        struct bau_control *bcp, struct bau_control *smaster, long try)
{
        int relaxes = 0;
        unsigned long descriptor_status;
        unsigned long mmr;
        unsigned long mask;
        cycles_t ttime;
        cycles_t timeout_time;
        struct ptc_stats *stat = &per_cpu(ptcstats, this_cpu);
        struct bau_control *hmaster;

        hmaster = bcp->uvhub_master;
        timeout_time = get_cycles() + bcp->timeout_interval;

        /* spin on the status MMR, waiting for it to go idle */
        while ((descriptor_status = (((unsigned long)
                uv_read_local_mmr(mmr_offset) >>
                        right_shift) & UV_ACT_STATUS_MASK)) !=
                        DESC_STATUS_IDLE) {
                /*
                 * Our software ack messages may be blocked because there are
                 * no swack resources available. As long as none of them
                 * has timed out hardware will NACK our message and its
                 * state will stay IDLE.
                 */
                if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
                        stat->s_stimeout++;
                        return FLUSH_GIVEUP;
                } else if (descriptor_status ==
                                DESC_STATUS_DESTINATION_TIMEOUT) {
                        stat->s_dtimeout++;
                        ttime = get_cycles();

                        /*
                         * Our retries may be blocked by all destination
                         * swack resources being consumed, and a timeout
                         * pending. In that case hardware returns the
                         * ERROR that looks like a destination timeout.
                         */
                        if (cycles_2_us(ttime - bcp->send_message) < BIOS_TO) {
                                bcp->conseccompletes = 0;
                                return FLUSH_RETRY_PLUGGED;
                        }

                        bcp->conseccompletes = 0;
                        return FLUSH_RETRY_TIMEOUT;
                } else {
                        /*
                         * descriptor_status is still BUSY
                         */
                        cpu_relax();
                        relaxes++;
                        if (relaxes >= 10000) {
                                relaxes = 0;
                                if (get_cycles() > timeout_time) {
                                        quiesce_local_uvhub(hmaster);

                                        /* single-thread the register change */
                                        spin_lock(&hmaster->masks_lock);
                                        mmr = uv_read_local_mmr(mmr_offset);
                                        /* clear this cpu's 2-bit status field */
                                        mask = ~(3UL << right_shift);
                                        mmr &= mask;
                                        uv_write_local_mmr(mmr_offset, mmr);
                                        spin_unlock(&hmaster->masks_lock);
                                        end_uvhub_quiesce(hmaster);
                                        stat->s_busy++;
                                        return FLUSH_GIVEUP;
                                }
                        }
                }
        }
        bcp->conseccompletes++;
        return FLUSH_COMPLETE;
}
static inline cycles_t
sec_2_cycles(unsigned long sec)
{
        unsigned long ns;
        cycles_t cyc;

        ns = sec * 1000000000;
        cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
        return cyc;
}
/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'. atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
        spin_lock(lock);
        if (atomic_read(v) >= u) {
                spin_unlock(lock);
                return 0;
        }
        atomic_inc(v);
        spin_unlock(lock);
        return 1;
}
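/*
 * For example, with *v == 3: atomic_inc_unless_ge(lock, v, 4) bumps *v to 4
 * and returns 1, while atomic_inc_unless_ge(lock, v, 3) leaves it alone and
 * returns 0. The spinlock makes the read-compare-increment sequence atomic
 * even when another cpu is lowering 'u' (max_concurrent) concurrently.
 */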
/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to, plus
 * cpus that are on the local uvhub.
 *
 * Returns NULL if all flushing represented in the mask was done. The mask
 * is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set, representing any cpus on the local
 * uvhub (not current cpu) and any on remote uvhubs if the broadcast failed.
 */
const struct cpumask *uv_flush_send_and_wait(struct bau_desc *bau_desc,
                                             struct cpumask *flush_mask,
                                             struct bau_control *bcp)
{
        int right_shift;
        int uvhub;
        int bit;
        int completion_status = 0;
        int seq_number = 0;
        long try = 0;
        int cpu = bcp->uvhub_cpu;
        int this_cpu = bcp->cpu;
        int this_uvhub = bcp->uvhub;
        unsigned long mmr_offset;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;
        struct ptc_stats *stat = &per_cpu(ptcstats, bcp->cpu);
        struct bau_control *smaster = bcp->socket_master;
        struct bau_control *hmaster = bcp->uvhub_master;

        /*
         * Spin here while there are hmaster->max_concurrent or more active
         * descriptors. This is the per-uvhub 'throttle'.
         */
        if (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
                        &hmaster->active_descriptor_count,
                        hmaster->max_concurrent)) {
                stat->s_throttles++;
                do {
                        cpu_relax();
                } while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
                        &hmaster->active_descriptor_count,
                        hmaster->max_concurrent));
        }

        while (hmaster->uvhub_quiesce)
                cpu_relax();

        if (cpu < UV_CPUS_PER_ACT_STATUS) {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
                right_shift = cpu * UV_ACT_STATUS_SIZE;
        } else {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
                right_shift =
                    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
        }
        time1 = get_cycles();
        do {
                /*
                 * Every message from any given cpu gets a unique message
                 * sequence number. But retries use that same number.
                 * Our message may have timed out at the destination because
                 * all sw-ack resources are in use and there is a timeout
                 * pending there. In that case, our last send never got
                 * placed into the queue and we need to persist until it
                 * does.
                 *
                 * Make any retry a type MSG_RETRY so that the destination will
                 * free any resource held by a previous message from this cpu.
                 */
                if (try == 0) {
                        /* use message type set by the caller the first time */
                        seq_number = bcp->message_number++;
                } else {
                        /* use RETRY type on all the rest; same sequence */
                        bau_desc->header.msg_type = MSG_RETRY;
                        stat->s_retry_messages++;
                }
                bau_desc->header.sequence = seq_number;
                index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
                        bcp->uvhub_cpu;
                bcp->send_message = get_cycles();

                uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);

                try++;
                completion_status = uv_wait_completion(bau_desc, mmr_offset,
                        right_shift, this_cpu, bcp, smaster, try);

                if (completion_status == FLUSH_RETRY_PLUGGED) {
                        /*
                         * Our retries may be blocked by all destination swack
                         * resources being consumed, and a timeout pending. In
                         * that case hardware immediately returns the ERROR
                         * that looks like a destination timeout.
                         */
                        udelay(TIMEOUT_DELAY);
                        bcp->plugged_tries++;
                        if (bcp->plugged_tries >= PLUGSB4RESET) {
                                bcp->plugged_tries = 0;
                                quiesce_local_uvhub(hmaster);
                                spin_lock(&hmaster->queue_lock);
                                uv_reset_with_ipi(&bau_desc->distribution,
                                                        this_cpu);
                                spin_unlock(&hmaster->queue_lock);
                                end_uvhub_quiesce(hmaster);
                                bcp->ipi_attempts++;
                                stat->s_resets_plug++;
                        }
                } else if (completion_status == FLUSH_RETRY_TIMEOUT) {
                        hmaster->max_concurrent = 1;
                        bcp->timeout_tries++;
                        udelay(TIMEOUT_DELAY);
                        if (bcp->timeout_tries >= TIMEOUTSB4RESET) {
                                bcp->timeout_tries = 0;
                                quiesce_local_uvhub(hmaster);
                                spin_lock(&hmaster->queue_lock);
                                uv_reset_with_ipi(&bau_desc->distribution,
                                                        this_cpu);
                                spin_unlock(&hmaster->queue_lock);
                                end_uvhub_quiesce(hmaster);
                                bcp->ipi_attempts++;
                                stat->s_resets_timeout++;
                        }
                }
                if (bcp->ipi_attempts >= 3) {
                        bcp->ipi_attempts = 0;
                        completion_status = FLUSH_GIVEUP;
                        break;
                }
                cpu_relax();
        } while ((completion_status == FLUSH_RETRY_PLUGGED) ||
                 (completion_status == FLUSH_RETRY_TIMEOUT));
        time2 = get_cycles();

        if ((completion_status == FLUSH_COMPLETE) && (bcp->conseccompletes > 5)
            && (hmaster->max_concurrent < hmaster->max_concurrent_constant))
                hmaster->max_concurrent++;

        /*
         * hold any cpu not timing out here; no other cpu currently held by
         * the 'throttle' should enter the activation code
         */
        while (hmaster->uvhub_quiesce)
                cpu_relax();
        atomic_dec(&hmaster->active_descriptor_count);

        /* guard against cycles wrap */
        if (time2 > time1)
                stat->s_time += (time2 - time1);
        else
                stat->s_requestor--; /* don't count this one */
        if (completion_status == FLUSH_COMPLETE && try > 1)
                stat->s_retriesok++;
        else if (completion_status == FLUSH_GIVEUP) {
                /*
                 * Cause the caller to do an IPI-style TLB shootdown on
                 * the target cpu's, all of which are still in the mask.
                 */
                stat->s_giveup++;
                return flush_mask;
        }

        /*
         * Success, so clear the remote cpu's from the mask so we don't
         * use the IPI method of shootdown on them.
         */
        for_each_cpu(bit, flush_mask) {
                uvhub = uv_cpu_to_blade_id(bit);
                if (uvhub == this_uvhub)
                        continue;
                cpumask_clear_cpu(bit, flush_mask);
        }
        if (!cpumask_empty(flush_mask))
                return flush_mask;
        return NULL;
}
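/*
 * Note how the throttle adapts: a destination timeout clamps the uvhub's
 * max_concurrent down to 1 (a single active descriptor), and only after
 * more than five consecutive clean completions is it raised again, one
 * step at a time, back up toward max_concurrent_constant.
 */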
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done. The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long va, unsigned int cpu)
{
        int remotes;
        int tcpu;
        int uvhub;
        int locals = 0;
        struct bau_desc *bau_desc;
        struct cpumask *flush_mask;
        struct ptc_stats *stat;
        struct bau_control *bcp;

        if (nobau)
                return cpumask;

        bcp = &per_cpu(bau_control, cpu);
        /*
         * Each sending cpu has a per-cpu mask which it fills from the caller's
         * cpu mask. Only remote cpus are converted to uvhubs and copied.
         */
        flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
        /*
         * copy cpumask to flush_mask, removing current cpu
         * (current cpu should already have been flushed by the caller and
         *  should never be returned if we return flush_mask)
         */
        cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
        if (cpu_isset(cpu, *cpumask))
                locals++; /* current cpu was targeted */

        bau_desc = bcp->descriptor_base;
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;

        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
        remotes = 0;
        for_each_cpu(tcpu, flush_mask) {
                uvhub = uv_cpu_to_blade_id(tcpu);
                if (uvhub == bcp->uvhub) {
                        locals++;
                        continue;
                }
                bau_uvhub_set(uvhub, &bau_desc->distribution);
                remotes++;
        }
        if (remotes == 0) {
                /*
                 * No off_hub flushing; return status for local hub.
                 * Return the caller's mask if all were local (the current
                 * cpu may be in that mask).
                 */
                if (locals)
                        return cpumask;
                return NULL;
        }
        stat = &per_cpu(ptcstats, cpu);
        stat->s_requestor++;
        stat->s_ntargcpu += remotes;
        remotes = bau_uvhub_weight(&bau_desc->distribution);
        stat->s_ntarguvhub += remotes;
        if (remotes >= 16)
                stat->s_ntarguvhub16++;
        else if (remotes >= 8)
                stat->s_ntarguvhub8++;
        else if (remotes >= 4)
                stat->s_ntarguvhub4++;
        else if (remotes >= 2)
                stat->s_ntarguvhub2++;
        else
                stat->s_ntarguvhub1++;

        bau_desc->payload.address = va;
        bau_desc->payload.sending_cpu = cpu;

        /*
         * uv_flush_send_and_wait returns null if all cpu's were messaged, or
         * the adjusted flush_mask if any cpu's were not messaged.
         */
        return uv_flush_send_and_wait(bau_desc, flush_mask, bcp);
}
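/*
 * A sketch of the caller's side of the contract (the call site in this
 * era's arch/x86/mm/tlb.c looks roughly like the following): a non-NULL
 * return means the remaining cpus in the returned mask still need a
 * conventional IPI shootdown; NULL means nothing more to do.
 *
 *	cpumask = uv_flush_tlb_others(cpumask, mm, va, cpu);
 *	if (cpumask)
 *		flush_tlb_others_ipi(cpumask, mm, va);
 */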
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptable cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
        int count = 0;
        cycles_t time_start;
        struct bau_payload_queue_entry *msg;
        struct bau_control *bcp;
        struct ptc_stats *stat;
        struct msg_desc msgdesc;

        time_start = get_cycles();
        bcp = &per_cpu(bau_control, smp_processor_id());
        stat = &per_cpu(ptcstats, smp_processor_id());
        msgdesc.va_queue_first = bcp->va_queue_first;
        msgdesc.va_queue_last = bcp->va_queue_last;
        msg = bcp->bau_msg_head;
        while (msg->sw_ack_vector) {
                count++;
                msgdesc.msg_slot = msg - msgdesc.va_queue_first;
                msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
                msgdesc.msg = msg;
                uv_bau_process_message(&msgdesc, bcp);
                msg++;
                if (msg > msgdesc.va_queue_last)
                        msg = msgdesc.va_queue_first;
                bcp->bau_msg_head = msg;
        }
        stat->d_time += (get_cycles() - time_start);
        if (!count)
                stat->d_nomsg++;
        else if (count > 1)
                stat->d_multmsg++;
        ack_APIC_irq();
}
/*
 * Each target uvhub (i.e. a uvhub that has cpu's) needs to have
 * shootdown message timeouts enabled. The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void uv_enable_timeouts(void)
{
        int uvhub;
        int nuvhubs;
        int pnode;
        unsigned long mmr_image;

        nuvhubs = uv_num_possible_blades();

        for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
                if (!uv_blade_nr_possible_cpus(uvhub))
                        continue;

                pnode = uv_blade_to_pnode(uvhub);
                mmr_image =
                    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
                /*
                 * Set the timeout period and then lock it in, in three
                 * steps; captures and locks in the period.
                 *
                 * To program the period, the SOFT_ACK_MODE must be off.
                 */
                mmr_image &= ~((unsigned long)1 <<
                    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
                uv_write_global_mmr64
                    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
                /*
                 * Set the 4-bit period.
                 */
                mmr_image &= ~((unsigned long)0xf <<
                    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
                mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
                    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
                uv_write_global_mmr64
                    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
                /*
                 * Subsequent reversals of the timebase bit (3) cause an
                 * immediate timeout of one or all INTD resources as
                 * indicated in bits 2:0 (7 causes all of them to timeout).
                 */
                mmr_image |= ((unsigned long)1 <<
                    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
                uv_write_global_mmr64
                    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
        }
}
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
millisec_2_cycles(unsigned long millisec)
{
        unsigned long ns;
        unsigned long long cyc;

        ns = millisec * 1000;
        cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
        return cyc;
}
/*
 * Display the statistics thru /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
        struct ptc_stats *stat;
        int cpu;

        cpu = *(loff_t *)data;

        if (!cpu) {
                seq_printf(file,
                "# cpu sent stime numuvhubs numuvhubs16 numuvhubs8 ");
                seq_printf(file,
                "numuvhubs4 numuvhubs2 numuvhubs1 numcpus dto ");
                seq_printf(file,
                "retries rok resetp resett giveup sto bz throt ");
                seq_printf(file,
                "sw_ack recv rtime all ");
                seq_printf(file,
                "one mult none retry canc nocan reset rcan\n");
        }
        if (cpu < num_possible_cpus() && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
                /* source side statistics */
                seq_printf(file,
                        "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
                           cpu, stat->s_requestor, cycles_2_us(stat->s_time),
                           stat->s_ntarguvhub, stat->s_ntarguvhub16,
                           stat->s_ntarguvhub8, stat->s_ntarguvhub4,
                           stat->s_ntarguvhub2, stat->s_ntarguvhub1,
                           stat->s_ntargcpu, stat->s_dtimeout);
                seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
                           stat->s_retry_messages, stat->s_retriesok,
                           stat->s_resets_plug, stat->s_resets_timeout,
                           stat->s_giveup, stat->s_stimeout,
                           stat->s_busy, stat->s_throttles);
                /* destination side statistics */
                seq_printf(file,
                        "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n",
                           uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
                                UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
                           stat->d_requestee, cycles_2_us(stat->d_time),
                           stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
                           stat->d_nomsg, stat->d_retries, stat->d_canceled,
                           stat->d_nocanceled, stat->d_resets,
                           stat->d_rcanceled);
        }

        return 0;
}
/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 * >0: maximum concurrent active descriptors per uvhub (throttle)
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
                                 size_t count, loff_t *data)
{
        int cpu;
        long input_arg;
        char optstr[64];
        struct ptc_stats *stat;
        struct bau_control *bcp;

        if (count == 0 || count > sizeof(optstr))
                return -EINVAL;
        if (copy_from_user(optstr, user, count))
                return -EFAULT;
        optstr[count - 1] = '\0';
        if (strict_strtol(optstr, 10, &input_arg) < 0) {
                printk(KERN_DEBUG "%s is invalid\n", optstr);
                return -EINVAL;
        }

        if (input_arg == 0) {
                printk(KERN_DEBUG "# cpu: cpu number\n");
                printk(KERN_DEBUG "Sender statistics:\n");
                printk(KERN_DEBUG
                "sent: number of shootdown messages sent\n");
                printk(KERN_DEBUG
                "stime: time spent sending messages\n");
                printk(KERN_DEBUG
                "numuvhubs: number of hubs targeted with shootdown\n");
                printk(KERN_DEBUG
                "numuvhubs16: number times 16 or more hubs targeted\n");
                printk(KERN_DEBUG
                "numuvhubs8: number times 8 or more hubs targeted\n");
                printk(KERN_DEBUG
                "numuvhubs4: number times 4 or more hubs targeted\n");
                printk(KERN_DEBUG
                "numuvhubs2: number times 2 or more hubs targeted\n");
                printk(KERN_DEBUG
                "numuvhubs1: number times 1 hub targeted\n");
                printk(KERN_DEBUG
                "numcpus: number of cpus targeted with shootdown\n");
                printk(KERN_DEBUG
                "dto: number of destination timeouts\n");
                printk(KERN_DEBUG
                "retries: destination timeout retries sent\n");
                printk(KERN_DEBUG
                "rok: destination timeouts successfully retried\n");
                printk(KERN_DEBUG
                "resetp: ipi-style resource resets for plugs\n");
                printk(KERN_DEBUG
                "resett: ipi-style resource resets for timeouts\n");
                printk(KERN_DEBUG
                "giveup: fall-backs to ipi-style shootdowns\n");
                printk(KERN_DEBUG
                "sto: number of source timeouts\n");
                printk(KERN_DEBUG
                "bz: number of stay-busy's\n");
                printk(KERN_DEBUG
                "throt: number times spun in throttle\n");
                printk(KERN_DEBUG "Destination side statistics:\n");
                printk(KERN_DEBUG
                "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
                printk(KERN_DEBUG
                "recv: shootdown messages received\n");
                printk(KERN_DEBUG
                "rtime: time spent processing messages\n");
                printk(KERN_DEBUG
                "all: shootdown all-tlb messages\n");
                printk(KERN_DEBUG
                "one: shootdown one-tlb messages\n");
                printk(KERN_DEBUG
                "mult: interrupts that found multiple messages\n");
                printk(KERN_DEBUG
                "none: interrupts that found no messages\n");
                printk(KERN_DEBUG
                "retry: number of retry messages processed\n");
                printk(KERN_DEBUG
                "canc: number messages canceled by retries\n");
                printk(KERN_DEBUG
                "nocan: number retries that found nothing to cancel\n");
                printk(KERN_DEBUG
                "reset: number of ipi-style reset requests processed\n");
                printk(KERN_DEBUG
                "rcan: number messages canceled by reset requests\n");
        } else if (input_arg == -1) {
                for_each_present_cpu(cpu) {
                        stat = &per_cpu(ptcstats, cpu);
                        memset(stat, 0, sizeof(struct ptc_stats));
                }
        } else {
                uv_bau_max_concurrent = input_arg;
                bcp = &per_cpu(bau_control, smp_processor_id());
                if (uv_bau_max_concurrent < 1 ||
                    uv_bau_max_concurrent > bcp->cpus_in_uvhub) {
                        printk(KERN_DEBUG
                                "Error: BAU max concurrent %d; %d is invalid\n",
                                bcp->max_concurrent, uv_bau_max_concurrent);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Set BAU max concurrent:%d\n",
                       uv_bau_max_concurrent);
                for_each_present_cpu(cpu) {
                        bcp = &per_cpu(bau_control, cpu);
                        bcp->max_concurrent = uv_bau_max_concurrent;
                }
        }

        return count;
}
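/*
 * Example interaction with this control file (the /proc path comes from
 * UV_PTC_BASENAME, defined in asm/uv/uv_bau.h):
 *
 *	cat <UV_PTC_BASENAME file>	# dump the per-cpu counters
 *	echo  0 > <file>		# print the legend to the kernel log
 *	echo -1 > <file>		# zero all counters
 *	echo  8 > <file>		# throttle: 8 active descriptors/hub
 */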
static const struct seq_operations uv_ptc_seq_ops = {
        .start          = uv_ptc_seq_start,
        .next           = uv_ptc_seq_next,
        .stop           = uv_ptc_seq_stop,
        .show           = uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
        .open           = uv_ptc_proc_open,
        .read           = seq_read,
        .write          = uv_ptc_proc_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
};

static int __init uv_ptc_init(void)
{
        struct proc_dir_entry *proc_uv_ptc;

        if (!is_uv_system())
                return 0;

        proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
                                  &proc_uv_ptc_operations);
        if (!proc_uv_ptc) {
                printk(KERN_ERR "unable to create %s proc entry\n",
                       UV_PTC_BASENAME);
                return -EINVAL;
        }
        return 0;
}
/*
 * initialize the sending side's sending buffers
 */
static void
uv_activation_descriptor_init(int node, int pnode)
{
        int i;
        int cpu;
        unsigned long pa;
        unsigned long m;
        unsigned long n;
        struct bau_desc *bau_desc;
        struct bau_desc *bd2;
        struct bau_control *bcp;

        /*
         * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
         * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
         */
        bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
                UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
        BUG_ON(!bau_desc);

        pa = uv_gpa(bau_desc); /* need the real nasid */
        n = pa >> uv_nshift;
        m = pa & uv_mmask;

        uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                              (n << UV_DESC_BASE_PNODE_SHIFT | m));

        /*
         * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
         * cpu even though we only use the first one; one descriptor can
         * describe a broadcast to 256 uv hubs.
         */
        for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
                i++, bd2++) {
                memset(bd2, 0, sizeof(struct bau_desc));
                bd2->header.sw_ack_flag = 1;
                /*
                 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
                 * in the partition. The bit map will indicate uvhub numbers,
                 * which are 0-N in a partition. Pnodes are unique system-wide.
                 */
                bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
                bd2->header.dest_subnodeid = 0x10; /* the LB */
                bd2->header.command = UV_NET_ENDPOINT_INTD;
                bd2->header.int_both = 1;
                /*
                 * all others need to be set to zero:
                 *   fairness chaining multilevel count replied_to
                 */
        }
        for_each_present_cpu(cpu) {
                if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
                        continue;
                bcp = &per_cpu(bau_control, cpu);
                bcp->descriptor_base = bau_desc;
        }
}
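/*
 * Size check on the above: 64 bytes/descriptor * 8 descriptors/cpu *
 * 32 cpus = 16KB of descriptor space per uvhub. A sender selects its
 * first descriptor as descriptor_base + UV_ITEMS_PER_DESCRIPTOR *
 * uvhub_cpu (see uv_flush_tlb_others), so consecutive cpus sit 512
 * bytes apart in this table.
 */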
/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
        int pn;
        int cpu;
        char *cp;
        unsigned long pa;
        struct bau_payload_queue_entry *pqp;
        struct bau_payload_queue_entry *pqp_malloc;
        struct bau_control *bcp;

        pqp = (struct bau_payload_queue_entry *) kmalloc_node(
                (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
                GFP_KERNEL, node);
        BUG_ON(!pqp);
        pqp_malloc = pqp;

        cp = (char *)pqp + 31;
        pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

        for_each_present_cpu(cpu) {
                if (pnode != uv_cpu_to_pnode(cpu))
                        continue;
                /* for every cpu on this pnode: */
                bcp = &per_cpu(bau_control, cpu);
                bcp->va_queue_first = pqp;
                bcp->bau_msg_head = pqp;
                bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
        }
        /*
         * need the pnode of where the memory was really allocated
         */
        pa = uv_gpa(pqp);
        pn = pa >> uv_nshift;
        uv_write_global_mmr64(pnode,
                              UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
                              ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
                              uv_physnodeaddr(pqp));
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
                              uv_physnodeaddr(pqp));
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
                              uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
        /* in effect, all msg_type's are set to MSG_NOOP */
        memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}
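/*
 * The pointer gymnastics above are a manual 32-byte alignment:
 * "(cp >> 5) << 5" with cp = base + 31 rounds the kmalloc'd address up
 * to the next 32-byte boundary, and the extra (DEST_Q_SIZE + 1)th entry
 * in the allocation is what makes that rounding safe. pqp_malloc keeps
 * the original address, which is the one a kfree() would need.
 */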
/*
 * Initialization of each UV hub's structures
 */
static void __init uv_init_uvhub(int uvhub, int vector)
{
        int node;
        int pnode;
        unsigned long apicid;

        node = uvhub_to_first_node(uvhub);
        pnode = uv_blade_to_pnode(uvhub);
        uv_activation_descriptor_init(node, pnode);
        uv_payload_queue_init(node, pnode);
        /*
         * the below initialization can't be in firmware because the
         * messaging IRQ will be determined by the OS
         */
        apicid = uvhub_to_first_apicid(uvhub);
        uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
                              ((apicid << 32) | vector));
}
/*
 * initialize the bau_control structure for each cpu
 */
static void uv_init_per_cpu(int nuvhubs)
{
        int i, j, k;
        int cpu;
        int pnode;
        int uvhub;
        short socket = 0;
        struct bau_control *bcp;
        struct uvhub_desc *bdp;
        struct socket_desc *sdp;
        struct bau_control *hmaster = NULL;
        struct bau_control *smaster = NULL;
        struct socket_desc {
                short num_cpus;
                short cpu_number[16];
        };
        struct uvhub_desc {
                short num_sockets;
                short num_cpus;
                short uvhub;
                short pnode;
                struct socket_desc socket[2];
        };
        struct uvhub_desc *uvhub_descs;

        uvhub_descs = (struct uvhub_desc *)
                kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
        memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
        for_each_present_cpu(cpu) {
                bcp = &per_cpu(bau_control, cpu);
                memset(bcp, 0, sizeof(struct bau_control));
                spin_lock_init(&bcp->masks_lock);
                bcp->max_concurrent = uv_bau_max_concurrent;
                pnode = uv_cpu_hub_info(cpu)->pnode;
                uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
                bdp = &uvhub_descs[uvhub];
                bdp->num_cpus++;
                bdp->uvhub = uvhub;
                bdp->pnode = pnode;
                /* time interval to catch a hardware stay-busy bug */
                bcp->timeout_interval = millisec_2_cycles(3);
                /* kludge: assume uv_hub.h is constant */
                socket = (cpu_physical_id(cpu)>>5)&1;
                if (socket >= bdp->num_sockets)
                        bdp->num_sockets = socket+1;
                sdp = &bdp->socket[socket];
                sdp->cpu_number[sdp->num_cpus] = cpu;
                sdp->num_cpus++;
        }
        for_each_possible_blade(uvhub) {
                bdp = &uvhub_descs[uvhub];
                for (i = 0; i < bdp->num_sockets; i++) {
                        sdp = &bdp->socket[i];
                        for (j = 0; j < sdp->num_cpus; j++) {
                                cpu = sdp->cpu_number[j];
                                bcp = &per_cpu(bau_control, cpu);
                                bcp->cpu = cpu;
                                if (j == 0) {
                                        smaster = bcp;
                                        if (i == 0)
                                                hmaster = bcp;
                                }
                                bcp->cpus_in_uvhub = bdp->num_cpus;
                                bcp->cpus_in_socket = sdp->num_cpus;
                                bcp->socket_master = smaster;
                                bcp->uvhub_master = hmaster;
                                bcp->uvhub = bdp->uvhub;
                                for (k = 0; k < DEST_Q_SIZE; k++)
                                        bcp->socket_acknowledge_count[k] = 0;
                                bcp->uvhub_cpu =
                                  uv_cpu_hub_info(cpu)->blade_processor_id;
                        }
                }
        }
        kfree(uvhub_descs);
}
/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
        int uvhub;
        int pnode;
        int nuvhubs;
        int cur_cpu;
        int vector;
        unsigned long mmr;

        if (!is_uv_system())
                return 0;

        for_each_possible_cpu(cur_cpu)
                zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
                                        GFP_KERNEL, cpu_to_node(cur_cpu));

        uv_bau_max_concurrent = MAX_BAU_CONCURRENT;
        uv_nshift = uv_hub_info->m_val;
        uv_mmask = (1UL << uv_hub_info->m_val) - 1;
        nuvhubs = uv_num_possible_blades();

        uv_init_per_cpu(nuvhubs);

        uv_partition_base_pnode = 0x7fffffff;
        for (uvhub = 0; uvhub < nuvhubs; uvhub++)
                if (uv_blade_nr_possible_cpus(uvhub) &&
                    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
                        uv_partition_base_pnode = uv_blade_to_pnode(uvhub);

        vector = UV_BAU_MESSAGE;
        for_each_possible_blade(uvhub)
                if (uv_blade_nr_possible_cpus(uvhub))
                        uv_init_uvhub(uvhub, vector);

        uv_enable_timeouts();
        alloc_intr_gate(vector, uv_bau_message_intr1);

        for_each_possible_blade(uvhub) {
                pnode = uv_blade_to_pnode(uvhub);
                /* INIT the bau */
                uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_ACTIVATION_CONTROL,
                                      ((unsigned long)1 << 63));
                mmr = 1; /* should be 1 to broadcast to both sockets */
                uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST, mmr);
        }

        return 0;
}
core_initcall(uv_bau_init);
core_initcall(uv_ptc_init);