/*
 * sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31	NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * Scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution.  But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
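
/*
 * Illustrative arithmetic, added for clarity (not part of the original
 * source); it assumes HZ=1000, so one SPU scheduler tick covers 10 jiffies
 * (10ms):
 *
 *	MIN_SPU_TIMESLICE = max(5 * 1000 / 10000, 1) = max(0, 1) = 1 tick
 *	DEF_SPU_TIMESLICE = 100 * 1000 / 10000       = 10 ticks (100ms)
 *
 *	nice -20 (prio 100): SCALE_PRIO(40, 100) = 40 * 40 / 20 = 80 ticks (800ms)
 *	nice   0 (prio 120): SCALE_PRIO(10, 120) = 10 * 20 / 20 = 10 ticks (100ms)
 *	nice +19 (prio 139): SCALE_PRIO(10, 139) = 10 *  1 / 20 = 0, clamped
 *	                     to MIN_SPU_TIMESLICE = 1 tick
 */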

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * 32-Bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * by definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * A lot of places that don't hold list_mutex poke into
	 * cpus_allowed, including grab_runnable_context which
	 * already holds the runq_lock.  So abuse runq_lock
	 * to protect this field as well.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	__spu_update_sched_info(ctx);
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

static BLOCKING_NOTIFIER_HEAD(spu_switch_notifier);

void spu_switch_notify(struct spu *spu, struct spu_context *ctx)
{
	blocking_notifier_call_chain(&spu_switch_notifier,
			    ctx ? ctx->object_id : 0, spu);
}

static void notify_spus_active(void)
{
	int node;

	/*
	 * Wake up the active spu_contexts.
	 *
	 * When the awakened processes see their "notify_active" flag is set,
	 * they will call spu_switch_notify().
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

int spu_switch_event_register(struct notifier_block * n)
{
	int ret;
	ret = blocking_notifier_chain_register(&spu_switch_notifier, n);
	if (!ret)
		notify_spus_active();
	return ret;
}
EXPORT_SYMBOL_GPL(spu_switch_event_register);

int spu_switch_event_unregister(struct notifier_block * n)
{
	return blocking_notifier_chain_unregister(&spu_switch_notifier, n);
}
EXPORT_SYMBOL_GPL(spu_switch_event_unregister);
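
/*
 * Illustrative usage sketch, added for clarity (not part of the original
 * file, and the names my_switch_cb/my_nb are hypothetical): a client such
 * as an SPU profiler hooks switch events with a notifier_block whose
 * callback receives the context's object_id as the 'val' argument and the
 * struct spu pointer as the 'data' argument:
 *
 *	static int my_switch_cb(struct notifier_block *nb,
 *				unsigned long object_id, void *spu)
 *	{
 *		// object_id is 0 when the SPU switches to no context
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_nb = { .notifier_call = my_switch_cb };
 *	...
 *	spu_switch_event_register(&my_nb);
 *
 * On successful registration notify_spus_active() is called so that
 * already-running contexts report themselves to the new listener.
 */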

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}
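
/*
 * Illustrative example, added for clarity (not from the original source):
 * for a gang whose affinity list holds four contexts around aff_ref_ctx,
 * the two loops above give the reference context aff_offset 0, the
 * contexts on one side of it -1, -2, ... and those on the other side
 * +1, +2, ...  ctx_location() later walks that many schedulable SPUs away
 * from gang->aff_ref_spu to place each context.
 */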

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		 int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It reports whether the context takes part in gang affinity, and on
 * first use computes and caches the gang's reference SPU
 * (gang->aff_ref_spu).
 */
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.
	 *
	 * Fortunately we need to wake up all these threads at the same time
	 * and can simply skip the runqueue addition for all but the first
	 * thread getting into this codepath.
	 *
	 * It's still quite hacky, and long-term we should proxy all other
	 * threads through the owner thread so that spu_run is in control
	 * of all the scheduling activity for a given context.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}
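
/*
 * Explanatory note, added for clarity: the runqueue is an array of MAX_PRIO
 * lists plus a bitmap of non-empty priorities, so the highest-priority
 * waiter can be found with a single find_first_bit() in
 * grab_runnable_context().  For example, a waiting nice-0 context
 * (prio 120) sits on runq[120] with bit 120 set; grab_runnable_context(121,
 * node) returns it because 120 is the first bit set below 121.
 */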

static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}
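
/*
 * Explanatory note, added for clarity: the context is put on the runqueue
 * and both runq_lock and state_mutex are released before schedule(), so
 * that other threads can operate on the context while it waits for an SPU.
 * __spu_deactivate(), spusched_tick() and find_victim() hand over a freed
 * SPU simply by waking stop_wq; the woken thread re-takes state_mutex and
 * retries the spu_activate() loop.
 */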

static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);
			/*
			 * We need to break out of the wait loop in spu_run
			 * manually to ensure this context gets put on the
			 * runqueue again ASAP.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If there are multiple threads waiting for a single context
		 * only one actually binds the context while the others will
		 * only be able to acquire the state_mutex once the context
		 * already is in runnable state.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime thread we try to get it running by
		 * preempting a lower priority thread.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			wake_up_all(&ctx->run_wq);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}
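
/*
 * Explanatory note, added for clarity: spu_activate() is the entry point
 * used by the spu_run path.  The order of attempts is: return early if the
 * context is already bound (ctx->spu), grab an idle SPU (spu_get_idle),
 * preempt a lower-priority context if the caller is realtime (find_victim),
 * and otherwise sleep on the runqueue (spu_prio_wait) until one of the
 * deactivate/tick paths frees an SPU and wakes us.
 */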

/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			/* XXX(hch): check for affinity here as well */
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * Unfortunately list_mutex ranks outside of state_mutex, so
	 * we have to trylock here.  If we fail give the context another
	 * tick and try again.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * We need to break out of the wait loop in
			 * spu_run manually to ensure this context
			 * gets put on the runqueue again ASAP.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

/**
 * spu_calc_load - given tick count, update the avenrun load estimates.
 * @ticks:	tick count
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
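
/*
 * Illustrative arithmetic, added for clarity, using the generic kernel
 * fixed-point constants FSHIFT = 11, FIXED_1 = 2048 and EXP_1 = 1884: with
 * an idle history (spu_avenrun[0] == 0) and two contexts active, one
 * LOAD_FREQ interval (5 seconds) gives
 *
 *	load = (0 * 1884 + 2*2048 * (2048 - 1884)) >> 11 = 328
 *
 * which reads back as 328/2048 ~= 0.16, converging towards 2.00 while the
 * two contexts stay runnable.
 */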

static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct timespec ts;
	struct spu *spu;
	enum spu_utilization_state old_state;

	ktime_get_ts(&ts);
	curtime = timespec_to_ns(&ts);
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
	}
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
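
/*
 * Illustrative example, added for clarity: with FIXED_1 = 2048, a
 * fixed-point load of 3072 (i.e. 1.5) prints as "1.50":
 * LOAD_INT(3072) = 1 and LOAD_FRAC(3072) = (1024 * 100) >> 11 = 50.
 */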

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it here to have a 100% compatible interface.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}
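
/*
 * Example /proc/spu_loadavg line (illustrative values only, added for
 * clarity):
 *
 *	0.16 0.07 0.02 2/8 1305
 *
 * i.e. the 1/5/15 minute SPU load averages, "running or waiting"/"total"
 * spu contexts, and the last pid allocated in the namespace.
 */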

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}