/*
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  SPE Book IV indicates that SPE
 * allocation should follow a "serially reusable device" model,
 * in which the SPE is assigned a task until it completes.  When
 * this is not possible, this sequence may be used to preemptively
 * save, and then later (optionally) restore the context of a
 * program executing on an SPE.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

/* Poll a condition, spinning briefly with cpu_relax() before
 * yielding the processor.
 */
#define RELAX_SPIN_COUNT				1000
#define POLL_WHILE_TRUE(_c) {				\
    do {						\
	int _i;						\
	for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \
		cpu_relax();				\
	}						\
	if (unlikely(_c)) yield();			\
    } while (_c);					\
  }

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))

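/*
 * Usage note (illustrative): callers poll memory-mapped status
 * registers until a condition clears, e.g.
 *
 *	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);
 *
 * spins for up to RELAX_SPIN_COUNT iterations of cpu_relax(), then
 * calls yield() and re-tests until SPU_Status[R] reads 0.
 */
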
static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *     Acquire SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *     Release SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Save, Step 2:
	 * Save, Step 6:
	 *     If any of the SPU_Status[E,L,IS] fields is '1', this
	 *     SPU is in isolate state and cannot be context
	 *     saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26:
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For the present, this implementation will not set a global
	 *     watchdog timer, as virtualization & variable system load
	 *     may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
		MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) |
			    MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa) {
			csa->priv2.mfc_control_RW =
			    in_be64(&priv2->mfc_control_RW) &
			    ~MFC_CNTL_SUSPEND_DMA_QUEUE;
		}
		break;
	}
}

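/*
 * The switch above distinguishes three initial DMA-suspend states:
 * a suspend already in progress (wait for completion, then record
 * MFC_CNTL with the suspend bit set), a suspend already complete
 * (record only), and normal queue operation (request the suspend,
 * wait, and record MFC_CNTL with the suspend bit clear, so that the
 * restore path resumes a queue that was running when the save began).
 */
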
static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds].  Update saved copy of
	 *     CSA.MFC_CNTL[Ds].
	 */
	if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) {
		csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING;
		csa->suspend_time = get_cycles();
		out_be64(&priv2->spu_chnlcntptr_RW, 7ULL);
		eieio();
		csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW);
		eieio();
	} else {
		csa->priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING;
	}
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Read PPE Timebase High and Timebase low registers
	 *     and save in CSA.  TBD.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register.
	 *     Then issue a PPE sync instruction.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 * Restore, Step 13.
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 24.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Restore, Step 15.
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 29:
	 *     If MFC_SR1[R]='1', save SLBs in CSA.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
			csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
			eieio();
		}
	}
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset the SPU_LSLR register to its default
	 *     value (LS_ADDR_MASK).
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 *     Save CH 1 (without channel count), then
	 *     save the following CH: [0,3,4,24,25,27].
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 19:
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
		eieio();
	}
}

static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	u64 llp;

	if (REGION_ID(ea) == KERNEL_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) |
	    SLB_VSID_KERNEL | llp;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;
}

static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     to provide access to SPU context save code and
	 *     LSCSA.
	 *
	 *     This implementation places both the context
	 *     switch code and LSCSA in kernel address space.
	 *
	 *     Further, this implementation assumes that the
	 *     MFC_SR1[R]=1 (in other words, assume that
	 *     translation is desired by OS environment).
	 */
	invalidate_slbs(csa, spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     This implementation enables only class1
	 *     (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2))
				cpu_relax();
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

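/*
 * A note on send_mfc_dma(), above: the transfer is issued in chunks of
 * at most MFC_MAX_DMA_SIZE bytes, and the command-status word is
 * re-read after each enqueue; a nonzero low status means the command
 * was not accepted (e.g. the queue was full), so the code relaxes and
 * retries.  The 16 KB local-storage transfers used by save_ls_16kb()
 * and restore_ls_16kb() are assumed to fit in one or very few commands.
 */
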
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 *     point address of context save code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to copy context save code
	 *     to local storage and start SPU.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     and issue eieio instruction.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 45.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete)
	 *     or write PPU_QueryType[TS]=01 and wait for Tag Group
	 *     Complete Interrupt.  Write INT_Stat_Class0 or
	 *     INT_Stat_Class2 with value of 'handled'.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0 or wait for SPU Class 0
	 *     or SPU Class 2 interrupt.  Write INT_Stat_class0
	 *     or INT_Stat_class2 with value of handled.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, ~(0ul));
	spu_int_stat_clear(spu, 2, ~(0ul));
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Save, Step 54:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context save succeeded, otherwise context save
	 *     failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 4:
	 *     If required, notify the "using application" that
	 *     the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 * Restore, Step 46.
	 *     Write MFC_Cntl[Dh,Sc]='1','1' to suspend
	 *     the queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned.
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *     If SPU_Status[R]=1, stop SPU execution
	 *     and wait for stop to complete.
	 *
	 *     Returns       1 if SPU_Status[R]=1 on entry.
	 *                   0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *     If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *     release SPU from isolate state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STAUTUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STAUTUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 20:
	 *     Reset CH 1 (without channel count), then
	 *     reset the following CH: [0,3,4,24,25,27].
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If any of CSA.SPU_Status[I,S,H,P]=1, then add the correct
	 *     instruction sequence to the end of the SPU based restore
	 *     code (after the "context restored" stop and signal) to
	 *     restore the correct SPU status.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by
		 * 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Two nop instructions.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction followed
		 * by 'br -4'.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If the CSA.SPU_Status[I,S,H,P,R]=0 then
	 *     add a 'br *' instruction to the end of
	 *     the SPU based restore code.
	 *
	 *     NOTE: Rather than modifying the SPU executable, we
	 *     instead add a new 'stopped_status' field to the
	 *     LSCSA.  The SPU-side restore reads this field and
	 *     takes the appropriate action when exiting.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

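/*
 * Worked example (illustrative, hypothetical stop code): a context
 * that stopped on a plain stop-and-signal with code 0x1234 has
 * SPU_Status[P]=1 and SPU_Status[SC]=0x1234, so part1 records
 *
 *	csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
 *	csa->lscsa->stopped_status.slot[1] = 0x1234;
 *
 * and the SPU-side restore code re-raises that exact stop condition
 * after the "context restored" stop-and-signal.  A context with none
 * of the stop/halt/error bits set and R=0 instead gets
 * SPU_STOPPED_STATUS_R from part2 above.
 */
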
static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue MFC DMA command to copy context
	 *     restore code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was
	 *     running) then adjust decrementer, set
	 *     decrementer running status in LSCSA,
	 *     and set decrementer "wrapped" status
	 *     in LSCSA.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr.slot[0] -= delta_time;
	}
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data into the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data into the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and SPU_Status[SC] = "success",
	 *     context restore succeeded, otherwise context restore
	 *     failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, then
	 *     restore the error or single step state.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16kb of local storage from CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with value of -1.
	 *     Write INT_STAT_class1 with value of -1.
	 *     Write INT_STAT_class2 with value of -1.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Se]!=0 then restore
	 *     MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *     Set the Lock Line Reservation Lost Event by:
	 *       1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *       2. If CSA.SPU_Channel_0_Count=0 and
	 *          CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *          CSA.SPU_Event_Status[Lr]=0 then set
	 *          CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *     If the status of the CSA software decrementer
	 *     "wrapped" flag is set, OR in a '1' to
	 *     CSA.SPU_Event_Status[Tm].
	 */
	if (csa->lscsa->decr_status.slot[0] == 1) {
		csa->spu_chnldata_RW[0] |= 0x20;
	}
	if ((csa->lscsa->decr_status.slot[0] == 1) &&
	    (csa->spu_chnlcnt_RW[0] == 0 &&
	     ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) &&
	     ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *     Restore CH 1 (without channel count), then
	 *     restore the following CH: [0,3,4,24,25,27].
	 */

	/* Restore CH 1 without count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[1]);

	/* Restore the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23].
	 */
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore MFC_RdSPU_MB from CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then
	 *     read from the PPUINT_MB register.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
	}
}

static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 68:
	 *     If MFC_SR1[R]='1', restore SLBs from CSA.
	 */
	if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->slb_index_W, i);
			eieio();
			out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]);
			out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]);
			eieio();
		}
		out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01'.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *     Restore the MFC_CNTL register from the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 */
	clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiesce_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU state (disable SPU execution, MFC command
	 * queues, decrementer, SPU interrupts, etc.).
	 *
	 * Returns      0 on success.
	 *              2 if failed step 2.
	 *              6 if failed step 6.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2.  */
		return 2;
	}
	disable_interrupts(prev, spu);		/* Step 3.  */
	set_watchdog_timer(prev, spu);		/* Step 4.  */
	inhibit_user_access(prev, spu);		/* Step 5.  */
	if (check_spu_isolate(prev, spu)) {	/* Step 6.  */
		return 6;
	}
	set_switch_pending(prev, spu);		/* Step 7.  */
	save_mfc_cntl(prev, spu);		/* Step 8.  */
	save_spu_runcntl(prev, spu);		/* Step 9.  */
	save_mfc_sr1(prev, spu);		/* Step 10. */
	save_spu_status(prev, spu);		/* Step 11. */
	save_mfc_decr(prev, spu);		/* Step 12. */
	halt_mfc_decr(prev, spu);		/* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);		/* Step 16. */
	issue_mfc_tlbie(prev, spu);		/* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-45 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	save_mfc_slbs(prev, spu);	/* Step 29. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);		/* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);		/* Step 43. */
	save_mfc_cmd(prev, spu);	/* Step 44. */
	reset_ch(prev, spu);		/* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 46-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	setup_mfc_slbs(prev, spu);	/* Step 47. */
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);		/* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using an SPU for the first time.
	 */

	disable_interrupts(prev, spu);		/* Step 2.  */
	inhibit_user_access(prev, spu);		/* Step 3.  */
	terminate_spu_app(prev, spu);		/* Step 4.  */
	set_switch_pending(prev, spu);		/* Step 5.  */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc(prev, spu);			/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))		/* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);		/* Step 11. */
	issue_mfc_tlbie(prev, spu);		/* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);		/* Step 14. */
	wait_purge_complete(prev, spu);		/* Step 15. */
	reset_spu_privcntl(prev, spu);		/* Step 16. */
	reset_spu_lslr(prev, spu);		/* Step 17. */
	setup_mfc_sr1(prev, spu);		/* Step 18. */
	invalidate_slbs(prev, spu);		/* Step 19. */
	reset_ch_part1(prev, spu);		/* Step 20. */
	reset_ch_part2(prev, spu);		/* Step 21. */
	enable_interrupts(prev, spu);		/* Step 22. */
	set_switch_active(prev, spu);		/* Step 23. */
	set_mfc_tclass_id(prev, spu);		/* Step 24. */
	resume_mfc_queue(prev, spu);		/* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);		/* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);		/* Step 29. */
	setup_mfc_slbs(next, spu);		/* Step 30. */
	set_spu_npc(next, spu);			/* Step 31. */
	set_signot1(next, spu);			/* Step 32. */
	set_signot2(next, spu);			/* Step 33. */
	setup_decr(next, spu);			/* Step 34. */
	setup_ppu_mb(next, spu);		/* Step 35. */
	setup_ppuint_mb(next, spu);		/* Step 36. */
	send_restore_code(next, spu);		/* Step 37. */
	set_ppu_querymask(next, spu);		/* Step 38. */
	wait_tag_complete(next, spu);		/* Step 39. */
	wait_spu_stopped(next, spu);		/* Step 40. */
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);		/* Step 44. */
	wait_tag_complete(next, spu);		/* Step 45. */
	suspend_mfc(next, spu);			/* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);		/* Step 48. */
	clear_interrupts(next, spu);		/* Step 49. */
	restore_mfc_queues(next, spu);		/* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);		/* Step 53. */
	restore_mfc_csr_cmd(next, spu);		/* Step 54. */
	restore_mfc_csr_ato(next, spu);		/* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);		/* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);		/* Step 59. */
	restore_ch_part2(next, spu);		/* Step 60. */
	restore_spu_lslr(next, spu);		/* Step 61. */
	restore_spu_cfg(next, spu);		/* Step 62. */
	restore_pm_trace(next, spu);		/* Step 63. */
	restore_spu_npc(next, spu);		/* Step 64. */
	restore_spu_mb(next, spu);		/* Step 65. */
	check_ppu_mb_stat(next, spu);		/* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	restore_mfc_slbs(next, spu);		/* Step 68. */
	restore_mfc_sr1(next, spu);		/* Step 69. */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);		/* Step 71. */
	restore_mfc_cntl(next, spu);		/* Step 72. */
	enable_user_access(next, spu);		/* Step 73. */
	reset_switch_active(next, spu);		/* Step 74. */
	reenable_interrupts(next, spu);		/* Step 75. */
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-18].
	 *     (b) save of CSA, performed by PPE [steps 19-45].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 46-57].
	 *
	 * Returns      0 on success.
	 *              2,6 if failed to quiesce SPU.
	 *              1 if the SPU-side save failed.
	 */

	rc = quiesce_spu(prev, spu);		/* Steps 2-18.  */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);			/* Steps 19-45. */
	save_lscsa(prev, spu);			/* Steps 46-57. */
	return check_save_status(prev, spu);	/* Step 54.     */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (or reset) SPU [steps 2-25].
	 *     (b) restore LSCSA [steps 26-40], mostly performed by SPU.
	 *     (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * is assumed to have been performed by the caller (see
	 * spu_restore() below).
	 */

	restore_lscsa(next, spu);		/* Steps 26-40. */
	rc = check_restore_status(next, spu);	/* Step 40.     */
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);			/* Steps 41-76. */

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);		/* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	spu->class_0_pending = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

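/*
 * Illustrative caller sketch (hypothetical names, not part of this
 * file): a scheduler switching context 'old' off an SPU and context
 * 'new' onto it would pair the two entry points:
 *
 *	spu_save(&old->csa, spu);	// save 'old' into its CSA
 *	spu_restore(&new->csa, spu);	// harvest, then restore 'new'
 *
 * where 'csa' is assumed to be the struct spu_state embedded in the
 * caller's per-context structure.
 */
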
/**
 * spu_harvest - SPU harvest (reset) operation
 * @spu: pointer to SPU iomem structure.
 *
 * Perform SPU harvest (reset) operation.
 */
void spu_harvest(struct spu *spu)
{
	acquire_spu_lock(spu);
	harvest(NULL, spu);
	release_spu_lock(spu);
}

static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Set storage description. */
	csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1);

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
void spu_init_csa(struct spu_state *csa)
{
	struct spu_lscsa *lscsa;
	unsigned char *p;

	if (!csa)
		return;
	memset(csa, 0, sizeof(struct spu_state));

	lscsa = vmalloc(sizeof(struct spu_lscsa));
	if (!lscsa)
		return;

	memset(lscsa, 0, sizeof(struct spu_lscsa));
	csa->lscsa = lscsa;
	spin_lock_init(&csa->register_lock);

	/* Set LS pages reserved to allow for user-space mapping. */
	for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		SetPageReserved(vmalloc_to_page(p));

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);
}
EXPORT_SYMBOL_GPL(spu_init_csa);

void spu_fini_csa(struct spu_state *csa)
{
	unsigned char *p;

	/* Clear reserved bit before vfree. */
	for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE)
		ClearPageReserved(vmalloc_to_page(p));

	vfree(csa->lscsa);
}
EXPORT_SYMBOL_GPL(spu_fini_csa);
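
/*
 * Illustrative lifecycle sketch (hypothetical caller): each managed
 * context owns one CSA, initialized once and torn down when the
 * context is destroyed:
 *
 *	struct spu_state csa;
 *
 *	spu_init_csa(&csa);	// allocate and zero the CSA and LSCSA
 *	...			// any number of spu_save()/spu_restore()
 *	spu_fini_csa(&csa);	// unreserve LS pages and vfree the LSCSA
 */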