📄 switch.c
字号:
spu_mfc_tclass_id_set(spu, 0x10000000); eieio();}

/* Halt MFC command processing by purging every DMA request
 * still sitting in its queue.
 */
static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 * Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
	eieio();
}

/* Spin until the MFC reports that the queue purge requested in
 * purge_mfc_queue() has finished.
 */
static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 * Poll MFC_CNTL[Ps] until value '11' is read
	 * (purge complete).
	 */
	POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

/* Save the eight MFC SLB entries (ESID/VSID pairs, plus the SLB
 * index register) into the CSA, but only when translation is on.
 */
static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 29:
	 * If MFC_SR1[R]='1', save SLBs in CSA.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W);
		for (i = 0; i < 8; i++) {
			/* Select SLB entry i, then read it back. */
			out_be64(&priv2->slb_index_W, i);
			eieio();
			csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW);
			csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW);
			eieio();
		}
	}
}

/* Program MFC_SR1 to a known state for running the save/restore
 * sequence itself.
 */
static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 * Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 * MFC_SR1[TL,R,Pr,T] set correctly for the
	 * OS specific environment.
	 *
	 * Implementation note: The SPU-side code
	 * for save/restore is privileged, so the
	 * MFC_SR1[Pr] bit is not set.
	 *
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

/* Capture the SPU next-program-counter register into the CSA. */
static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 * Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

/* Capture the SPU privileged-control register into the CSA. */
static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 * Save SPU_PrivCntl in the CSA.
*/
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

/* Clear SPU_PrivCntl so the context starts from a known state. */
static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 * Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

/* Capture the SPU local-store limit register into the CSA. */
static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 * Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

/* Open the local-store limit back up to the full LS address range. */
static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 * Reset SPU_LSLR.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

/* Capture the SPU configuration register into the CSA. */
static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 * Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

/* Placeholder: performance-monitor trace state is intentionally
 * not saved by this implementation.
 */
static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 * Save PM_Trace_Tag_Wait_Mask in the CSA.
	 * Not performed by this implementation.
	 */
}

/* Capture the resource-allocation group ID and enable registers
 * into the CSA via the priv1 accessor layer.
 */
static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 * Save RA_GROUP_ID register and the
	 * RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
	    spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
	    spu_resource_allocation_enable_get(spu);
}

/* Capture the mailbox status register into the CSA. */
static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 * Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

/* Capture the PPU-side (outbound) mailbox register into the CSA. */
static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 * Save the PPU_MB register in the CSA.
*/
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

/* Capture the PPU interrupt-mailbox register into the CSA. */
static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 * Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

/* Save data and count for a fixed set of SPU channels, zeroing
 * each channel in hardware after it has been captured.
 */
static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 * Save the following CH: [0,1,3,4,24,25,27]
	 */
	for (i = 0; i < 7; i++) {
		idx = ch_indices[i];
		/* Select the channel before touching data/count. */
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

/* Save the SPU Read Mailbox channel (CH 29): its count, and all
 * four mailbox data slots (read unconditionally, regardless of
 * the saved count), then zero the count.
 */
static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 * Save SPU Read Mailbox Channel.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

/* Save the count of the MFC command channel (CH 21) into the CSA. */
static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 * Save MFC_CMD Channel.
*/
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

/* Reset the counts of channels 21, 23, 28 and 30 to their
 * architected initial values (16 slots for the MFC command
 * queue channel, 1 for the others).
 */
static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 * Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

/* Let the MFC resume processing its (now purged) command queue. */
static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 * Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

/* Drop all MFC SLB entries (only meaningful when translation is on). */
static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 45:
	 * Restore, Step 19:
	 * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All.
	 */
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) {
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
		eieio();
	}
}

/* Build an SLB entry (slb[0]=VSID word, slb[1]=ESID word) mapping
 * the kernel effective address 'ea'.
 */
static inline void get_kernel_slb(u64 ea, u64 slb[2])
{
	slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL;
	slb[1] = (ea & ESID_MASK) | SLB_ESID_V;

	/* Large pages are used for kernel text/data, but not vmalloc. */
	if (cpu_has_feature(CPU_FTR_16M_PAGE) &&
	    REGION_ID(ea) == KERNEL_REGION_ID)
		slb[0] |= SLB_VSID_L;
}

/* Write one SLB entry into the MFC at index 'slbe'. */
static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	out_be64(&priv2->slb_index_W, slbe);
	eieio();
	out_be64(&priv2->slb_vsid_RW, slb[0]);
	out_be64(&priv2->slb_esid_RW, slb[1]);
	eieio();
}

/* Set up MFC SLB entries covering the context-switch code and the
 * local-store CSA so the MFC can translate their addresses.
 */
static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu)
{
	u64 code_slb[2];
	u64 lscsa_slb[2];

	/* Save, Step 47:
	 * Restore, Step 30.
* If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 * register, then initialize SLB_VSID and SLB_ESID
	 * to provide access to SPU context save code and
	 * LSCSA.
	 *
	 * This implementation places both the context
	 * switch code and LSCSA in kernel address space.
	 *
	 * Further this implementation assumes that the
	 * MFC_SR1[R]=1 (in other words, assume that
	 * translation is desired by OS environment).
	 */
	invalidate_slbs(csa, spu);
	get_kernel_slb((unsigned long)&spu_save_code[0], code_slb);
	get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb);
	load_mfc_slb(spu, code_slb, 0);
	/* Skip the second entry when both addresses fall in the same
	 * segment (identical ESID/VSID pair).
	 */
	if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1]))
		load_mfc_slb(spu, lscsa_slb, 1);
}

/* Flip the software flags from "context switch pending" to
 * "context switch active", with a full barrier so other CPUs
 * observe the transition.
 */
static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 * Change the software context switch pending flag
	 * to context switch active.
	 */
	set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

/* Clear all pending interrupt status, then unmask only the class 1
 * (translation) interrupts needed during the switch sequence.
 */
static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 * Reset and then enable interrupts, as
	 * needed by OS.
	 *
	 * This implementation enables only class1
	 * (translation) interrupts.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, ~0ul);
	spu_int_stat_clear(spu, 1, ~0ul);
	spu_int_stat_clear(spu, 2, ~0ul);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

/* Issue one or more MFC DMA commands through the problem-state
 * command registers, splitting the transfer into chunks of at
 * most MFC_MAX_DMA_SIZE and retrying while the command queue
 * reports busy (status bits 0x3).  Always returns 0.
 */
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size = (size > MFC_MAX_DMA_SIZE) ?
MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			/* Write LSA, EA, then the combined size/tag/class/cmd
			 * word; reading the command word back returns the
			 * enqueue status.
			 */
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

/* DMA the first 16KB of SPU local storage out to the LSCSA. */
static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 * Issue a DMA command to copy the first 16K bytes
	 * of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

/* Point SPU_NPC at LSA 0, the entry point of the SPU-side
 * save/restore program.
 */
static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry
	 * point address of context save code in local
	 * storage.
	 *
	 * This implementation uses SPU-side save/restore
	 * programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

/* Pass the upper half of the LSCSA effective address to the SPU
 * program via signal-notification register 1.
 */
static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 * Write SPU_Sig_Notify_1 register with upper 32-bits
	 * of the CSA.LSCSA effective address.
	 */
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -