
📄 switch.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 5
static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 42:
         *     If any CSA.SPU_Status[I,S,H,P]=1, then
         *     restore the error or single step state.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
        if (csa->prob.spu_status_R & mask) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 mask;

        /* Restore, Step 43:
         *     If all CSA.SPU_Status[I,S,H,P,R]=0 then write
         *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
         *     then write '00' to SPU_RunCntl[R0R1] and wait
         *     for SPU_Status[R]=0.
         */
        mask = SPU_STATUS_INVALID_INSTR |
            SPU_STATUS_SINGLE_STEP |
            SPU_STATUS_STOPPED_BY_HALT |
            SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
        if (!(csa->prob.spu_status_R & mask)) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
                POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
                                 SPU_STATUS_RUNNING);
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
                eieio();
                POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
                                SPU_STATUS_RUNNING);
        }
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
        unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
        unsigned int ls_offset = 0x0;
        unsigned int size = 16384;
        unsigned int tag = 0;
        unsigned int rclass = 0;
        unsigned int cmd = MFC_GET_CMD;

        /* Restore, Step 44:
         *     Issue a DMA command to restore the first
         *     16kb of local storage from CSA.
         */
        send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 47.
         *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
         *     the queue.
         */
        out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
        eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 49:
         *     Write INT_MASK_class0 with value of 0.
         *     Write INT_MASK_class1 with value of 0.
         *     Write INT_MASK_class2 with value of 0.
         *     Write INT_STAT_class0 with value of -1.
         *     Write INT_STAT_class1 with value of -1.
         *     Write INT_STAT_class2 with value of -1.
         */
        spin_lock_irq(&spu->register_lock);
        spu_int_mask_set(spu, 0, 0ul);
        spu_int_mask_set(spu, 1, 0ul);
        spu_int_mask_set(spu, 2, 0ul);
        spu_int_stat_clear(spu, 0, ~0ul);
        spu_int_stat_clear(spu, 1, ~0ul);
        spu_int_stat_clear(spu, 2, ~0ul);
        spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 50:
         *     If MFC_Cntl[Se]!=0 then restore
         *     MFC command queues.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
                for (i = 0; i < 8; i++) {
                        out_be64(&priv2->puq[i].mfc_cq_data0_RW,
                                 csa->priv2.puq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data1_RW,
                                 csa->priv2.puq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data2_RW,
                                 csa->priv2.puq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->puq[i].mfc_cq_data3_RW,
                                 csa->priv2.puq[i].mfc_cq_data3_RW);
                }
                for (i = 0; i < 16; i++) {
                        out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
                                 csa->priv2.spuq[i].mfc_cq_data0_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
                                 csa->priv2.spuq[i].mfc_cq_data1_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
                                 csa->priv2.spuq[i].mfc_cq_data2_RW);
                        out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
                                 csa->priv2.spuq[i].mfc_cq_data3_RW);
                }
        }
        eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 51:
         *     Restore the PPU_QueryMask register from CSA.
         */
        out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
        eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 52:
         *     Restore the PPU_QueryType register from CSA.
         */
        out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
        eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 53:
         *     Restore the MFC_CSR_TSQ register from CSA.
         */
        out_be64(&priv2->spu_tag_status_query_RW,
                 csa->priv2.spu_tag_status_query_RW);
        eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 54:
         *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
         *     registers from CSA.
         */
        out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
        out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
        eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 55:
         *     Restore the MFC_CSR_ATO register from CSA.
         */
        out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 56:
         *     Restore the MFC_TCLASS_ID register from CSA.
         */
        spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
        eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
        u64 ch0_cnt, ch0_data;
        u64 ch1_data;

        /* Restore, Step 57:
         *    Set the Lock Line Reservation Lost Event by:
         *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
         *      2. If CSA.SPU_Channel_0_Count=0 and
         *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
         *         CSA.SPU_Event_Status[Lr]=0 then set
         *         CSA.SPU_Event_Status_Count=1.
         */
        ch0_cnt = csa->spu_chnlcnt_RW[0];
        ch0_data = csa->spu_chnldata_RW[0];
        ch1_data = csa->spu_chnldata_RW[1];
        csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
        if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
            (ch1_data & MFC_LLR_LOST_EVENT)) {
                csa->spu_chnlcnt_RW[0] = 1;
        }
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 58:
         *     If the status of the CSA software decrementer
         *     "wrapped" flag is set, OR in a '1' to
         *     CSA.SPU_Event_Status[Tm].
         */
        if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
                return;

        if ((csa->spu_chnlcnt_RW[0] == 0) &&
            (csa->spu_chnldata_RW[1] & 0x20) &&
            !(csa->spu_chnldata_RW[0] & 0x20))
                csa->spu_chnlcnt_RW[0] = 1;

        csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
        int i;

        /* Restore, Step 59:
         *     Restore the following CH: [0,3,4,24,25,27]
         */
        for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
                out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
                eieio();
        }
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 ch_indices[3] = { 9UL, 21UL, 23UL };
        u64 ch_counts[3] = { 1UL, 16UL, 1UL };
        u64 idx;
        int i;

        /* Restore, Step 60:
         *     Restore the following CH: [9,21,23].
         */
        ch_counts[0] = 1UL;
        ch_counts[1] = csa->spu_chnlcnt_RW[21];
        ch_counts[2] = 1UL;
        for (i = 0; i < 3; i++) {
                idx = ch_indices[i];
                out_be64(&priv2->spu_chnlcntptr_RW, idx);
                eieio();
                out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
                eieio();
        }
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 61:
         *     Restore the SPU_LSLR register from CSA.
         */
        out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
        eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 62:
         *     Restore the SPU_Cfg register from CSA.
         */
        out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
        eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 63:
         *     Restore PM_Trace_Tag_Wait_Mask from CSA.
         *     Not performed by this implementation.
         */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 64:
         *     Restore SPU_NPC from CSA.
         */
        out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
        eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        int i;

        /* Restore, Step 65:
         *     Restore MFC_RdSPU_MB from CSA.
         */
        out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
        eieio();
        out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
        for (i = 0; i < 4; i++) {
                out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
        }
        eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;
        u32 dummy = 0;

        /* Restore, Step 66:
         *     If CSA.MB_Stat[P]=0 (mailbox empty) then
         *     read from the PPU_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF) == 0) {
                dummy = in_be32(&prob->pu_mb_R);
                eieio();
        }
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;
        u64 dummy = 0UL;

        /* Restore, Step 66:
         *     If CSA.MB_Stat[I]=0 (mailbox empty) then
         *     read from the PPUINT_MB register.
         */
        if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
                dummy = in_be64(&priv2->puint_mb_R);
                eieio();
                spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
                eieio();
        }
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 69:
         *     Restore the MFC_SR1 register from CSA.
         */
        spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
        eieio();
}

static inline void restore_other_spu_access(struct spu_state *csa,
                                            struct spu *spu)
{
        /* Restore, Step 70:
         *     Restore other SPU mappings to this SPU. TBD.
         */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_problem __iomem *prob = spu->problem;

        /* Restore, Step 71:
         *     If CSA.SPU_Status[R]=1 then write
         *     SPU_RunCntl[R0R1]='01'.
         */
        if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
                out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
                eieio();
        }
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
        struct spu_priv2 __iomem *priv2 = spu->priv2;

        /* Restore, Step 72:
         *    Restore the MFC_CNTL register for the CSA.
         */
        out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
        eieio();

        /*
         * FIXME: this is to restart a DMA that we were processing
         *        before the save. better remember the fault information
         *        in the csa instead.
         */
        if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
                out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
                eieio();
        }
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 73:
         *     Enable user-space access (if provided) to this
         *     SPU by mapping the virtual pages assigned to
         *     the SPU memory-mapped I/O (MMIO) for problem
         *     state. TBD.
         */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 74:
         *     Reset the "context switch active" flag.
         */
        clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags);
        mb();
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
        /* Restore, Step 75:
         *     Re-enable SPU interrupts.
         */
        spin_lock_irq(&spu->register_lock);
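Each helper above implements one numbered step of the Cell/B.E. SPU context-restore procedure (this is page 1 of 5 of switch.c; the listing cuts off inside reenable_interrupts at Step 75). Elsewhere in the file, a top-level restore routine presumably drives these helpers in ascending step order against the same save area (CSA) and SPU. Below is a minimal sketch of that calling pattern; the function name restore_csa_sketch and the abbreviated step selection are illustrative assumptions, not code from this page:

/* Illustrative sketch only: the function name and the choice of steps
 * shown are assumptions, not taken from switch.c itself. It shows the
 * calling pattern implied by the helpers above: each "Restore, Step N"
 * routine is invoked in ascending step order on the same CSA/SPU pair.
 */
static void restore_csa_sketch(struct spu_state *csa, struct spu *spu)
{
        restore_status_part1(csa, spu);         /* Step 42. */
        restore_status_part2(csa, spu);         /* Step 43. */
        restore_ls_16kb(csa, spu);              /* Step 44. */
        suspend_mfc(csa, spu);                  /* Step 47. */
        clear_interrupts(csa, spu);             /* Step 49. */
        restore_mfc_queues(csa, spu);           /* Step 50. */
        /* ... Steps 51-74 follow the same pattern ... */
        reenable_interrupts(csa, spu);          /* Step 75. */
}

Gaps in the numbering (e.g. Steps 45-46 and 48 are absent here, and Step 63 is a stub) reflect steps that are either handled elsewhere or, as the restore_pm_trace comment notes, not performed by this implementation.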
