
proc-xscale.S

Linux kernel source code
Page 1 of 2
/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@cam.org>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache
 */
#define MAX_AREA_SIZE	32768

/*
 * the cache line size of the I and D cache
 */
#define CACHELINESIZE	32

/*
 * the size of the data cache
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in alternance each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub 	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 */
ENTRY(cpu_xscale_proc_fin)
	str	lr, [sp, #-4]!
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	bl	xscale_flush_kern_cache_all	@ clean caches
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldr	pc, [sp], #4

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5
ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_user_cache_range(start, end, vm_flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- vma	- vma_area_struct describing address space
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 *
 *	Note: single I-cache line invalidation isn't used here since
 *	it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	flush_kern_dcache_page(void *page)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- page aligned address
 */
ENTRY(xscale_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_inv_range)
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_clean_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(xscale_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

ENTRY(xscale_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_inv_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

/*
 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
 * clear the dirty bits, which means that if we invalidate a dirty line,
 * the dirty data can still be written back to external memory later on.
 *
 * The recommended workaround is to always do a clean D-cache line before
 * doing an invalidate D-cache line, so on the affected processors,
 * dma_inv_range() is implemented as dma_flush_range().
 *
 * See erratum #25 of "Intel 80200 Processor Specification Update",
 * revision January 22, 2003, available at:
 *     http://www.intel.com/design/iio/specupdt/273415.htm
 */
ENTRY(xscale_80200_A0_A1_cache_fns)
	.long	xscale_flush_kern_cache_all
	.long	xscale_flush_user_cache_all
	.long	xscale_flush_user_cache_range
	.long	xscale_coherent_kern_range
	.long	xscale_coherent_user_range
	.long	xscale_flush_kern_dcache_page
	.long	xscale_dma_flush_range
	.long	xscale_dma_clean_range
	.long	xscale_dma_flush_range

ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

#define PTE_CACHE_WRITE_ALLOCATE 0

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	str	r1, [r0], #-2048		@ linux version

	bic	r2, r1, #0xff0
	orr	r2, r2, #PTE_TYPE_EXT		@ extended page

	eor	r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	tst	r3, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_EXT_AP_URO_SRW	@ yes -> user r/o, system r/w

	tst	r3, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_EXT_AP_UNO_SRW	@ yes -> user n/a, system r/w
						@ combined with user -> user r/w

	@
	@ Handle the X bit.  We want to set this bit for the minicache
	@ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
	@ and we have a writeable, cacheable region.  If we ignore the
	@ U and E bits, we can allow user space to use the minicache as
	@ well.
	@
	@  X = (C & ~W & ~B) | (C & W & B & write_allocate)
	@
	eor	ip, r1, #L_PTE_CACHEABLE
	tst	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#if PTE_CACHE_WRITE_ALLOCATE
	eorne	ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
	tstne	ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
#endif
	orreq	r2, r2, #PTE_EXT_TEX(1)

	@
	@ Erratum 40: The B bit must be cleared for a user read-only
	@ cacheable page.
	@
	@  B = B & ~(U & C & ~W)
	@
	and	ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
	teq	ip, #L_PTE_USER | L_PTE_CACHEABLE
	biceq	r2, r2, #PTE_BUFFERABLE
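
A note on the range operations above (xscale_dma_clean_range and friends): they all follow the same pattern of aligning the start address down to a cache line, stepping through the range with one CP15 maintenance operation per line, and draining the write buffer before returning. The C sketch below is only an illustration of that walk under those assumptions, not part of proc-xscale.S; clean_dcache_line() and drain_write_buffer() are hypothetical wrappers standing in for the mcr instructions used in the assembly.

/* Illustrative sketch only -- not part of proc-xscale.S. */
#define CACHELINESIZE	32

static inline void clean_dcache_line(unsigned long addr)
{
	/* equivalent of: mcr p15, 0, addr, c7, c10, 1 (clean D cache line) */
	asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (addr));
}

static inline void drain_write_buffer(void)
{
	/* equivalent of: mcr p15, 0, rX, c7, c10, 4 (drain write buffer) */
	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
}

/* Same walk as xscale_dma_clean_range(start, end). */
static void dma_clean_range_sketch(unsigned long start, unsigned long end)
{
	start &= ~(CACHELINESIZE - 1);		/* bic r0, r0, #CACHELINESIZE - 1 */
	do {
		clean_dcache_line(start);	/* clean one D cache line */
		start += CACHELINESIZE;
	} while (start < end);			/* cmp r0, r1; blo 1b */
	drain_write_buffer();
}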
