
📄 mm.c

📁 RTEMS (Real-Time Executive for Multiprocessor Systems) is a free open source real-time operating system. This file, mm.c, is the memory management code of its PowerPC bootloader.
💻 C
        while((highpage =
               find_next_zone(res, lowpage, BootImage|Free))) {
            lowpage=find_zone_start(res, highpage, BootImage|Free);
            if ((highpage-lowpage)>minpages &&
                highpage>imghigh) {
                imghigh=highpage;
                highpage -=minpages;
            }
            if ((highpage-lowpage)>STACK_PAGES &&
                highpage>stkhigh) {
                stkhigh=highpage;
                highpage-=STACK_PAGES;
            }
        }
        bd->image = (void *)((imghigh-minpages)<<PAGE_SHIFT);
        bd->stack=(void *) (stkhigh<<PAGE_SHIFT);
        /* The code mover is put at the lowest possible place
         * of free memory. If this corresponds to the loaded boot
         * partition image it does not matter, because it overwrites
         * the unused part of it (x86 code).
         */
        bd->mover=(void *) (lowpage<<PAGE_SHIFT);
        /* Let us flush the caches in all cases. After all it should
         * not harm even on the 601, and we don't care about performance.
         * Right now it's easy since all processors have a line size
         * of 32 bytes. Once again residual data has proved unreliable.
         */
        bd->cache_lsize = 32;
    }
    /* For now we always assume that it's successful; we should
     * handle the case of insufficient memory better.
     */
    return 0;
}

/* Allocate a whole number of pages of virtual address space. */
void * valloc(u_long size) {
    map *p, *q;
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

    if (size==0) return NULL;
    size=PAGE_ALIGN(size)-1;
    for (p=mm->virtavail; p; p=p->next) {
        if (p->base+size <= p->end) break;
    }
    if(!p) return NULL;
    q=alloc_map();
    q->base=p->base;
    q->end=q->base+size;
    q->firstpte=MAP_USED_VIRT;
    insert_map(&mm->virtused, q);
    if (q->end==p->end) free_map(remove_map(&mm->virtavail, p));
    else p->base += size+1;
    return (void *)q->base;
}
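/* Illustrative note (not part of the original file): with the 4 KiB
 * pages this bootloader targets (PAGE_SHIFT == 12 is an assumption
 * for this example), valloc(5000) computes
 * size = PAGE_ALIGN(5000) - 1 = 8192 - 1 = 8191, so the map entry
 * spans q->end - q->base + 1 = 8192 bytes, i.e. two pages.
 */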
/* Invalidate every hash table entry covering virtmap and flush the TLB. */
static void vflush(map *virtmap) {
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;
    u_long i, limit=(mm->hashmask>>3)+8;
    hash_entry volatile *p=(hash_entry *) mm->sdr1;

    /* PTE handling is simple since the processor never updates
     * the entries. Writable pages always have the C bit set and
     * all valid entries have the R bit set. From the processor's
     * point of view the hash table is read-only.
     */
    for (i=0; i<limit; i++) {
        if (p[i].key<0) {
            u_long va;
            va = ((i<<9)^((p[i].key)<<5)) &0x3ff000;
            if (p[i].key&0x40) va^=0x3ff000;
            va |= ((p[i].key<<21)&0xf0000000)
              | ((p[i].key<<22)&0x0fc00000);
            if (va>=virtmap->base && va<=virtmap->end) {
                p[i].key=0;
                asm volatile("sync; tlbie %0; sync" : :
                             "r" (va));
            }
        }
    }
}

/* Release a virtual range obtained from valloc(), dropping its mappings. */
void vfree(void *vaddr) {
    map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
    struct _mm_private * mm = (struct _mm_private *) bd->mm_private;

    /* Flush memory queues */
    asm volatile("sync": : : "memory");

    virtmap = remove_map_at(&mm->virtused, vaddr);
    if (!virtmap) return;

    /* Remove mappings corresponding to virtmap */
    for (physmap=mm->mappings; physmap; ) {
        map *nextmap=physmap->next;
        if (physmap->base>=virtmap->base
            && physmap->base<virtmap->end) {
            free_map(remove_map(&mm->mappings, physmap));
        }
        physmap=nextmap;
    }

    vflush(virtmap);

    virtmap->firstpte= MAP_FREE_VIRT;
    insert_map(&mm->virtavail, virtmap);
    coalesce_maps(mm->virtavail);
}

/* Remove the single mapping containing vaddr without freeing the range. */
void vunmap(void *vaddr) {
    map *physmap, *virtmap; /* Actual mappings pertaining to this vm */
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    /* Flush memory queues */
    asm volatile("sync": : : "memory");

    /* vaddr must be within one of the vm areas in use and
     * must then correspond to one of the physical areas.
     */
    for (virtmap=mm->virtused; virtmap; virtmap=virtmap->next) {
        if (virtmap->base<=(u_long)vaddr &&
            virtmap->end>=(u_long)vaddr) break;
    }
    if (!virtmap) return;

    physmap = remove_map_at(&mm->mappings, vaddr);
    if(!physmap) return;
    vflush(physmap);
    free_map(physmap);
}

/* Map size bytes at vaddr to the physical address and PTE attributes
 * packed in p; the failure paths return 1.
 */
int vmap(void *vaddr, u_long p, u_long size) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    size=PAGE_ALIGN(size);
    if(!size) return 1;
    /* Check that the requested area fits in one vm image */
    for (q=mm->virtused; q; q=q->next) {
        if ((q->base <= (u_long)vaddr) &&
            (q->end>=(u_long)vaddr+size-1)) break;
    }
    if (!q) return 1;
    q= alloc_map();
    if (!q) return 1;
    q->base = (u_long)vaddr;
    q->end = (u_long)vaddr+size-1;
    q->firstpte = p;
    return insert_map(&mm->mappings, q);
}
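/* Illustrative sketch (not part of the original file): pairing
 * valloc()/vmap() to reach a device area and vunmap()/vfree() to tear
 * it down. DEV_PHYS and DEV_SIZE are hypothetical names; PTE_IO is the
 * I/O attribute used by create_identity_mappings() below. This assumes
 * insert_map() returns 0 on success, since vmap()'s failure paths
 * return 1.
 *
 *     void *v = valloc(DEV_SIZE);
 *     if (v && vmap(v, DEV_PHYS|PTE_IO, DEV_SIZE) == 0) {
 *         ... access the device registers through v ...
 *         vunmap(v);
 *     }
 *     if (v) vfree(v);
 */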
static
void create_identity_mappings(int type, int attr) {
    u_long lowpage=ULONG_MAX, highpage;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    RESIDUAL * res=bd->residual;

    while((highpage = find_next_zone(res, lowpage, type))) {
        map *p;
        lowpage=find_zone_start(res, highpage, type);
        p=alloc_map();
        /* Do not map page 0 to catch null pointers */
        lowpage = lowpage ? lowpage : 1;
        p->base=lowpage<<PAGE_SHIFT;
        p->end=(highpage<<PAGE_SHIFT)-1;
        p->firstpte = (lowpage<<PAGE_SHIFT)|attr;
        insert_map(&mm->mappings, p);
    }
}

static inline
void add_free_map(u_long base, u_long end) {
    map *q=NULL;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    if (base<end) q=alloc_map();
    if (!q) return;
    q->base=base;
    q->end=end-1;
    q->firstpte=MAP_FREE_VIRT;
    insert_map(&mm->virtavail, q);
}

static inline
void create_free_vm(void) {
    map *p;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    u_long vaddr=PAGE_SIZE; /* Never map vaddr 0 */
    for(p=mm->mappings; p; p=p->next) {
        add_free_map(vaddr, p->base);
        vaddr=p->end+1;
    }
    /* Special end of memory case */
    if (vaddr) add_free_map(vaddr,0);
}

/* Memory management initialization.
 * Set up the mapping lists.
 */

static inline
void add_perm_map(u_long start, u_long size) {
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    map *p=alloc_map();
    p->base = start;
    p->end = start + size - 1;
    p->firstpte = MAP_PERM_PHYS;
    insert_map(&mm->physperm, p);
}

void mm_init(u_long image_size) {
    u_long lowpage=ULONG_MAX, highpage;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    RESIDUAL * res=bd->residual;
    extern void (tlb_handlers)(void);
    extern void (_handler_glue)(void);
    int i;
    map *p;

    /* The checks are simplified by the fact that the image
     * and stack areas are always allocated at the upper end
     * of a free block.
     */
    while((highpage = find_next_zone(res, lowpage, BootImage|Free))) {
        lowpage=find_zone_start(res, highpage, BootImage|Free);
        if ( ( ((u_long)bd->image+PAGE_ALIGN(image_size))>>PAGE_SHIFT)
             == highpage) {
            highpage=(u_long)(bd->image)>>PAGE_SHIFT;
            add_perm_map((u_long)bd->image, image_size);
        }
        if ( (( u_long)bd->stack>>PAGE_SHIFT) == highpage) {
            highpage -= STACK_PAGES;
            add_perm_map(highpage<<PAGE_SHIFT,
                         STACK_PAGES*PAGE_SIZE);
        }
        /* Protect the interrupt handlers that we need! */
        if (lowpage<2) lowpage=2;
        /* Check for the special case of a full area! */
        if (highpage>lowpage) {
            p = alloc_map();
            p->base = lowpage<<PAGE_SHIFT;
            p->end = (highpage<<PAGE_SHIFT)-1;
            p->firstpte=MAP_FREE_PHYS;
            insert_map(&mm->physavail, p);
        }
    }

    /* Allocate the hash table */
    mm->sdr1=__palloc(0x10000, PA_PERM|16);
    _write_SDR1((u_long)mm->sdr1);
    memset(mm->sdr1, 0, 0x10000);
    mm->hashmask = 0xffc0;

    /* Set up the segment registers as we want them */
    for (i=0; i<16; i++) _write_SR(i, (void *)(i<<28));

    /* Create the maps for the physical memory; mapping firmware code
     * does not seem to be necessary. ROM is mapped read-only to reduce
     * the risk of reprogramming it, because it's often Flash and some
     * are amazingly easy to overwrite.
     */
    create_identity_mappings(BootImage|Free|FirmwareCode|FirmwareHeap|
                             FirmwareStack, PTE_RAM);
    create_identity_mappings(SystemROM, PTE_ROM);
    create_identity_mappings(IOMemory|SystemIO|SystemRegs|
                             PCIAddr|PCIConfig|ISAAddr, PTE_IO);

    create_free_vm();

    /* Install our own MMU and trap handlers. */
    codemove((void *) 0x300, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x400, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x600, _handler_glue, 0x100, bd->cache_lsize);
    codemove((void *) 0x700, _handler_glue, 0x100, bd->cache_lsize);
}
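/* Illustrative note (not part of the original file): the hash table
 * sizing in mm_init() is self-consistent. __palloc(0x10000, PA_PERM|16)
 * allocates 64 KiB aligned on a 64 KiB boundary (mask = (1<<16)-1),
 * the minimum HTAB size and alignment on these processors. With
 * hashmask = 0xffc0, vflush() scans (0xffc0>>3)+8 = 0x2000 = 8192
 * entries; at 8 bytes per hash_entry (assuming two 32-bit words,
 * key and rpn) that is exactly 8192*8 = 0x10000 bytes.
 */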
/* Sub-page allocator: returns zeroed, 8-byte aligned blocks carved
 * from pages obtained through __palloc().
 */
void * salloc(u_long size) {
    map *p, *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    if (size==0) return NULL;

    size = (size+7)&~7;
    for (p=mm->sallocfree; p; p=p->next) {
        if (p->base+size <= p->end) break;
    }
    if(!p) {
        void *m;
        m = __palloc(size, PA_SUBALLOC);
        p = alloc_map();
        if (!m || !p) return NULL;
        p->base = (u_long) m;
        p->firstpte = MAP_FREE_SUBS;
        p->end = (u_long)m+PAGE_ALIGN(size)-1;
        insert_map(&mm->sallocfree, p);
        coalesce_maps(mm->sallocfree);
        coalesce_maps(mm->sallocphys);
    }
    q=alloc_map();
    q->base=p->base;
    q->end=q->base+size-1;
    q->firstpte=MAP_USED_SUBS;
    insert_map(&mm->sallocused, q);
    if (q->end==p->end) free_map(remove_map(&mm->sallocfree, p));
    else p->base += size;
    memset((void *)q->base, 0, size);
    return (void *)q->base;
}

void sfree(void *p) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    q=remove_map_at(&mm->sallocused, p);
    if (!q) return;

    q->firstpte=MAP_FREE_SUBS;
    insert_map(&mm->sallocfree, q);
    coalesce_maps(mm->sallocfree);
}
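/* Illustrative note (not part of the original file): salloc() rounds
 * requests up to 8-byte multiples, so salloc(10) reserves
 * (10+7)&~7 = 16 bytes, and the memset() above guarantees the block
 * comes back zeroed. sfree() frees by address via remove_map_at(),
 * which appears to match the block's base address; that helper is
 * defined in an earlier part of this file not shown here, so this is
 * an inference from its uses.
 */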
/* First/last area fit; the low-order bits of flags (PA_ALIGN_MASK)
 * hold the log2 of the required alignment. The algorithms are
 * deliberately simple because we expect very little fragmentation of
 * the areas, if any. The unit of allocation is the page. By default
 * allocation is performed from higher addresses down, unless
 * flags&PA_LOW is true.
 */
void * __palloc(u_long size, int flags) {
    u_long mask = ((1<<(flags&PA_ALIGN_MASK))-1);
    map *newmap, *frommap, *p, *splitmap=0;
    map **queue;
    u_long qflags;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    /* Asking for a size which is not a multiple of the alignment
     * is likely to be an error.
     */
    if (size & mask) return NULL;
    size = PAGE_ALIGN(size);
    if(!size) return NULL;

    if (flags&PA_SUBALLOC) {
        queue = &mm->sallocphys;
        qflags = MAP_SUBS_PHYS;
    } else if (flags&PA_PERM) {
        queue = &mm->physperm;
        qflags = MAP_PERM_PHYS;
    } else {
        queue = &mm->physused;
        qflags = MAP_USED_PHYS;
    }

    /* We need to allocate that one now so no two allocations may
     * attempt to take the same memory simultaneously. Alloc_map_page
     * does not call back here to avoid infinite recursion in
     * alloc_map.
     */
    if (mask&PAGE_MASK) {
        splitmap=alloc_map();
        if (!splitmap) return NULL;
    }

    for (p=mm->physavail, frommap=NULL; p; p=p->next) {
        u_long high = p->end;
        u_long limit = ((p->base+mask)&~mask) + size-1;
        if (high>=limit && ((p->base+mask)&~mask)+size>p->base) {
            frommap = p;
            if (flags&PA_LOW) break;
        }
    }

    if (!frommap) {
        if (splitmap) free_map(splitmap);
        return NULL;
    }

    newmap=alloc_map();

    if (flags&PA_LOW) {
        newmap->base = (frommap->base+mask)&~mask;
    } else {
        newmap->base = (frommap->end +1 - size) & ~mask;
    }

    newmap->end = newmap->base+size-1;
    newmap->firstpte = qflags;

    /* Add a fragment if we don't allocate until the end. */
    if (splitmap) {
        splitmap->base=newmap->base+size;
        splitmap->end=frommap->end;
        splitmap->firstpte= MAP_FREE_PHYS;
        frommap->end=newmap->base-1;
    } else if (flags & PA_LOW) {
        frommap->base=newmap->base+size;
    } else {
        frommap->end=newmap->base-1;
    }

    /* Remove a fragment if it becomes empty. */
    if (frommap->base == frommap->end+1) {
        free_map(remove_map(&mm->physavail, frommap));
    }
    if (splitmap) {
        if (splitmap->base == splitmap->end+1) {
            free_map(remove_map(&mm->physavail, splitmap));
        } else {
            insert_map(&mm->physavail, splitmap);
        }
    }

    insert_map(queue, newmap);
    return (void *) newmap->base;
}

/* Return a page allocation made through __palloc() to the free pool. */
void pfree(void * p) {
    map *q;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;

    q=remove_map_at(&mm->physused, p);
    if (!q) return;

    q->firstpte=MAP_FREE_PHYS;
    insert_map(&mm->physavail, q);
    coalesce_maps(mm->physavail);
}

#ifdef DEBUG
/* Debugging functions */
void print_maps(map *chain, const char *s) {
    map *p;
    printk("%s",s);
    for(p=chain; p; p=p->next) {
        printk("    %08lx-%08lx: %08lx\n",
               p->base, p->end, p->firstpte);
    }
}

void print_all_maps(const char * s) {
    u_long freemaps;
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    map *free;

    printk("%s",s);
    print_maps(mm->mappings, "  Currently defined mappings:\n");
    print_maps(mm->physavail, "  Currently available physical areas:\n");
    print_maps(mm->physused, "  Currently used physical areas:\n");
    print_maps(mm->virtavail, "  Currently available virtual areas:\n");
    print_maps(mm->virtused, "  Currently used virtual areas:\n");
    print_maps(mm->physperm, "  Permanently used physical areas:\n");
    print_maps(mm->sallocphys, "  Physical memory used for salloc:\n");
    print_maps(mm->sallocfree, "  Memory available for salloc:\n");
    print_maps(mm->sallocused, "  Memory allocated through salloc:\n");
    for (freemaps=0, free=mm->freemaps; free; freemaps++, free=free->next);
    printk("  %ld free maps.\n", freemaps);
}

void print_hash_table(void) {
    struct _mm_private *mm = (struct _mm_private *) bd->mm_private;
    hash_entry *p=(hash_entry *) mm->sdr1;
    u_int i, valid=0;

    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) valid++;
    }
    printk("%u valid hash entries on pass 1.\n", valid);
    valid = 0;
    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) valid++;
    }
    printk("%u valid hash entries on pass 2.\n"
           "     vpn:rpn_attr, p/s, pteg.i\n", valid);
    for (i=0; i<((mm->hashmask)>>3)+8; i++) {
        if (p[i].key<0) {
            u_int pteg=(i>>3);
            u_long vpn;
            vpn = (pteg^((p[i].key)>>7)) &0x3ff;
            if (p[i].key&0x40) vpn^=0x3ff;
            vpn |= ((p[i].key<<9)&0xffff0000)
              | ((p[i].key<<10)&0xfc00);
            printk("%08lx:%08lx, %s, %5d.%d\n",
                   vpn, p[i].rpn, p[i].key&0x40 ? "sec" : "pri",
                   pteg, i%8);
        }
    }
}
#endif
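A short usage sketch for the physical allocator, following the flag convention visible in mm_init above (the low bits of flags carry the log2 of the alignment, OR-ed with behavior flags). The PA_LOW|12 combination is an assumption for illustration; only the PA_PERM|16 call is attested in this file:

    void *htab = __palloc(0x10000, PA_PERM|16); /* 64 KiB on a 64 KiB boundary, permanent */
    void *buf  = __palloc(0x2000, PA_LOW|12);   /* assumed: 8 KiB on a 4 KiB boundary, low memory */
    pfree(buf);  /* only default-queue (physused) areas return to physavail; PA_PERM ones do not */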
