/*
 *  Copyright (c) 2005 Zhejiang University, P.R.China
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */ 

//=============================================================================
/**
 *  \file    ARM/MMU.cpp
 *
 *  $Id: MMU.cpp,v 1.4 2005/06/08 07:40:46 qilj Exp $
 *
 *  \author  Juncheng Jia <jiajuncheng@gmail.com>
 */
//=============================================================================

#include "stdafx.h"
#include "MMU.h"
#include "Coprocessor.h"
#include "ARM_Processor.h"
#include "Core/Board.h"

namespace ARM {

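// Address mask for each mapping type: it keeps the virtual-address bits
// that are translated as a unit (1 MB section, 64 KB large page, 4 KB
// small or extended small page).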
const u32 TLB::masks[] =
{
	0x00000000,		// TLB_INVALID 
	0xFFFFF000,		// TLB_SMALLPAGE 
	0xFFFF0000,		// TLB_LARGEPAGE 
	0xFFF00000,		// TLB_SECTION 
	0xFFFFF000		// TLB_TINYPAGE (extended small page)
};

void TLB::invalidate_all()
{
	std::vector<Entry>::iterator itr;
	for( itr = entrys_.begin(); itr != entrys_.end(); ++itr)
		itr->mapping = TLB::TLB_INVALID;
	cycle_ = 0;
}

void TLB::invalidate(u32 addr)
{
	Entry * entry = search(addr);
	if( entry )
		entry->mapping = TLB_INVALID;
}

TLB::Entry * TLB::search(u32 va)
{
	u32 mask;
	std::vector<Entry>::iterator itr;
	for( itr = entrys_.begin() ; itr != entrys_.end(); ++itr)
	{
		if( (*itr).mapping == TLB_INVALID)
			continue;
		mask = masks[(*itr).mapping];
		if( (va & mask) == ( (*itr).va & mask) )
			return &(*itr);
	}
	return NULL;
}

bool TLB::check_perms(int ap, bool is_read)
{
	bool s, r;
	bool is_user;

	s = mmu_.system_protection_on();
	r = mmu_.rom_protection_on();
	is_user = mmu_.is_user_mode();
	
	switch( ap )
	{
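	// AP encoding (ARMv4/v5): 0 = controlled by the S and R bits,
	// 1 = privileged read/write only, 2 = privileged read/write plus
	// user read, 3 = read/write in all modes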
	case 0:
		return is_read && ((s && ! is_user) || r);
	case 1:
		return ! is_user;
	case 2:
		return is_read || (! is_user);
	case 3:
		return true;
	}

	return false;
}

Memory_Result TLB::check_access(u32 va, Entry * entry, bool is_read)
{
	assert(entry);

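	// domain access types: 0 = no access, 1 = client (check the AP bits),
	// 2 = reserved, 3 = manager (no permission checks)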
	u32 access = mmu_.domain_access_type(entry->domain);

	// domain access fault
	if(access == 0 || access == 2)
	{
		mmu_.set_fault_status(MMU::SECTION_DOMAIN_FAULT);
		mmu_.set_fault_address(va);
		return MMU::ACCESS_FAULT;
	}
	// client access type
	if(access == 1)
	{
		int subpage, ap;
		// gets the index of subpage to which va refers
		switch(entry->mapping)
		{
		case TLB_TINYPAGE:
			subpage = 0;
			break;
		case TLB_SMALLPAGE:
			subpage = (va >> 10) & 3;
			break;
		case TLB_LARGEPAGE:
			subpage = (va >> 14) & 3;
			break;
		case TLB_SECTION:
			subpage = 3;
			break;
		default:
			assert(0);
		}
		ap = (entry->perms >> (subpage * 2 + 4)) & 3;
		// check the access against the permission bits in the entry
		if( !check_perms(ap, is_read))
		{
			if(entry->mapping == TLB_SECTION)
			{
				mmu_.set_fault_status(MMU::SECTION_PERMISSION_FAULT);
				mmu_.set_fault_address(va);
				return MMU::ACCESS_FAULT;
			}
			else
			{
				mmu_.set_fault_status(MMU::SUBPAGE_PERMISSION_FAULT);
				mmu_.set_fault_address(va);
				return MMU::ACCESS_FAULT;
			}
		}
	}

	return MMU::ACCESS_SUCCESSFUL;
}

Memory_Result TLB::ttw(u32 va, Entry ** tlb)
{
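	// translation table walk: on a TLB miss, read the level-1 (and, for
	// page mappings, level-2) descriptors from memory and fill an entry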
	assert(tlb);

	*tlb = search(va);
	if( ! *tlb)
	{
		u32 l1addr;
		Entry entry;

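		// level-1 descriptor address: translation table base OR'ed with
		// the table index (va[31:20]) scaled to a word offset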
		l1addr = mmu_.get_translation_table_base();
		l1addr = (l1addr | (va >> 18)) & ~3;

		std::vector<u8> buffer;
		mmu_.mem_access(MMU::DATA_READ, l1addr, 4, buffer);

		u32 l1desc = 0;
		Core::Wukong_Get_System().convert_from_bytecode(buffer, l1desc);

		// 1st level descriptor
		switch(l1desc & 3)
		{
		case 0:	// fault
			mmu_.set_fault_status(MMU::PAGE_TRANSLATION_FAULT);
			mmu_.set_fault_address(va);
			return MMU::ACCESS_FAULT;

		case 1:	// coarse page table
			u32 l2addr, l2desc;
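			// level-2 descriptor address: coarse page table base OR'ed
			// with the table index (va[19:12]) scaled to a word offset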
			l2addr = l1desc & 0xFFFFFC00;
			l2addr = (l2addr | ((va & 0x000FF000) >> 10)) & ~3;
			
			mmu_.mem_access(MMU::DATA_READ, l2addr, 4, buffer);
			Core::Wukong_Get_System().convert_from_bytecode(buffer, l2desc);

			entry.va = va;
			entry.pa = l2desc;
			entry.perms = l2desc & 0x00000FFC;
			entry.domain = (l1desc >> 5) & 0x0000000F;

			// 2nd level descriptor
			switch( l2desc & 3)
			{
			case 0:	// fault
				mmu_.set_fault_status(MMU::PAGE_TRANSLATION_FAULT);
				mmu_.set_fault_address(va);
				return MMU::ACCESS_FAULT;
			case 1:	// large page
				entry.mapping = TLB_LARGEPAGE;
				break;
			case 2: // small page
				entry.mapping = TLB_SMALLPAGE;
				break;
			case 3:	// extended small page
				entry.mapping = TLB_TINYPAGE;
				break;
			}

			break;
		case 2:	// section
			entry.va = va;
			entry.pa = l1desc;
			entry.perms = l1desc & 0x00000C0C;
			entry.domain = (l1desc >> 5) & 0x0000000F;
			entry.mapping = TLB::TLB_SECTION;
			break;
		case 3:	// fine page table
			WUKONG_STDOUT << "Tiny page NOT supported by Wukong" << std::endl;
			assert(0);
			return MMU::ACCESS_FAULT;
		}
		
		entry.va &= masks[entry.mapping];
		entry.pa &= masks[entry.mapping];

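		// install the new entry, replacing TLB slots in round-robin order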
		entrys_.at(cycle_) = entry;
		*tlb = &(entrys_.at(cycle_));
		cycle_ = (cycle_ + 1) % entrys_.size();
	}

	return MMU::ACCESS_SUCCESSFUL;
}


TLB::TLB(MMU & mmu, size_t size) : mmu_(mmu)
{
	assert(size > 0);

	entrys_.resize(size);
	invalidate_all();
}

TLB::~TLB()
{
}

Cache::Cache(MMU & mmu, size_t width, size_t way, size_t set, Write_Mode write_mode): mmu_(mmu)
{
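	// cache geometry: width = line size in bytes, set = number of sets,
	// way = lines per set (the associativity)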
	sets_.resize(set);

	size_t i, j;
	for (i = 0; i < set; i++)
	{
		sets_[i].lines.resize(way);
		for (j = 0; j < way; j++)
		{
			sets_[i].lines[j].tag = 0;
			sets_[i].lines[j].data.resize(width);
		}
		sets_[i].cycle = 0;
	}

	num_width_ = width;
	num_set_ = set;
	num_way_ = way;
	write_mode_ = write_mode;
}

Cache::~Cache()
{
}

void Cache::invalidate_all()
{
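	// write every dirty line back to memory before discarding it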
	size_t i,j;
	for (i = 0; i < num_set_; i++)
	{
		for (j = 0; j < num_way_; j++)
		{
			write_back(& (sets_[i].lines[j]));
			sets_[i].lines[j].tag = 0;
		}
	}
}

void Cache::invalidate(u32 va)
{
	Cache_Line *cache;

	cache = search(va);
	if (cache)
	{
		write_back(cache);
		cache->tag = 0;
	}
}

Cache::Cache_Line * Cache::search(u32 va)
{
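	// set-associative lookup: compare the tag against every valid way
	// in the selected set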
	size_t index = get_set_index(va);
	u32 tag = get_tag(va);
	
	Cache_Set & cache_set = sets_[index];
	for(size_t i = 0; i < num_way_; i++)
	{
		if ((cache_set.lines[i].tag & TAG_VALID_FLAG) && (tag == get_tag(cache_set.lines[i].tag)))
		{
			return &cache_set.lines[i];
		}
	}
	
	return NULL;
}

Cache::Cache_Line * Cache::allocate(u32 va, u32 pa)
{
	// compute the set index from the full address before the tag mask
	// discards the low bits, matching the lookup in search()
	size_t index = get_set_index(va);
	va = get_tag(va);
	pa = get_tag(pa);

	Cache_Set & cache_set = sets_[index];
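	// choose the victim line within the set in round-robin order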
	Cache_Line & cache_line = cache_set.lines[cache_set.cycle++];
	if (cache_set.cycle == num_way_)
		cache_set.cycle = 0;

	if (write_mode_ == WRITE_BACK)
	{
		// evict: write the victim line back to memory if it holds
		// valid data
		if (cache_line.tag & Cache::TAG_VALID_FLAG)
			write_back(& cache_line);
	}

	cache_line.tag = va | TAG_VALID_FLAG;
	cache_line.pa = pa;

	return & cache_line;
}

void Cache::write_back(Cache::Cache_Line * cache)
{
	assert(cache);

	u32 pa = cache->pa;
	// sizes passed to mem_access are byte counts (cf. the 4-byte
	// descriptor reads in TLB::ttw); a line holds num_width_ bytes
	size_t nb = num_width_;

	// an invalid line holds no data to write back
	if ((cache->tag & TAG_VALID_FLAG) == 0)
		return;

	switch (cache->tag & (TAG_FIRST_HALF_DIRTY | TAG_LAST_HALF_DIRTY))
	{
	case 0:
		// the line is clean
		return;
	case TAG_FIRST_HALF_DIRTY:
		// only the first half is dirty: write the first nb/2 bytes
		nb /= 2;
		mmu_.mem_access(Memory_32Bit::MEMORY_WRITE, pa, nb, cache->data);
		break;
	case TAG_LAST_HALF_DIRTY:
		{
			// only the last half is dirty: copy it into a separate
			// buffer and write it back at the second half's address
			nb /= 2;
			pa += (u32) nb;
			std::vector<u8> buffer;
			for (size_t i = nb; i < num_width_; i++)
				buffer.push_back(cache->data[i]);
			mmu_.mem_access(Memory_32Bit::MEMORY_WRITE, pa, nb, buffer);
			break;
		}
	case TAG_FIRST_HALF_DIRTY | TAG_LAST_HALF_DIRTY:
		// the whole line is dirty
		mmu_.mem_access(Memory_32Bit::MEMORY_WRITE, pa, nb, cache->data);
		break;
	}

	cache->tag &= ~(TAG_FIRST_HALF_DIRTY | TAG_LAST_HALF_DIRTY);
}

void Cache::clean(u32 va)
{
	Cache_Line *cache_line;

	cache_line = search(va);
	if (cache_line)
		write_back(cache_line);	
}

MMU::MMU(void)
{
}

MMU::~MMU(void)
{
}

Memory_Result MMU::access(Memory_Access_Type type, u32 start, size_t size, Bytecode_Type & buffer)
{
	// hard-wired special case: accesses to address 0xf4000008 always
	// yield the value 0
	if( start == 0xf4000008)
	{
		u32 value = 0;
		Core::Wukong_Get_System().convert_to_bytecode(value, buffer);
		return ACCESS_SUCCESSFUL;
	}

	switch(type)
	{
	case DATA_READ:
		return data_read(start, size, buffer);
		
	case DATA_WRITE:
		return data_write(start, size, buffer);
		
	case INSTR_READ:
		return instr_read(start, size, buffer);
		
	default:
		WUKONG_STDOUT << "memory access type error" << std::endl;
		return ACCESS_FAULT;
	}
}


Memory_Result MMU::mem_access(Memory_Access_Type type, u32 start, size_t size, Bytecode_Type & buffer)
{
	assert(size > 0);

	Memory_Result io_result;
	Memory<u32> * mem = (Memory<u32> *)Wukong_Get_System().get_memory(start);

	assert(mem);

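	// memory-mapped I/O is forwarded to the board's device dispatcher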
	if (mem->is_mapped())
	{
		Core::Board<Core::u32> * board = (Core::Board<Core::u32> *)Wukong_Get_System().get_board();
		io_result = board->io_dispatch(type, start, size, buffer);
		
		//------------------ memory killer
		//if(io_result != IO_RW)
		//	return ACCESS_SUCCESSFUL;
		//------------------
		
		// an IO_RW result leaves the buffer zero-filled; in either case
		// the access is reported as successful
		if( io_result == IO_RW )
		{
			Core::u32 value = 0;
			Core::Wukong_Get_System().convert_to_bytecode(value, buffer);
		}
		return ACCESS_SUCCESSFUL;
	}

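	// plain memory: the access must fit inside the region and must not
	// write read-only memory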
	if(!mem->test_fit(start) || !mem->test_fit(start + (u32)size - 1))
		return ACCESS_FAULT;

	if(type == Memory<u32>::MEMORY_WRITE && mem->is_readonly())
		return ACCESS_FAULT;

	mem->unchecked_access(type, start, size, buffer);
	return ACCESS_SUCCESSFUL;
}


} //namespace
