
vm.c

This is source code from the GDB debugging tool for Linux.
Language: C
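The listing below is the tail end of vm.c, which reads like the PowerPC address-translation module from the psim simulator bundled with GDB; it opens mid-function, inside a TRACE call. The code leans heavily on bit-field macros such as MASK32, MASKED32 and EXTRACTED32 that follow the PowerPC convention of numbering the most significant bit as bit 0. As a reading aid, here is a minimal standalone sketch of that convention; the lower-case names are hypothetical stand-ins, not the simulator's real definitions.

#include <stdio.h>
#include <stdint.h>

/* ones from bit START through bit STOP, numbering the MSB as bit 0
   (so mask32(0, 15) covers the upper half of a 32-bit word) */
static uint32_t mask32(int start, int stop)
{
  return (0xffffffffUL >> start) & (0xffffffffUL << (31 - stop));
}

/* WORD with everything outside START..STOP cleared */
static uint32_t masked32(uint32_t word, int start, int stop)
{
  return word & mask32(start, stop);
}

/* the START..STOP field of WORD shifted down into the low bits */
static uint32_t extracted32(uint32_t word, int start, int stop)
{
  return masked32(word, start, stop) >> (31 - stop);
}

int main(void)
{
  uint32_t sdr1 = 0x00fe01ff; /* made-up register value */
  printf("mask32(0, 15)             = 0x%08x\n", (unsigned)mask32(0, 15));
  printf("masked32(sdr1, 0, 15)     = 0x%08x\n", (unsigned)masked32(sdr1, 0, 15));
  printf("extracted32(sdr1, 23, 31) = 0x%08x\n", (unsigned)extracted32(sdr1, 23, 31));
  return 0;
}

With this convention in mind, the SDR1 unpacking in vm_synchronize_context below reads naturally: MASKED32(sprs[spr_sdr1], 0, 15) keeps the page-table origin in the top half-word, while EXTRACTED32(sprs[spr_sdr1], 23, 31) pulls out the 9-bit hash-table mask field.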
    TRACE(trace_vm, ("ea=0x%lx - set change bit - tlb=0x%lx &pte1=0x%lx\n",
                     (long)ea, (long)page_tlb_entry,
                     (long)page_tlb_entry->real_address_of_pte_1));
  }
  ra = (page_tlb_entry->masked_real_page_number | om_ea_masked_byte(ea));
  TRACE(trace_vm, ("ea=0x%lx - page translation - ra=0x%lx\n",
                   (long)ea, (long)ra));
  return ra;
}


/*
 * Definition of operations for memory management
 */


/* rebuild all the relevant bat information */
STATIC_INLINE_VM\
(void)
om_unpack_bat(om_bat *bat,
              spreg ubat,
              spreg lbat)
{
  /* for extracting out the offset within a page */
  bat->block_length_mask = ((MASKED(ubat, 51, 61) << (17-2))
                            | MASK(63-17+1, 63));

  /* for checking the effective page index */
  bat->block_effective_page_index = MASKED(ubat, 0, 46);
  bat->block_effective_page_index_mask = ~bat->block_length_mask;

  /* protection information */
  bat->protection_bits = EXTRACTED(lbat, 62, 63);
  bat->block_real_page_number = MASKED(lbat, 0, 46);
}


/* rebuild the given bat table */
STATIC_INLINE_VM\
(void)
om_unpack_bats(om_bats *bats,
               spreg *raw_bats,
               msreg msr)
{
  int i;
  bats->nr_valid_bat_registers = 0;
  for (i = 0; i < nr_om_bat_registers*2; i += 2) {
    spreg ubat = raw_bats[i];
    spreg lbat = raw_bats[i+1];
    if ((msr & msr_problem_state)
        ? EXTRACTED(ubat, 63, 63)
        : EXTRACTED(ubat, 62, 62)) {
      om_unpack_bat(&bats->bat[bats->nr_valid_bat_registers],
                    ubat, lbat);
      bats->nr_valid_bat_registers += 1;
    }
  }
}


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_sr(vm *virtual,
             sreg *srs,
             int which_sr,
             cpu *processor,
             unsigned_word cia)
{
  om_segment_tlb_entry *segment_tlb_entry = 0;
  sreg new_sr_value = 0;

  /* check register in range */
  ASSERT(which_sr >= 0 && which_sr < nr_om_segment_tlb_entries);

  /* get the working values */
  segment_tlb_entry = &virtual->segment_tlb.entry[which_sr];
  new_sr_value = srs[which_sr];

  /* do we support this */
  if (MASKED32(new_sr_value, 0, 0))
    cpu_error(processor, cia, "unsupported value of T in segment register %d",
              which_sr);

  /* update info */
  segment_tlb_entry->key[om_supervisor_state] = EXTRACTED32(new_sr_value, 1, 1);
  segment_tlb_entry->key[om_problem_state] = EXTRACTED32(new_sr_value, 2, 2);
  segment_tlb_entry->invalid_access = (MASKED32(new_sr_value, 3, 3)
                                       ? om_instruction_read
                                       : om_access_any);
  segment_tlb_entry->masked_virtual_segment_id =
    INSERTED32(EXTRACTED32(new_sr_value, 8, 31),
               31-6-24+1, 31-6); /* aligned ready for pte group addr */
}
#endif


#if (WITH_TARGET_WORD_BITSIZE == 32)
STATIC_INLINE_VM\
(void)
om_unpack_srs(vm *virtual,
              sreg *srs,
              cpu *processor,
              unsigned_word cia)
{
  int which_sr;
  for (which_sr = 0; which_sr < nr_om_segment_tlb_entries; which_sr++) {
    om_unpack_sr(virtual, srs, which_sr,
                 processor, cia);
  }
}
#endif


/* Rebuild all the data structures for the new context as specified by
   the passed registers */
INLINE_VM\
(void)
vm_synchronize_context(vm *virtual,
                       spreg *sprs,
                       sreg *srs,
                       msreg msr,
                       /**/
                       cpu *processor,
                       unsigned_word cia)
{
  /* enable/disable translation */
  int problem_state = (msr & msr_problem_state) != 0;
  int data_relocate = (msr & msr_data_relocate) != 0;
  int instruction_relocate = (msr & msr_instruction_relocate) != 0;
  int little_endian = (msr & msr_little_endian_mode) != 0;

  unsigned_word page_table_hash_mask;
  unsigned_word real_address_of_page_table;

  /* update current processor mode */
  virtual->instruction_map.translation.is_relocate = instruction_relocate;
  virtual->instruction_map.translation.is_problem_state = problem_state;
  virtual->data_map.translation.is_relocate = data_relocate;
  virtual->data_map.translation.is_problem_state = problem_state;

  /* update bat registers for the new context */
  om_unpack_bats(&virtual->ibats, &sprs[spr_ibat0u], msr);
  om_unpack_bats(&virtual->dbats, &sprs[spr_dbat0u], msr);

  /* unpack SDR1 - the storage description register 1 */
#if (WITH_TARGET_WORD_BITSIZE == 64)
  real_address_of_page_table = MASKED64(sprs[spr_sdr1], 0, 45);
  page_table_hash_mask = MASK64(18+28-EXTRACTED64(sprs[spr_sdr1], 59, 63),
                                63-7);
#endif
#if (WITH_TARGET_WORD_BITSIZE == 32)
  real_address_of_page_table = MASKED32(sprs[spr_sdr1], 0, 15);
  page_table_hash_mask = (INSERTED32(EXTRACTED32(sprs[spr_sdr1], 23, 31),
                                     7, 7+9-1)
                          | MASK32(7+9, 31-6));
#endif
  virtual->instruction_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->instruction_map.translation.page_table_hash_mask = page_table_hash_mask;
  virtual->data_map.translation.real_address_of_page_table = real_address_of_page_table;
  virtual->data_map.translation.page_table_hash_mask = page_table_hash_mask;

  /* unpack the segment tlb registers */
#if (WITH_TARGET_WORD_BITSIZE == 32)
  om_unpack_srs(virtual, srs,
                processor, cia);
#endif

  /* set up the XOR registers if the current endian mode conflicts
     with what is in the MSR */
  if (WITH_XOR_ENDIAN) {
    int i = 1;
    unsigned mask;
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER == LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER == BIG_ENDIAN))
      mask = 0;
    else
      mask = WITH_XOR_ENDIAN - 1;
    while (i - 1 < WITH_XOR_ENDIAN) {
      virtual->instruction_map.translation.xor[i-1] = mask;
      virtual->data_map.translation.xor[i-1] = mask;
      mask = (mask << 1) & (WITH_XOR_ENDIAN - 1);
      i = i * 2;
    }
  }
  else {
    /* don't allow the processor to change endian modes */
    if ((little_endian && CURRENT_TARGET_BYTE_ORDER != LITTLE_ENDIAN)
        || (!little_endian && CURRENT_TARGET_BYTE_ORDER != BIG_ENDIAN))
      cpu_error(processor, cia, "attempt to change hardwired byte order");
  }
}


/* update vm data structures due to a TLB operation */
INLINE_VM\
(void)
vm_page_tlb_invalidate_entry(vm *memory,
                             unsigned_word ea)
{
  int i = om_page_tlb_index(ea);
  memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  TRACE(trace_vm, ("ea=0x%lx - tlb invalidate entry\n", (long)ea));
}


INLINE_VM\
(void)
vm_page_tlb_invalidate_all(vm *memory)
{
  int i;
  for (i = 0; i < nr_om_page_tlb_entries; i++) {
    memory->instruction_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
    memory->data_tlb.entry[i].masked_virtual_segment_id = MASK(0, 63);
  }
  TRACE(trace_vm, ("tlb invalidate all\n"));
}


INLINE_VM\
(vm_data_map *)
vm_create_data_map(vm *memory)
{
  return &memory->data_map;
}


INLINE_VM\
(vm_instruction_map *)
vm_create_instruction_map(vm *memory)
{
  return &memory->instruction_map;
}


STATIC_INLINE_VM\
(unsigned_word)
vm_translate(om_map *map,
             unsigned_word ea,
             om_access_types access,
             cpu *processor,
             unsigned_word cia,
             int abort)
{
  switch (CURRENT_ENVIRONMENT) {
  case USER_ENVIRONMENT:
  case VIRTUAL_ENVIRONMENT:
    return ea;
  case OPERATING_ENVIRONMENT:
    return om_translate_effective_to_real(map, ea, access,
                                          processor, cia,
                                          abort);
  default:
    error("internal error - vm_translate - bad switch");
    return 0;
  }
}


INLINE_VM\
(unsigned_word)
vm_real_data_addr(vm_data_map *map,
                  unsigned_word ea,
                  int is_read,
                  cpu *processor,
                  unsigned_word cia)
{
  return vm_translate(&map->translation,
                      ea,
                      is_read ? om_data_read : om_data_write,
                      processor,
                      cia,
                      1); /*abort*/
}


INLINE_VM\
(unsigned_word)
vm_real_instruction_addr(vm_instruction_map *map,
                         cpu *processor,
                         unsigned_word cia)
{
  return vm_translate(&map->translation,
                      cia,
                      om_instruction_read,
                      processor,
                      cia,
                      1); /*abort*/
}


INLINE_VM\
(instruction_word)
vm_instruction_map_read(vm_instruction_map *map,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned_word ra = vm_real_instruction_addr(map, processor, cia);
  ASSERT((cia & 0x3) == 0); /* always aligned */
  if (WITH_XOR_ENDIAN)
    ra ^= map->translation.xor[sizeof(instruction_word) - 1];
  return core_map_read_4(map->code, ra, processor, cia);
}


INLINE_VM\
(int)
vm_data_map_read_buffer(vm_data_map *map,
                        void *target,
                        unsigned_word addr,
                        unsigned nr_bytes,
                        cpu *processor,
                        unsigned_word cia)
{
  unsigned count;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_1 byte;
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_read,
                                    processor, /*processor*/
                                    cia, /*cia*/
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    if (core_map_read_buffer(map->read, &byte, ra, sizeof(byte))
        != sizeof(byte))
      break;
    ((unsigned_1*)target)[count] = T2H_1(byte);
  }
  return count;
}


INLINE_VM\
(int)
vm_data_map_write_buffer(vm_data_map *map,
                         const void *source,
                         unsigned_word addr,
                         unsigned nr_bytes,
                         int violate_read_only_section,
                         cpu *processor,
                         unsigned_word cia)
{
  unsigned count;
  unsigned_1 byte;
  for (count = 0; count < nr_bytes; count++) {
    unsigned_word ea = addr + count;
    unsigned_word ra = vm_translate(&map->translation,
                                    ea, om_data_write,
                                    processor,
                                    cia,
                                    processor != NULL); /*abort?*/
    if (ra == MASK(0, 63))
      break;
    if (WITH_XOR_ENDIAN)
      ra ^= map->translation.xor[0];
    byte = T2H_1(((unsigned_1*)source)[count]);
    if (core_map_write_buffer((violate_read_only_section
                               ? map->read
                               : map->write),
                              &byte, ra, sizeof(byte)) != sizeof(byte))
      break;
  }
  return count;
}


/* define the read/write 1/2/4/8/word functions */

#define N 1
#include "vm_n.h"
#undef N

#define N 2
#include "vm_n.h"
#undef N

#define N 4
#include "vm_n.h"
#undef N

#define N 8
#include "vm_n.h"
#undef N

#define N word
#include "vm_n.h"
#undef N

#endif /* _VM_C_ */
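A non-obvious detail in vm_synchronize_context above is the xor-endian table: when the MSR's endian mode disagrees with the simulator's hardwired byte order, accesses are byte-swapped by XORing the real address rather than by swapping data. Below is a minimal standalone sketch of how that table gets filled in, assuming an 8-byte window (i.e. WITH_XOR_ENDIAN == 8); XOR_WINDOW and xor_table are illustrative names, not identifiers from the source.

#include <stdio.h>

#define XOR_WINDOW 8 /* stand-in for WITH_XOR_ENDIAN */

int main(void)
{
  unsigned xor_table[XOR_WINDOW] = {0};
  unsigned mask = XOR_WINDOW - 1; /* the conflicting-endian case */
  unsigned i = 1;

  /* same loop shape as vm_synchronize_context: entry [size-1]
     holds the address-munge mask for a size-byte access */
  while (i - 1 < XOR_WINDOW) {
    xor_table[i - 1] = mask;
    mask = (mask << 1) & (XOR_WINDOW - 1);
    i = i * 2;
  }

  /* a 1-byte access XORs its real address with 7, a 2-byte access
     with 6, a 4-byte with 4, and an aligned 8-byte with 0 -- the
     address swizzle performs the byte swap */
  printf("1-byte: ra ^= %u\n", xor_table[0]);
  printf("2-byte: ra ^= %u\n", xor_table[1]);
  printf("4-byte: ra ^= %u\n", xor_table[3]);
  printf("8-byte: ra ^= %u\n", xor_table[7]);
  return 0;
}

This is why the buffer routines index the table by access size: vm_data_map_read_buffer and vm_data_map_write_buffer XOR each byte's address with xor[0], while vm_instruction_map_read XORs with xor[sizeof(instruction_word) - 1] for a full instruction word.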
