// map.c
//------------------------------------------------------------------------------
static xdata uint32 _log ;
static xdata uint16 _block ;
static xdata uint16 _page ;
static xdata uint16 _sectors_per_block ;
t_result map_lba2addr_rd(uint32 addr) reentrant
{
// uint32 _log;
// uint16 _block;
// uint8 _sect;
// map lba -> logical block / logical sector
g_addr_segment = 0 ;
if(_media_data(segments_per_page) == 1)
{
_log = addr / (uint32) _media_data(pages_per_block) ;
g_addr_page = addr% _media_data(pages_per_block);
}
else
{
_sectors_per_block = (uint16) _media_data(pages_per_block) * (uint16) _media_data(segments_per_page) ;
_log = addr / (uint32) _sectors_per_block ;
g_addr_page = (addr % _sectors_per_block) / _media_data(segments_per_page) ;
// new: g_addr_segment is the lba (sector) offset within a page, for pages larger than 2k
g_addr_segment= (addr % _sectors_per_block) % _media_data(segments_per_page) ;
}
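// worked example (geometry values are illustrative, not from this code):
// with pages_per_block = 32 and segments_per_page = 4 (a 2K-page device
// with 512-byte sectors), _sectors_per_block = 128. for addr = 1000:
// _log = 1000 / 128 = 7, remainder 104, so g_addr_page = 104 / 4 = 26
// and g_addr_segment = 104 % 4 = 0.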
// map logical block -> zone
if( _log >= _media_data(logical_blocks_per_boot_zone) )
{
_log -= _media_data(logical_blocks_per_boot_zone) ;
g_addr_log_blk = (uint16)(_log%_media_data(logical_blocks_per_zone));
g_addr_zone = (uint8) (_log/_media_data(logical_blocks_per_zone)) + 1 ;
_log += _media_data(logical_blocks_per_boot_zone) ;
}
else
{
g_addr_log_blk = _log ;
g_addr_zone = 0 ;
}
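// worked example (hypothetical zone sizes): with
// logical_blocks_per_boot_zone = 500 and logical_blocks_per_zone = 1000,
// an absolute _log of 1700 lies past the boot zone: 1700 - 500 = 1200,
// giving g_addr_zone = 1200 / 1000 + 1 = 2 and
// g_addr_log_blk = 1200 % 1000 = 200.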
trace3(0, map, 0, "absolute log_blk:%d ==> zone:%d log_blk:%d", _log, g_addr_zone, g_addr_log_blk) ;
// validate the mapping as an addressable sector
if (g_addr_zone>=_media_data(num_zones))
{
trace2(0, map, 0, "zone %d out of range. should be between 0 and %d", g_addr_zone, (_media_data(num_zones))) ;
return k_error ;
}
// map zone, logblock to phyblock through mapping table
if (g_addr_zone!=0 && g_addr_zone != _media_data(assign_zone) )
{
// log2phy map not available for current zone... ditch one of the zones and create a map for the given zone
trace2(0, map, 0, "paging in new zone! old zone:%d new zone:%d ", (_media_data(assign_zone)), g_addr_zone) ;
#ifdef k_opt_erase_cache
if( _media_data(options)&kbm_media_data_opt_erase_cache)
{
// flush only non-boot zones
if(_media_data(assign_zone))
{
TRACE0(304, stk, 0, "about to call map_erase_cache_flush_zone") ;
// $$$ debug: we get the trace above, but die inside the zone flush call...
// $$$ _stack_dump() ;
map_erase_cache_flush_zone(_media_data(assign_zone));
// $$$ debug: this trace never fires
TRACE0(305, stk, 0, "map_erase_cache_flush_zone() returned") ;
}
}
#endif
#ifdef k_enable_write_caching
if( _media_data(options)&kbm_media_data_opt_write_cache)
{
map_write_cache_flush_all();
}
#endif
_media_data(assign_zone)=0;
_block=g_addr_log_blk;
_page=g_addr_page;
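// (map_build_sector_map() works on g_addr_log_blk/g_addr_page, so the
// caller's values are saved here and restored below.)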
if (k_success != map_build_sector_map())
{
trace0(0, map, 0, "error building sector map!") ;
return k_error ;
}
_media_data(assign_zone)=g_addr_zone;
g_addr_log_blk=_block;
g_addr_page=_page;
}
// map_dump_log2phy_table() ;
// perform the physical block mapping
_l2p_map = _log_map(g_addr_zone) ;
g_addr_rd_phy_blk=_l2p_map[g_addr_log_blk]&kbm_map_l2p_bits;
// enabled this block for br308
if(g_addr_rd_phy_blk == k_block_free)
{
uint16 old_wr_blk;
trace0(0, map, 0, "logical block not bound... find free block to use in its place") ;
old_wr_blk=g_addr_wr_phy_blk ;
map_alloc_wr_blk() ;
g_addr_rd_phy_blk=g_addr_wr_phy_blk;
g_addr_wr_phy_blk=old_wr_blk;
trace1(0, map, 0, "soft binding block %d", g_addr_rd_phy_blk) ;
// set soft binding bit in log2phy table
_l2p_map[g_addr_log_blk]=g_addr_rd_phy_blk|kbm_map_soft_l2p_binding ;
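// (soft binding marks the association as tentative: the block was
// allocated to satisfy a read of an unbound logical block rather than
// by an actual write.)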
}
trace3(0, map, 0, "zone:%d log_blk:%d ==> rd_phy_blk:%d", g_addr_zone, g_addr_log_blk, g_addr_rd_phy_blk) ;
// bind current log block to the current 'extra data' buffer
_media_clear_extra_data() ;
_media_bind_log2phy();
return k_success;
}
//+-----------------------------------------------------------------------------
// Name:
// map_is_addr_first_in_block()
//
// Declaration:
// t_bool map_is_addr_first_in_block(void) reentrant
//
// Purpose:
// test whether the currently mapped address is the first page in its block.
//
// Arguments:
// none.
//
// Return:
// k_true if g_addr_page == 0, else k_false.
// Notes:
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
t_bool map_is_addr_first_in_block(void) reentrant
{
return (g_addr_page?k_false:k_true) ;
}
//+-----------------------------------------------------------------------------
// Name:
// map_is_addr_last_in_block()
//
// Declaration:
// t_bool map_is_addr_last_in_block(void) reentrant
//
// Purpose:
// test whether the currently mapped address is the last page in its block.
//
// Arguments:
// none.
//
// Return:
// k_true if g_addr_page == pages_per_block - 1, else k_false.
// Notes:
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
t_bool map_is_addr_last_in_block(void) reentrant
{
return (g_addr_page < (_media_data(pages_per_block)-1)?k_false:k_true) ;
}
//+-----------------------------------------------------------------------------
// Name:
// map_write_begin()
//
// Declaration:
// t_result map_write_begin(uint32 lba) reentrant
//
// Purpose:
// map the starting lba and allocate a write block, copying the head
// pages of the old block when the write begins mid-block.
//
// Arguments:
// lba - logical block address at which the write begins.
//
// Return:
// k_success, or k_error on mapping, allocation, or copy failure.
// Notes:
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
t_result map_write_begin(uint32 lba) reentrant
{
t_result result;
trace1(0, map, 0, "map_write_begin(lba:%04X)", lba) ;
if (k_success != map_lba2addr_rd(lba))
return( k_error );
TRACE0(306, stk, 0, "returned from lba2addr_rd from map_write_begin()") ;
// copy the head sectors to new block
result=k_success;
if(g_addr_page)
{
#ifdef k_enable_write_caching
/*
if (_media_data(options)&kbm_media_data_opt_write_cache)
{
trace0(0, map, 0, "beginning write in mid block... caching copy_head()") ;
if (k_success != map_copy_block_head_cache())
return k_error;
}
else
{
trace0(0, map, 0, "using non-cached copy_block_head()") ;
if (k_success != map_alloc_wr_blk())
return k_error;
if (k_success != media_copy_block_head())
return k_error ;
}
*/
#else
trace0(0, map, 0, "using non-cached copy_block_head()") ;
if (k_success != map_alloc_wr_blk())
return k_error;
trace4(0, nand_int, 0, "inside mapper() - page:%d phy_blk:%d log:%d segment:%d", g_addr_page, g_addr_rd_phy_blk, g_addr_log_blk, g_addr_segment) ;
result=media_copy_block_head();
if (k_success != result)
{
if(k_media_copy_error_src==result)
{
uint16 old_wr_phy_blk;
trace2(0, map, 0, "failure reading from source block %d:%d. setting phyblock failed", g_addr_zone, g_addr_rd_phy_blk) ;
old_wr_phy_blk=g_addr_wr_phy_blk;
g_addr_wr_phy_blk=g_addr_rd_phy_blk;
_media_set_phyblock_failed();
g_addr_wr_phy_blk=old_wr_phy_blk;
// switch the logical binding on the read block to "soft-binding" so that it's not erased
_l2p_map = _log_map(g_addr_zone);
_l2p_map[g_addr_log_blk]|=kbm_map_soft_l2p_binding|kbm_map_blk_has_bad_data;
trace2(0, map, 0, "block %d:%d now soft bound, and marked as having bad data", g_addr_zone, g_addr_rd_phy_blk);
result=k_success;
}
else
{
trace0(0, map, 0, "unrecoverable copy error. erasing write block reporting error") ;
map_erase_block() ;
}
}
#endif
}
else
{
trace0(0, map, 0, " first sector in block. skipping copy block_head") ;
if (k_success != map_alloc_wr_blk())
return k_error;
}
trace0(0, map, 0, "ready to begin");
// ready to begin writing new sectors
return result ;
}
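// (overall partial-write sequence: map_write_begin() maps the lba and,
// for a mid-block start, copies the head pages of the old block into a
// freshly allocated write block; the caller then writes the new pages;
// map_write_flush() copies whatever tail pages remain.)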
//+-----------------------------------------------------------------------------
// Name:
// map_write_flush()
//
// Declaration:
// t_result map_write_flush(void) reentrant
//
// Purpose:
// finish a write by copying the remaining tail pages of the current
// block into the write block; a no-op if the write ended on the last
// page of the block.
//
// Arguments:
// none.
//
// Return:
// k_success, or an error from the tail copy.
// Notes:
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
t_result map_write_flush(void) reentrant
{
TRACE0(307, stk, 0, "map_write_flush()") ;
// this works because the write-cache is always cleared at the beginning
// of a cached write, even if we are continuing one.
if(map_is_addr_last_in_block())
{
return k_success;
}
// if we were NOT the last page in the block, then incrementing the
// page counter will NOT force a new zone/block mapping, so we can
// safely bump it here before copying the tail.
g_addr_page++;
#ifdef k_enable_write_caching
if(_media_data(options)&kbm_media_data_opt_write_cache)
{
// keep track of what's next, and tell host we've got idle processing...
/// eww wanted to keep the lun stuff away from here.
return map_copy_block_tail_cache() ;
}
else
#endif
{
return map_copy_block_tail() ;
}
}
//+-----------------------------------------------------------------------------
// Name:
// map_log2phy()
//
// Declaration:
// uint16 map_log2phy(uint16 log) reentrant
//
// Purpose:
// look up the log2phy table entry for a logical block in the current zone.
//
// Arguments:
// log - logical block number within the current zone.
//
// Return:
// the raw table entry, flag bits included.
//
// Notes:
// the entry still carries the binding flag bits (e.g.
// kbm_map_soft_l2p_binding); mask with kbm_map_l2p_bits to get the
// physical block number, as map_lba2addr_rd() does.
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
uint16 map_log2phy(uint16 log) reentrant
{
return _log_map(g_addr_zone)[log];
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
void map_erase_block() reentrant
{
t_result result;
trace2(0, map, 0, "map_erase_block(): phy %d:%d", g_addr_zone, g_addr_wr_phy_blk) ;
// _stack_dump();
_a_map=_assign_map(g_addr_zone) ;
// check the erase result; on failure, retry once.
// if it fails twice, mark the block bad and don't free it in the map table.
result=_media_erase_block();
if(k_success != result)
{
if(result!=k_media_error_wp)
{
trace0(0, map, 0, "warning: erase block failed! attempting to retry") ;
if(k_success != _media_erase_block())
{
uint8 page ;
page=g_addr_page ;
g_addr_page=0 ;
trace0(0, map, 0, "error: block %d erase failure. marking block bad") ;
_media_set_phyblock_failed() ;
_media_write_extra_data() ;
trace0(0, cache, 0, "block removed from usage.") ;
g_addr_page=page ;
}
else
{
_map_phy_blk_free( _a_map, g_addr_wr_phy_blk ) ;
trace1(0, map, 0, "retry successful. phy block %d available for writing", g_addr_wr_phy_blk) ;
}
}
else
{
trace0(0, map, 0, "media write protected... cannot erase block! something MAY be amiss");
}
}
else
{
trace1(0, map, 0, "erase success. phy block %d available for writing", g_addr_wr_phy_blk) ;
_map_phy_blk_free( _a_map, g_addr_wr_phy_blk ) ;
}
}
#ifdef k_opt_erase_cache
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
void map_erase_cache_init() reentrant
{
#if 0
g_erase_cache_head_ptr=0;
g_erase_cache_tail_ptr=0;
g_erase_cache_zone=0;
#endif
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
t_result map_erase_block_cache() reentrant
{
uint8 zone_idx;
trace2(0, cache, 0, "map_erase_block_cache(): caching phy %d:%d", g_addr_zone, g_addr_wr_phy_blk);
_e_map=_erase_cache(g_addr_zone);
zone_idx=g_addr_zone?1:0;
// no need to check for 'full': there are exactly as many cache bits as
// there are phy blocks in a zone, so the cache can never fill up. it is
// flushed either one block at a time when alloc_wr_blk detects no more
// phy_blks, or when idle-time processing comes around.
if(_media_data(erase_start)[zone_idx]==k_block_free)
{
// ... (remainder of map_erase_block_cache() truncated in source)