//------------------------------------------------------------------------------
// nand.c
//------------------------------------------------------------------------------
trace0(0, n2k, 0, "nand_cmd_cache_program()") ;
_mcu_register_wr(x_smc_stat, kbm_smc_stat_rdy) ;
_sm_set_wr_cmd(k_nand_cmd_write_cache) ;
if (k_success != sm_wait_rdy_with_timeout(k_nand_write_page_timeout))
{
trace0(0, n2k, 0, "error waiting for device busy") ;
nand_cmd_reset_device();
return(k_error );
}
trace0(0, n2k, 0, "success") ;
return(k_success );
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Issue the "read cache next" command to the large-page (2k) nand device and
// wait for it to come ready again.  On timeout the device is reset and
// k_error is returned; otherwise k_success.
//------------------------------------------------------------------------------
t_result n2k_read_cache_next() reentrant
{
    trace0(0, n2k, 0, "n2k_read_cache_next()") ;

    // clear the ready status bit, then send the cache-read-next command
    _mcu_register_wr(x_smc_stat, kbm_smc_stat_rdy) ;
    _sm_set_rd_cmd(k_nand_cmd_read_cache_next) ;

    // the device must come ready before the cached page data can be moved
    if (sm_wait_rdy_with_timeout(k_sm_busy_read_timeout) != k_success)
    {
        trace0(0, n2k, 0, "error waiting for data") ;
        nand_cmd_reset_device();
        return k_error;
    }
    return k_success;
}
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Issue the "read cache final" command to the large-page (2k) nand device and
// wait for it to come ready again.  On timeout the device is reset and
// k_error is returned; otherwise k_success.
//------------------------------------------------------------------------------
t_result n2k_read_cache_final() reentrant
{
    trace0(0, n2k, 0, "n2k_read_cache_final()") ;

    // clear the ready status bit, then send the final cache-read command
    _mcu_register_wr(x_smc_stat, kbm_smc_stat_rdy) ;
    _sm_set_rd_cmd(k_nand_cmd_read_cache_final) ;

    // the device must come ready before the cached page data can be moved
    if (sm_wait_rdy_with_timeout(k_sm_busy_read_timeout) != k_success)
    {
        trace0(0, n2k, 0, "error waiting for data") ;
        nand_cmd_reset_device();
        return k_error;
    }
    return k_success;
}
//-----------------------------------------------------------------------------
// read fmc xfer callbacks
//-----------------------------------------------------------------------------
extern t_result nand_read_begin_split(void) reentrant;
extern t_result nand_read_end_first_split(void) reentrant ;
//+-----------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Begin-transfer callback for a NAND read.
// Dispatches on g_ix_media_nand:
//   k_ix_media_nand     - delegate entirely to sm_read_begin_xfer().
//   k_ix_media_nand_int - delegate, then bound the split to the logical
//                         blocks remaining in the current physical block,
//                         bursting one lb at a time.
//   otherwise           - map the start LBA and size split/burst from the
//                         current segment offset within a 4-segment page
//                         (the literal 4 presumably equals
//                         _media_data(segments_per_page) here - TODO confirm).
// Returns k_success, or k_error with _lun_data(sensep) set to the sense data
// to report to the host.
t_result nand_read_begin_xfer() reentrant
{
t_result result;
// the 32-bit start/count values are split into hi/lo 16-bit words for tracing
TRACE4(386, nand, 10, "nand_read_begin_xfer() - start:0x%04X%04X count:0x%04X%04X" , _hw(_fmc_get_start_lb_32()), _lw(_fmc_get_start_lb_32()), _hw(_fmc_get_lb_count_32()), _lw(_fmc_get_lb_count_32()));
// default sense data in case a later stage of the read fails
_lun_data(sensep) = &sense_read_error;
if (g_ix_media_nand == k_ix_media_nand)
{
return(sm_read_begin_xfer() );
}
//nand interleaved
if (g_ix_media_nand == k_ix_media_nand_int)
{
result = sm_read_begin_xfer() ;
if (!g_addr_page)
{
// start of a block: a whole block's worth of lbs may go in one split
_lun_data(max_lb_per_split) = _min((_media_data(segments_per_page)*_media_data(pages_per_block)), _fmc_get_lb_count_32());
trace5(0, sm, 10, "-----read begin xfer - zone:%02d phy:0x%04X log:0x%04X page:%02d count:%02d - first in block", g_addr_zone, g_addr_rd_phy_blk, g_addr_log_blk, g_addr_page, _lun_data(max_lb_per_split) );
}
else
{
// mid-block: only the pages/segments remaining in this block
_lun_data(max_lb_per_split) = _min((((_media_data(pages_per_block) - g_addr_page)*_media_data(segments_per_page)) - g_addr_segment), _fmc_get_lb_count_32());
trace5(0, sm, 10, "-----read begin xfer - zone:%02d phy:0x%04X log:0x%04X page:%02d count:%02d", g_addr_zone, g_addr_rd_phy_blk, g_addr_log_blk, g_addr_page, _lun_data(max_lb_per_split));
}
_lun_data(max_lb_per_burst) = 1 ; //just one burst at a time
return(result);
}
// remaining media types: translate the starting logical block address
if (k_success != map_lba2addr_rd( _fmc_get_start_lb_32() ) )
{
_lun_data(sensep)=&sense_illegal_address ;
trace0(0, sm, 0, "error in lba2addr") ;
return(k_error );
}
// split/burst bounded by the segments left in the current 4-segment page
_lun_data(max_lb_per_split) = 4-g_addr_segment ;
_lun_data(max_lb_per_burst) = 4-g_addr_segment ;
return(k_success );
}
//+-----------------------------------------------------------------------------
// Name:
// nand_read_begin_split
//
// Declaration:
// t_result nand_read_begin_split(void) reentrant;
//
// Purpose:
// Issue the read command to subsequent splits after the first.
//
// Arguments:
// None.
//
// Return:
// A t_result indicating:
// k_success - command completed.
//
// Notes:
// This is a FUNCTION, not a DFA.
// Do not yield, or run a DFA, from within this callback.
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
// Begin a (non-first) split of a NAND read transfer.
// Maps the current LBA, screens blocks previously marked as having bad data,
// then either starts a large-page (2k) read or, for small-page/interleaved
// media, issues per-chip read commands and runs an optimized polled
// 512-byte-burst loop with hardware ECC checking.
// Returns k_success, k_error, or a sync-derived status (k_usbreset, etc.).
t_result nand_read_begin_split(void) reentrant
{
t_result result ;
t_sync sync;
uint8 addr_segment = g_addr_segment;
uint8 cnt;
TRACE0(387, nand, 1, "nand_read_begin_split()") ;
// translate the starting LBA of this split into zone/block/page/segment
if (k_success != map_lba2addr_rd( _fmc_get_start_lb_32() ) )
{
_lun_data(sensep)=&sense_illegal_address ;
return(k_error );
}
// re-read the segment now that map_lba2addr_rd() has updated the globals
addr_segment = g_addr_segment;
// start the read
result=k_success;
nand_int_select_chip(addr_segment);
// logical blocks flagged as containing bad data get a pre-check of the
// per-page status before any data is transferred
if (map_log_blk_has_bad_data())
{
if (sm_check_data_status(g_addr_page, (uint8) _fmc_get_lb_count_32()))
{
trace0(0, sm, 0, "read error detected in lba range") ;
nand_cmd_reset_device();
return(k_error);
}
// status check passed: reset the device before issuing the real read
nand_cmd_reset_device();
trace0(0, sm, 0, "read in log blk that has bad data is OK");
}
TRACE5(388, nand, 0, "beginning split - zone:%d phy:%d log:%d page:%d segment:%d", g_addr_zone, g_addr_rd_phy_blk, g_addr_log_blk, g_addr_page, g_addr_segment) ;
// convert the virtual address globals to a physical device address
nand_rd_va2pa() ;
if (g_ix_media_nand == k_ix_media_nand_2k)
{
// large-page nand: a single page read covers the split; a non-zero
// segment becomes a column offset within the page
TRACE0(389, nand, 0, "using large nand flash chips") ;
// g_fmc_begin_burst_callback=(t_fmc_callback) nand_read_begin_burst ;
result = nand_page_read_for_data() ;
if (g_addr_segment)
n2k_page_set_rd_offset( _n2k_data_offset(g_addr_segment)) ;
// return k_success;
}
else
{
// issue the read commands to all chips to be used (could be < number in set)
cnt = _min(_lun_data(max_lb_per_split), _media_data(segments_per_page));
while (cnt)
{
nand_int_select_chip(addr_segment);
_sm_rd_cmd_begin(k_sm_read) ;
_media_set_read_addr() ;
// update counters here, while device is "busy"
--cnt;
++addr_segment;
if (addr_segment >= _media_data(segments_per_page))
{
// wrapped past the last segment: advance to the next page
addr_segment=0;
nand_incr_addr();
}
// now wait for rdy... should be done by now... only takes a few usec
result = sm_wait_rdy_with_timeout(k_sm_busy_read_timeout);
if (k_success != result)
{
TRACE1(390, nand, 0, "timeout waiting for busy - smc_stat:%02x", x_smc_stat) ;
nand_cmd_reset_device();
return(k_error );
}
}
// do single lba bursting here in w/ a poller for use by the interleaved read/write code
cnt=(uint8) _fmc_get_lb_count_32();
addr_segment=g_addr_segment;
// enable ecc hardware: clear the four ecc error status bits, then unmask them
_mcu_register_wr(x_smc_stat, (kbm_smc_stat_ecc_err_d_a|kbm_smc_stat_ecc_err_d_b|kbm_smc_stat_ecc_err_c_a|kbm_smc_stat_ecc_err_c_b));
_mcu_register_clr_bits(x_smc_stat_msk, (kbm_smc_stat_ecc_err_d_a|kbm_smc_stat_ecc_err_d_b|kbm_smc_stat_ecc_err_c_a|kbm_smc_stat_ecc_err_c_b));
_mcu_register_clr_bits(x_imr0, kbm_isr0_fmc_irq);
_mcu_register_set_bits(sm_mode_ctl, kbm_sm_mode_ecc_blk_xfer_en) ;
// clear blk_xfer_complete irq.
_mcu_register_wr(x_isr0, kbm_isr0_blk_xfer_complete);
// write the count registers which won't change
_mcu_register_wr(x_fmc_cnt3, 0);
_mcu_register_wr(x_fmc_cnt2, 0);
_mcu_register_wr(x_fmc_cnt0, 0);
// prime the loop to "resume" transfer
result = k_success;
/////////////////////////////////////////////////
// one iteration per logical block: kick a 512-byte block transfer,
// then poll/sync until it completes or an abnormal sync arrives
while (cnt)
{
nand_int_select_chip(addr_segment);
// set cnt1 to 0x02 to get a 512 byte xfer
_mcu_register_wr(x_fmc_cnt1, 0x02);
// enable blk xfer
_mcu_register_set_bits(x_fmc_ctl, kbm_fmc_ctl_blk_xfer_en);
// do loop calculations
cnt--;
addr_segment=(addr_segment+1)%_media_data(segments_per_page);
thread_set_timer(g_fmc_timeout);
do
{
// consume any pending sync events; abort/usb-reset/timeout abandon
// the transfer, fmc_irq falls through for later error classification
sync = thread_got_sync(kbm_sync_abort |kbm_sync_usbrst |kbm_sync_timer|kbm_sync_fmc_irq);
_thread_clr_sync(sync);
if (sync & kbm_sync_usbrst)
{
trace0(0, sm, 1, "optimized fmc_wait_blk_irq_with_timeout() - error: kbm_sync_usbrst");
result = k_usbreset;
break;
}
if (sync & kbm_sync_abort)
{
trace0(0, sm, 1, "optimized fmc_wait_blk_irq_with_timeout() - error: kbm_sync_abort");
result = k_aborted;
break;
}
if (sync & kbm_sync_timer)
{
trace0(0, sm, 1, "optimized fmc_wait_blk_irq_with_timeout() - error: timeout awaiting irq");
result = k_timeout;
break;
}
if (sync & kbm_sync_fmc_irq)
{
trace0(0, sm, 1, "optimized fmc_wait_blk_irq_with_timeout() - error: kbm_sync_fmc_irq") ;
// set to success, since we don't know the cause of the error yet... the specific end-burst
// function should determine the actual status fot eh fmc xfer
result = k_success;
break;
}
// AAA: added this to poll instead of taking the interrupt
// cds: added isr bit fmc_irq to break out of loop for fmc error (ecc or otherwise) detection
} while (!(_mcu_register_rd(x_isr0) & (kbm_isr0_blk_xfer_complete|kbm_isr0_fmc_irq) ));
if (k_success!=result)
{
trace0(0, sm, 1, "optimized read begin split detected error... aborting") ;
return(result);
}
// any ecc error bit set: let the end-burst handler attempt correction;
// k_resume from it means the burst may continue
if (_mcu_register_rd(x_smc_stat)&(kbm_smc_stat_ecc_err_c_a|kbm_smc_stat_ecc_err_c_b|kbm_smc_stat_ecc_err_d_a|kbm_smc_stat_ecc_err_d_b))
{
result=sm_read_end_burst();
if (k_resume==result)
{
result=k_success;
}
if (k_success!=result)
{
trace0(0, sm, 1, "optimized read begin split detected error... aborting") ;
break;
}
}
}
// hack fmc registers to prevent fmc split loop from being entered upon our callback
g_start_lb_this_xfer.u32 += g_n_lb_this_split.u32;
// decrement the number of logical blocks remaining in the transfer
g_n_lb_this_xfer.u32 -= g_n_lb_this_split.u32;
// track the mscbot byte residue
_mscbot_decr_residue( (g_n_lb_this_split.u32*512L) );
g_n_lb_this_split.u32 = 0;
}
return(result);
}
#if 0
//------------------------------------------------------------------------------
// only for n2k media, that doesn't have read-sequential capabilities.
//
// Begin-burst callback: keeps the device's cache-read pipeline primed, then
// arms the ecc hardware and the fmc irq for the burst.
//
// BUG FIX: the first test originally used "<=", which also matched the
// (pages_per_block-2) case and made the "read cache final" branch below
// unreachable, so the final cached page was never requested.  Use "<" so
// pages strictly before the last-but-one issue "read cache next", and the
// last-but-one page issues "read cache final".
//------------------------------------------------------------------------------
t_result nand_read_begin_burst() reentrant
{
if ( g_addr_page < (_media_data(pages_per_block)-2))
{
TRACE0(391, nand, 0, "issue read next cached page here") ;
n2k_read_cache_next() ;
}
else if ( g_addr_page == (_media_data(pages_per_block)-2))
{
TRACE0(392, nand, 0, "issue read final cached page here") ;
n2k_read_cache_final() ;
}
// one page's worth of logical blocks per burst
_lun_data(max_lb_per_burst) = 4 ;
// setup hardware: clear then unmask the four ecc error status bits
_mcu_register_wr(x_smc_stat, kbm_smc_stat_ecc_err_d_a | kbm_smc_stat_ecc_err_d_b | kbm_smc_stat_ecc_err_c_a | kbm_smc_stat_ecc_err_c_b) ;
_mcu_register_clr_bits(x_smc_stat_msk, kbm_smc_stat_ecc_err_d_a | kbm_smc_stat_ecc_err_d_b | kbm_smc_stat_ecc_err_c_a | kbm_smc_stat_ecc_err_c_b) ;
// clear any pending fmc irq, then unmask it
_mcu_register_wr(x_isr0, kbm_isr0_fmc_irq) ;
_mcu_register_clr_bits(x_imr0, kbm_isr0_fmc_irq) ;
// enable ecc block transfer mode
_mcu_register_set_bits(sm_mode_ctl,kbm_sm_mode_ecc_blk_xfer_en);
return(k_success );
}
#endif
//+-----------------------------------------------------------------------------
// Name:
// nand_read_end_first_split
//
// Declaration:
// t_result nand_read_end_first_split(void);
//
// Purpose:
// Callback run at the end of the first split of a read transfer: enlarges
// the split/burst sizes for the remainder of the transfer, reinstalls
// sm_read_end_split as the end-split callback, and returns the hardware
// to read standby.
//
// Arguments:
// None.
//
// Return:
// A t_result indicating:
// k_success - command completed.
//
// Notes:
// This is a FUNCTION, not a DFA.
// Do not yield, or run a DFA, from within this callback.
//
// Since:
// fmc-1.0
//------------------------------------------------------------------------------
t_result nand_read_end_first_split(void) reentrant
{
    TRACE0(393, nand, 0, "nand_read_end_first_split()") ;
    // the first split is done; open up the split/burst sizes ("overdrive")
    // for the rest of the transfer
    TRACE2(394, nand, 0, "adjusting split/burst size. before: max_lb_per_split:%d max_lb_per_burst: %d", _lun_data(max_lb_per_split), _lun_data(max_lb_per_burst)) ;
    if ((g_active_media==k_ix_media_nand) || (g_active_media==k_ix_media_nand_int))
    {
        // nand media: a split may cover a whole physical block
        _lun_data(max_lb_per_split) = _min( (_media_data(pages_per_block)*_media_data(segments_per_page)), g_n_lb_this_xfer.u32);
        if (g_active_media==k_ix_media_nand)
        {
            // plain nand bursts the entire split at once
            _lun_data(max_lb_per_burst) = _lun_data(max_lb_per_split) ;
        }
        else
        {
            // interleaved nand bursts a single lb at a time
            _lun_data(max_lb_per_burst) = 1 ;
        }
    }
    else
    {
        // other media: at most 4 lbs per split and per burst
        _lun_data(max_lb_per_split) = _min( 4, g_n_lb_this_xfer.u32);
        _lun_data(max_lb_per_burst) = _min( 4, g_n_lb_this_xfer.u32);
    }
    TRACE2(395, nand, 0, "adjusting split/burst size. after: max_lb_per_split:%d max_lb_per_burst: %d", _lun_data(max_lb_per_split), _lun_data(max_lb_per_burst)) ;
    // no need to callback here any more
    g_fmc_end_split_callback = (t_fmc_callback) sm_read_end_split ;
    _sm_hw_set_rd_standby() ;
    return(k_success );
}
//+-----------------------------------------------------------------------------
// Name:
// dfa_sm_read
//
// Declaration:
// void dfa_sm_read(void) reentrant
//
// Purpose:
// Read data from sm media
//
// (definition of dfa_sm_read continues beyond this captured portion of the
// file; the remainder was truncated in this capture)
//------------------------------------------------------------------------------