; bambi.asm -- disk-cache write path (16-bit x86, MASM syntax)
; (replaced non-source web-viewer chrome that preceded the code)
;-----------------------------------------------------------------------
; Write-through path (writes NOT being delayed), continued from code
; above this chunk.  A check_hit on the leading partial block has just
; executed; throughout this chunk ZF clear appears to mean "hit" and
; ZF set "miss" (TODO confirm -- check_hit is defined outside this
; view).  On a miss we fall straight to the pointer update: in
; write-through mode a miss never allocates a cache block, we only
; refresh blocks that are already cached, then let the device driver
; perform the real write at wnodel_done.
; write_part_cache_block convention (per the comments below):
;   ah = first sector index within the block, al = sector count,
;   es:di = user buffer.
;-----------------------------------------------------------------------
jz wnodel_first_partial_common
; the partial block is in the cache. Better copy in our data.
push ax ; save sector count
les di,d_trans ; get disk transfer address
mov ah,byte ptr curblk_index ; get first sector #
; Note: al == transfer count
call write_part_cache_block ; copy partial block into cache
pop ax ; restore sector count
; come here to update pointers after dealing with first partial
; block. al == number of sectors in first partial
wnodel_first_partial_common:
sub d_count,ax ; keep track of count
mul sector_size_bytes ; bump transfer addr by this much
; (mul leaves the high word in dx; a partial is less than one cache
; block, so the product is presumed to fit in ax -- TODO confirm)
wnodel_write_next_block:
; loop point: entered with ax = number of BYTES just consumed
add word ptr d_trans,ax ; update transfer address
add curblk_l,1 ; let's move to next block
adc curblk_h,0 ; 32-bit block number in curblk_h:curblk_l
; Now we're block aligned. Complete any full blocks first.
wnodel_no_first_partial:
mov ax,cache_block_sectors
cmp ax,d_count ; do we need a full block?
ja wnodel_last_partial ; skip if no more full blocks
sub d_count,ax ; count it down
call check_hit ; is curblk in cache?
jz wnodel_full_common ; miss: write-through never allocates
; We got a hit! Must update cache!
les di,d_trans ; get user buffer
call write_full_cache_block ; write the whole block
wnodel_full_common:
mov ax,cache_block_bytes ; update pointer
jmp wnodel_write_next_block ; loop (d_count already decremented)
; d_count is now less than a full cache block
; we are cache block aligned
wnodel_last_partial:
cmp d_count,0 ; *IS* there a partial?
jz wnodel_done ; skip if not
call check_hit ; is last partial in cache?
jz wnodel_done ; skip if not
les di,d_trans
mov ax,d_count ; ah=0, al=count
; ah = 0 starting sector: the trailing partial begins at the front
; of its (block-aligned) cache block
call write_part_cache_block ; copy that data into cache
; Now we've updated the cache to reflect our pending write.
; Go ahead and do the write, and let the device driver
; return the error code to the caller.
wnodel_done:
jmp call_dd_ureqblk ; let dd set return code
; The writes are being delayed. Just put everything into
; the cache with dirty bits set.
; (The real disk write happens later when the dirty blocks are
; committed; here we only populate the cache and mark sectors dirty.)
do_delayed_writes:
mov num_valid_buffers,0 ;data may have been dupped in supercache
;so invalidate supercache
;PARADOX BUG FIX 8/4/92 Scottq
cmp byte ptr curblk_index,0 ; partial block at the front?
ifdef USE_VALID
jz no_partial_to_write
else
; same control flow as the jz above, but in long-jump form --
; presumably the short jz cannot reach no_partial_to_write in this
; build (TODO confirm)
jnz must_write_partial
jmp no_partial_to_write
endif
; we need a mask with curblk_index (known non-zero) low bits
; clear, and 'count' bits set after that, counting from bit zero.
; This mask gives the bits which we'll have to set in dirty_bits.
must_write_partial:
; count is min (cache_block_sectors - curblk_index, d_count)
mov cx,cache_block_sectors
sub cx,curblk_index ; how many left in block?
cmp cx,d_count ; do we want that many?
jb xbsetup_need_em_all ; skip if so
mov cx,d_count ; only transfer what we need
xbsetup_need_em_all:
; build mask = ((1 << count) - 1) << curblk_index
mov ax,1
shl ax,cl ; now put in that many one bits
dec ax ; convert to mask
push cx ; save the count
mov cl,byte ptr curblk_index
shl ax,cl ; that many zero bits
push ax ; save the mask
call check_hit ; see if we hit on curblk
; push/pop below do not disturb ZF, so check_hit's hit/miss result
; is still live at the jnz inside the ifdef'd chunk that follows
pop cx ; restore mask into cx!!!!
pop ax ; restore count into ax!!!!
; (the !!!! flag the deliberate swap: the mask was pushed last, so
; it pops first -- into cx, while the count lands back in ax)
push ax ; re-save sector count
; *****************************************************
; The following big chunk of code varies greatly between
; the USE_VALID version and otherwise, so it exists in
; two separate versions rather than riddling the code
; with ifdefs
; *****************************************************
ifdef USE_VALID ; for debugging, use other code
; --- USE_VALID build: per-sector valid bits exist, so a missed
; --- partial never needs a disk read -- only the sectors we are
; --- writing are marked valid.  Entry: ZF from check_hit, mask in
; --- cx, sector count pushed on the stack.
jnz wr_partial_was_a_hit ; brif we hit partial
; ; seed mask is in cx
mov bx,cx ; only the ones we're writing will
; ; be valid
call create_cache_block
jmp short wr_partial_common
wr_partial_was_a_hit:
or cx,dirty_mask ; or with previous mask
ifdef USE_VALID
; NOTE(review): this inner ifdef USE_VALID is redundant -- we are
; already inside the USE_VALID arm opened above
mov bx,cx ; set all dirty_bits valid
or bx,valid_mask
endif
call set_dirtyindex
wr_partial_common:
; Note! We've set the dirty_bits before we filled up the
; buffer. Good thing we know the commit_cache function
; can't be running concurrently with this mainline code.
; copy from cache to local_buf
; *** Note: We really only have to copy the part of the block
; that we're not replacing.
mov bp,cache_element_index ;setup cache block index
les di,d_trans ;get pointer to user data
pop ax ;ax is now actual count
push ax ;must restore later
sub d_count,ax ;update global sector count
mov ah,byte ptr curblk_index;setup for next call
call write_part_cache_block ;write data to xms
mov num_valid_buffers,0 ;data may have been dupped in supercache
;so invalidate supercache
pop ax ;restore count
mul sector_size_bytes ;adjust to bytes
; go write next block
else ; ndef USE_VALID
; --- non-USE_VALID build: no per-sector valid bits, so a missed
; --- partial forces a read-modify-write of the whole cache block.
jnz wr_partial_was_a_hit ; brif we hit partial
; ; seed mask is in cx
; This non-USE_VALID code path may still be used during
; debugging, so it still has some USE_VALID logic in it
ifdef USE_VALID
mov bx,cache_mask ; all valid for now
endif
call create_cache_block ; filling up local_buf
; NOTE: We really only need to read the first part of the block
les di,local_buf
mov ax,1 ; presumably ax = block count for the read -- TODO confirm
mov num_valid_buffers,0 ; invalidate local_buf
call read_curblks_from_disk ; read it into buffer
jnc wr_first_partial_common
; Shit! read error. Better invalidate block & bomb!
; Recovery: bypass the cache and push just this partial straight to
; the device; a second error here is fatal.
call invalidate_cache_block
pop ax ; get sector count to write
push ax ; resave count
mov ah,al ; count into ah
mov al,byte ptr curblk_index ; get starting sector
; (register convention here is the REVERSE of write_part_cache_block:
; al = start sector, ah = count)
les di,d_trans
call write_part_curblk_to_disk
pop dx ; restore sector count
jnc write_first_partial_recover_ok
jmp got_fatal_error
write_first_partial_recover_ok:
; NOTE(review): unlike every other partial-block path in this chunk,
; this recovery path never does "sub d_count,<count>" before
; rejoining write_next_block -- it looks like d_count is left high
; by the size of this partial.  Confirm against the full source.
mov ax,dx ; get number of sectors just written
mul sector_size_bytes ; get amount to increase d_trans by
jmp short write_next_block
wr_partial_was_a_hit:
or cx,dirty_mask ; or with previous mask
ifdef USE_VALID
mov bx,cx ; set all dirty_bits valid
or bx,valid_mask
endif
call set_dirtyindex
; Note! We've set the dirty_bits before we filled up the
; buffer. Good thing we know the commit_cache function
; can't be running concurrently with this mainline code.
; copy from cache to local_buf
; *** Note: We really only have to copy the part of the block
; that we're not replacing.
mov bp,cache_element_index ;setup cache block index
les di,d_trans ;get pointer to user data
pop ax ;ax is now actual count
push ax ;must restore later
sub d_count,ax ;update global sector count
mov ah,byte ptr curblk_index;setup for next call
call write_part_cache_block ;write data to xms
mov num_valid_buffers,0 ;data may have been dupped in supercache
;so invalidate supercache
pop ax ;restore count
mul sector_size_bytes ;adjust to bytes
ifdef USE_VALID
jmp write_next_block ;continue...
else
jmp short write_next_block
endif
wr_first_partial_common:
; Read succeeded: merge the user's partial data into local_buf at the
; proper sector offset, then push the whole merged block into the cache.
mov ax,curblk_index ; get starting index
mul sector_size_bytes ; ax = byte offset of first sector in block
les di,local_buf
add di,ax ; point to destination in buffer
pop ax ; get the actual transfer count
sub d_count,ax ; keep track of count
mul sector_size_bytes ; ax = byte count to copy
mov cx,ax
shr cx,1 ; convert count to words
lds si,d_trans ; get disk transfer address
assume ds:nothing
rep movsw ; move the data
push cs
pop ds
assume ds:zseg ; restore addressability
push ax ; save byte count for update
mov ax,1 ; make supercache valid
call set_supercache_valid ; with one block (curblk)
les di,local_buf
call write_full_cache_block ; write that pig thru the cache
pop ax ; restore byte count
; go write next block
endif ; ifdef USE_VALID
; Common loop point for the delayed-write path: entered with ax =
; number of BYTES just consumed from the user buffer.
write_next_block:
add word ptr d_trans,ax ; update transfer address
add curblk_l,1 ; let's move to next block
adc curblk_h,0 ; 32-bit block number in curblk_h:curblk_l
; Now we're block aligned. Complete any full blocks first.
no_partial_to_write:
mov ax,cache_block_sectors
cmp ax,d_count ; do we need a full block?
ja write_last_partial ; skip if no more full blocks
sub d_count,ax ; count it down
call check_hit ; is curblk in cache?
jnz wr_full_was_a_hit ; skip if that block is present
; miss: allocate a fresh block -- a FULL-block write needs no disk
; read, every sector is about to be overwritten
mov cx,cache_mask ; fully dirty
ifdef USE_VALID
mov bx,cx ; and fully valid
endif
call create_cache_block ; filling up local_buf
jmp short wr_full_common
wr_full_was_a_hit:
mov cx,cache_mask ; mark it fully dirty
ifdef USE_VALID
mov bx,cx ; and fully valid
or bx,valid_mask
endif
call set_dirtyindex
; okay. copy that puppy into cache.
wr_full_common:
les di,d_trans ; get user buffer
call write_full_cache_block ; write the whole block
mov ax,cache_block_bytes ; update pointer
jmp write_next_block
; d_count is now less than a full cache block
; we are cache block aligned
write_last_partial:
cmp d_count,0 ; *IS* there a partial?
jnz write_last_partial_not_done
jmp short writing_done ; skip if not
write_last_partial_not_done:
; figure out our mask bits
; the trailing partial starts at sector 0 of its block, so the mask
; is simply the low d_count bits: (1 << d_count) - 1
mov cl,byte ptr d_count
mov ax,1
shl ax,cl
dec ax ; convert to mask
push ax
call check_hit ; is last partial in cache?
pop cx ; get new mask bits into cx
; (pop does not touch ZF; check_hit's hit/miss result is still live)
; *********************************************
; Here is another big block which is different for USE_VALID
; *********************************************
ifdef USE_VALID
jnz wr_last_partial_was_a_hit ; skip if that block is present
mov bx,cx ; only our writing bits are valid
call create_cache_block ; filling up local_buf
jmp short wr_last_partial_common
wr_last_partial_was_a_hit:
or cx,dirty_mask ; or with previous
mov bx,cx ; valid=dirty
or bx,valid_mask
call set_dirtyindex
wr_last_partial_common:
mov num_valid_buffers,0 ; invalidate super-cache just in case
mov bp,cache_element_index ;get block index
les di,d_trans ;get pointer to user data
mov ax,d_count ;get final count of last write
; (ah = 0 start sector, al = count -- trailing partial is block-aligned)
call write_part_cache_block ;write data to cache in xms
else ; ndef USE_VALID
; no per-sector valid bits: a missed trailing partial needs a
; read-modify-write of the whole block
jnz wr_last_partial_was_a_hit ; skip if that block is present
; ifdef USE_VALID
; mov bx,cache_mask ; the whole thing is valid
; endif
call create_cache_block ; filling up local_buf
; Note: We really don't need to read the whole block, only the
; last part of it.
les di,local_buf
mov ax,1 ; presumably ax = block count for the read -- TODO confirm
mov num_valid_buffers,0 ; invalidate supercache
call read_curblks_from_disk ; read it into buffer
jnc wr_last_partial_common
call invalidate_cache_block
; Now we should attempt to write the sectors directly to the disk.
; (this error-recovery path, the else-arm's wr_last_partial_common,
; the closing endif, and writing_done all continue past the end of
; this chunk)
;
; (replaced non-source text: web code-viewer keyboard-shortcut help
; that was appended to this chunk and is not part of the source file)