blkman.c
				if( locked_len > block_size ) {
					cur_blocks = locked_len / block_size;
					cur_len = cur_blocks * block_size;
					break;
				}

				blkman_unlock_iovecs( cur_vecs, cur_vec_count, cur_vec_offset, locked_len );
			}

			if( cur_len == 0 ) {
				// not even one block could be locked - fall back to the scratch buffer
				if( need_buffer )
					panic( "Cannot lock scratch buffer\n" );

				need_buffer = true;
				goto retry;
			}
		}

		// translate the locked iovecs into physical ranges the controller can use
		res = blkman_map_iovecs( cur_vecs, cur_vec_count, cur_vec_offset, cur_len,
			phys_vecs, device->params.max_sg_num,
			device->params.dma_boundary, device->params.dma_boundary_solid );
		if( res != NO_ERROR )
			goto cannot_map;

		if( phys_vecs->total_len < cur_len ) {
			// the scatter/gather list could not cover the whole chunk -
			// shrink the transfer to the whole blocks that did fit
			cur_blocks = phys_vecs->total_len / block_size;

			if( cur_blocks == 0 ) {
				if( need_locking )
					blkman_unlock_iovecs( cur_vecs, cur_vec_count, cur_vec_offset, cur_len );
				if( need_buffer )
					panic( "Scratch buffer turned out to be too fragmented !?\n" );

				need_buffer = true;
				goto retry;
			}

			phys_vecs->total_len = cur_blocks * block_size;
		}

		// hand the physical ranges to the actual block device driver
		if( write )
			res = handle->device->interface->write( handle->handle_cookie,
				phys_vecs, block_pos, cur_blocks, &bytes_transferred );
		else
			res = handle->device->interface->read( handle->handle_cookie,
				phys_vecs, block_pos, cur_blocks, &bytes_transferred );

cannot_map:
		if( need_locking )
			blkman_unlock_iovecs( cur_vecs, cur_vec_count, cur_vec_offset, cur_len );

		if( res != NO_ERROR ) {
			if( need_buffer )
				mutex_unlock( &blkman_buffer_mutex );
			goto err2;
		}

		if( need_buffer ) {
			if( !write ) {
				// copy data read via the scratch buffer back into the caller's iovecs
				blkman_copy_buffer( blkman_buffer + block_ofs,
					vec, vec_count, vec_offset, bytes_transferred, false );
			}
			mutex_unlock( &blkman_buffer_mutex );
		}

		len -= bytes_transferred;
		vec_offset += bytes_transferred;
	}

	locked_pool->free( handle->device->phys_vecs_pool, phys_vecs );

	return orig_len;

err2:
	locked_pool->free( handle->device->phys_vecs_pool, phys_vecs );
	return res;

err:
	return res;
}


static ssize_t blkman_readv_int( blkman_handle_info *handle, struct iovec *vec, size_t vec_count,
	off_t pos, ssize_t len, bool need_locking )
{
	return blkman_readwrite( handle, vec, vec_count, pos, len, need_locking, false );
}


static ssize_t blkman_writev_int( blkman_handle_info *handle, struct iovec *vec, size_t vec_count,
	off_t pos, ssize_t len, bool need_locking )
{
	return blkman_readwrite( handle, vec, vec_count, pos, len, need_locking, true );
}


static ssize_t blkman_readpage( blkman_handle_info *handle, iovecs *vecs, off_t pos )
{
	return blkman_readv_int( handle, vecs->vec, vecs->num, pos, vecs->total_len, false );
}


static ssize_t blkman_writepage( blkman_handle_info *handle, iovecs *vecs, off_t pos )
{
	return blkman_writev_int( handle, vecs->vec, vecs->num, pos, vecs->total_len, false );
}


static inline ssize_t blkman_readwritev( blkman_handle_info *handle, struct iovec *vec,
	size_t vec_count, off_t pos, ssize_t len, bool write )
{
	int res;

	// wire down the iovec array itself before starting the transfer
	if( (res = vm_lock_range( (addr_t)vec, vec_count * sizeof( vec[0] ), 0 )) != NO_ERROR )
		return res;

	if( write )
		res = blkman_writev_int( handle, vec, vec_count, pos, len, true );
	else
		res = blkman_readv_int( handle, vec, vec_count, pos, len, true );

	vm_unlock_range( (addr_t)vec, vec_count * sizeof( vec[0] ));

	return res;
}


static ssize_t blkman_readv( blkman_handle_info *handle, struct iovec *vec, size_t vec_count,
	off_t pos, ssize_t len )
{
	return blkman_readwritev( handle, vec, vec_count, pos, len, false );
}


static ssize_t blkman_read( blkman_handle_info *handle, void *buf, off_t pos, ssize_t len )
{
	iovec vec[1];

	vec[0].start = buf;
	vec[0].len = len;

	return blkman_readv( handle, vec, 1, pos, len );
}


static ssize_t blkman_writev( blkman_handle_info *handle, struct iovec *vec, size_t vec_count,
	off_t pos, ssize_t len )
{
	return blkman_readwritev( handle, vec, vec_count, pos, len, true );
}
static ssize_t blkman_write( blkman_handle_info *handle, void *buf, off_t pos, ssize_t len )
{
	iovec vec[1];

	vec[0].start = buf;
	vec[0].len = len;

	return blkman_writev( handle, vec, 1, pos, len );
}


static int blkman_canpage( blkman_handle_info *handle )
{
	return true;
}


/*
static phys_vecs *blkman_alloc_phys_vecs( blkman_device_info *device )
{
	phys_vecs *vec;

retry:
	mutex_lock( &device->lock );

	vec = device->free_phys_vecs;
	if( vec == NULL ) {
		mutex_unlock( &device->lock );
		sem_acquire( device->phys_vec_avail, 1 );
		goto retry;
	}

	device->free_phys_vecs = *((phys_vecs **)vec);

	mutex_unlock( &device->lock );
	return vec;
}

static void blkman_free_phys_vecs( blkman_device_info *device, phys_vecs *vec )
{
	bool was_empty;

	mutex_lock( &device->lock );

	was_empty = device->free_phys_vecs == NULL;
	*((phys_vecs **)vec) = device->free_phys_vecs;
	device->free_phys_vecs = vec;

	mutex_unlock( &device->lock );

	if( was_empty )
		sem_release( device->phys_vec_avail, 1 );
}
*/


static int blkman_set_capacity( blkman_device_info *device, uint64 capacity, size_t block_size )
{
	mutex_lock( &device->lock );

	device->params.capacity = capacity;
	device->params.block_size = block_size;

	mutex_unlock( &device->lock );
	return NO_ERROR;
}


static int blkman_init_buffer( void )
{
	int res;

	SHOW_FLOW0( 3, "" );

	blkman_buffer_size = 32*1024;

	res = mutex_init( &blkman_buffer_mutex, "blkman_buffer_mutex" );
	if( res < 0 )
		goto err1;

	res = blkman_buffer_region = vm_create_anonymous_region( vm_get_kernel_aspace_id(),
		"blkman_buffer", (void **)&blkman_buffer, REGION_ADDR_ANY_ADDRESS,
		blkman_buffer_size, REGION_WIRING_WIRED, LOCK_RW | LOCK_KERNEL );
	if( res < 0 )
		goto err2;

	res = vm_get_page_mapping( vm_get_kernel_aspace_id(), (addr_t)blkman_buffer,
		&blkman_buffer_phys );
	if( res < 0 )
		goto err3;

	blkman_buffer_vec[0].start = blkman_buffer;
	blkman_buffer_vec[0].len = blkman_buffer_size;

	blkman_buffer_phys_vec.num = 1;
	blkman_buffer_phys_vec.total_len = blkman_buffer_size;
	blkman_buffer_phys_vec.vec[0].start = blkman_buffer_phys;
	blkman_buffer_phys_vec.vec[0].len = blkman_buffer_size;

	return NO_ERROR;

err3:
	vm_delete_region( vm_get_kernel_aspace_id(), blkman_buffer_region );
err2:
	mutex_destroy( &blkman_buffer_mutex );
err1:
	return res;
}


static int blkman_uninit_buffer( void )
{
	vm_delete_region( vm_get_kernel_aspace_id(), blkman_buffer_region );
	mutex_destroy( &blkman_buffer_mutex );

	return NO_ERROR;
}


static int blkman_init( void )
{
	int res;

	SHOW_FLOW0( 3, "" );

	/*res = module_get( PARTITIONS_MANAGER_MODULE_NAME, 0, (void **)&part_mngr );
	if( res != NO_ERROR )
		goto err1;*/

	res = module_get( LOCKED_POOL_MODULE_NAME, 0, (void **)&locked_pool );
	if( res != NO_ERROR )
		goto err2;

	res = blkman_init_buffer();
	if( res != NO_ERROR )
		goto err3;

	return NO_ERROR;

err3:
	module_put( LOCKED_POOL_MODULE_NAME );
err2:
	/*module_put( PARTITIONS_MANAGER_MODULE_NAME );*/
//err1:
	return res;
}


static int blkman_uninit( void )
{
	SHOW_FLOW0( 3, "" );

	blkman_uninit_buffer();
	module_put( LOCKED_POOL_MODULE_NAME );
	/*module_put( PARTITIONS_MANAGER_MODULE_NAME );*/

	return NO_ERROR;
}


struct dev_calls dev_interface = {
	(int (*)(dev_ident, dev_cookie *cookie)) blkman_open,
	(int (*)(dev_cookie)) blkman_close,
	(int (*)(dev_cookie)) blkman_freecookie,
	(int (*)(dev_cookie, off_t, seek_type)) blkman_seek,
	(int (*)(dev_cookie, int, void *, size_t)) blkman_ioctl,
	(ssize_t (*)(dev_cookie, void *, off_t, ssize_t)) blkman_read,
	(ssize_t (*)(dev_cookie, const void *, off_t, ssize_t)) blkman_write,
	(int (*)(dev_ident)) blkman_canpage,
	(ssize_t (*)(dev_ident, iovecs *, off_t)) blkman_readpage,
	(ssize_t (*)(dev_ident, iovecs *, off_t)) blkman_writepage,
};
blkman_interface blkman = {
	blkman_register_dev,
	blkman_unregister_dev,
	blkman_set_capacity
};

module_header blkman_module = {
	BLKMAN_MODULE_NAME,
	MODULE_CURR_VERSION,
	0,
	&blkman,

	blkman_init,
	blkman_uninit
};

module_header *modules[] = {
	&blkman_module,
	NULL
};


// on entry, max_entries holds the number of free slots in map; on return,
// *num_entries is the number of slots actually used and *mapped_len the
// number of bytes they cover
static int vm_get_page_range_mapping( vm_address_space *aspace, addr_t vaddr, size_t len,
	phys_vec *map, size_t max_entries, size_t *num_entries, size_t *mapped_len )
{
	size_t cur_idx;
	size_t left_len;

	for( left_len = len, cur_idx = 0; left_len > 0 && cur_idx < max_entries; ) {
		int res;

		if( (res = vm_get_page_mapping( vm_get_current_user_aspace_id(), vaddr,
				&map[cur_idx].start )) != NO_ERROR )
			return res;

		map[cur_idx].len = min( ROUND_PAGE_UP( vaddr ) - vaddr, left_len );

		left_len -= map[cur_idx].len;
		vaddr += map[cur_idx].len;

		if( cur_idx > 0
			&& map[cur_idx].start == map[cur_idx - 1].start + map[cur_idx - 1].len ) {
			// physically contiguous with the previous range - merge them
			map[cur_idx - 1].len += map[cur_idx].len;
		} else {
			++cur_idx;
		}
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	return NO_ERROR;
}


int vm_get_iovec_page_mapping( iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	phys_vec *map, size_t max_entries, size_t *num_entries, size_t *mapped_len )
{
	size_t cur_idx;
	size_t left_len;

	// skip iovecs that lie entirely before vec_offset
	while( vec_count > 0 && vec_offset > vec->len ) {
		vec_offset -= vec->len;
		--vec_count;
		++vec;
	}

	for( left_len = len, cur_idx = 0;
		left_len > 0 && vec_count > 0 && cur_idx < max_entries;
		++vec, --vec_count ) {
		addr_t range_start;
		size_t range_len;
		int res;
		size_t cur_num_entries, cur_mapped_len;

		range_start = (addr_t)vec->start + vec_offset;
		range_len = min( vec->len - vec_offset, left_len );
		vec_offset = 0;

		if( (res = vm_get_page_range_mapping( vm_get_current_user_aspace(), range_start,
				range_len, &map[cur_idx], max_entries - cur_idx,
				&cur_num_entries, &cur_mapped_len )) != NO_ERROR )
			return res;

		if( cur_num_entries > 0 && cur_idx > 0
			&& map[cur_idx].start == map[cur_idx - 1].start + map[cur_idx - 1].len ) {
			// the first new range continues the previous one - merge them
			// and close the resulting gap in the map
			map[cur_idx - 1].len += map[cur_idx].len;
			memcpy( &map[cur_idx], &map[cur_idx + 1],
				(cur_num_entries - 1) * sizeof( phys_vec ));
			--cur_num_entries;
		}

		cur_idx += cur_num_entries;
		left_len -= cur_mapped_len;
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	return NO_ERROR;
}


int vm_lock_range( addr_t vaddr, size_t len, int flags )
{
	return NO_ERROR;
}


int vm_unlock_range( addr_t vaddr, size_t len )
{
	return NO_ERROR;
}


/*
void *memcpy(void *dest, const void *src, size_t count)
{
	char *tmp = (char *)dest;
	char *s = (char *)src;

	while(count--)
		*tmp++ = *s++;

	return dest;
}
*/


int devfs_unpublish_device( const char *name )
{
	return ERR_UNIMPLEMENTED;
}
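/*
 * Illustrative sketch only (not part of the original driver): one way a caller
 * might use vm_get_iovec_page_mapping() above to turn an iovec list into a
 * physical scatter/gather table before issuing a transfer. MAX_EXAMPLE_SG and
 * example_build_sg are hypothetical names introduced just for this example.

#define MAX_EXAMPLE_SG 16

static int example_build_sg( iovec *vec, size_t vec_count, size_t len )
{
	phys_vec map[MAX_EXAMPLE_SG];
	size_t num_entries, mapped_len;
	int res;

	res = vm_get_iovec_page_mapping( vec, vec_count, 0, len,
		map, MAX_EXAMPLE_SG, &num_entries, &mapped_len );
	if( res != NO_ERROR )
		return res;

	// at this point map[0 .. num_entries-1] describes mapped_len bytes of
	// physically contiguous ranges and could be handed to a controller;
	// mapped_len may be less than len if the table filled up, in which case
	// the rest would need another pass, as blkman_readwrite does in its
	// chunked transfer loop
	return NO_ERROR;
}
*/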