bitmap.c
static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
{
	struct in_core_key *key = &hint->key;
	b_blocknr_t slice_start;

	slice_start = (keyed_hash((char *)(&key->k_dir_id), 4) % 100) *
	    (hint->end / 100);
	if (slice_start > hint->search_start
	    || slice_start + (hint->end / 100) <= hint->search_start) {
		hint->search_start = slice_start;
	}
}

static void determine_search_start(reiserfs_blocknr_hint_t * hint,
				   int amount_needed)
{
	struct super_block *s = hint->th->t_super;
	int unfm_hint;

	hint->beg = 0;
	hint->end = SB_BLOCK_COUNT(s) - 1;

	/* This is former border algorithm. Now with tunable border offset */
	if (concentrating_formatted_nodes(s))
		set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	/* whenever we create a new directory, we displace it.  At first we
	 * will hash for location, later we might look for a moderately empty
	 * place for it */
	if (displacing_new_packing_localities(s)
	    && hint->th->displace_new_blocks) {
		displace_new_packing_locality(hint);

		/* we do not continue determine_search_start,
		 * if new packing locality is being displaced */
		return;
	}
#endif

	/* all persons should feel encouraged to add more special cases here
	 * and test them */

	if (displacing_large_files(s) && !hint->formatted_node
	    && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
		displace_large_file(hint);
		return;
	}

	/* if none of our special cases is relevant, use the left neighbor in
	 * the tree order of the new node we are allocating for */
	if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
		hash_formatted_node(hint);
		return;
	}

	unfm_hint = get_left_neighbor(hint);

	/* Mimic old block allocator behaviour, that is if VFS allowed for
	 * preallocation, new blocks are displaced based on directory ID.
	 * Also, if suggested search_start is less than last preallocated
	 * block, we start searching from it, assuming that HDD dataflow is
	 * faster in forward direction */
	if (TEST_OPTION(old_way, s)) {
		if (!hint->formatted_node) {
			if (!reiserfs_hashed_relocation(s))
				old_way(hint);
			else if (!reiserfs_no_unhashed_relocation(s))
				old_hashed_relocation(hint);
			if (hint->inode
			    && hint->search_start <
			    REISERFS_I(hint->inode)->i_prealloc_block)
				hint->search_start =
				    REISERFS_I(hint->inode)->i_prealloc_block;
		}
		return;
	}

	/* This is an approach proposed by Hans */
	if (TEST_OPTION(hundredth_slices, s)
	    && !(displacing_large_files(s) && !hint->formatted_node)) {
		hundredth_slices(hint);
		return;
	}

	/* old_hashed_relocation only works on unformatted */
	if (!unfm_hint && !hint->formatted_node
	    && TEST_OPTION(old_hashed_relocation, s)) {
		old_hashed_relocation(hint);
	}
	/* new_hashed_relocation works with both formatted/unformatted nodes */
	if ((!unfm_hint || hint->formatted_node)
	    && TEST_OPTION(new_hashed_relocation, s)) {
		new_hashed_relocation(hint);
	}
	/* dirid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#endif

	/* oid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
		oid_groups(hint);
	}
	return;
}

static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
{
	/* make minimum size a mount option and benchmark both ways */
	/* we preallocate blocks only for regular files, specific size */
	/* benchmark preallocating always and see what happens */

	hint->prealloc_size = 0;

	if (!hint->formatted_node && hint->preallocate) {
		if (S_ISREG(hint->inode->i_mode)
		    && hint->inode->i_size >=
		    REISERFS_SB(hint->th->t_super)->s_alloc_options.
		    preallocmin * hint->inode->i_sb->s_blocksize)
			hint->prealloc_size =
			    REISERFS_SB(hint->th->t_super)->s_alloc_options.
			    preallocsize - 1;
	}
	return CARRY_ON;
}

/* XXX I know it could be merged with an upper-level function;
   but maybe the resulting function would be too complex. */
static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
						 b_blocknr_t * new_blocknrs,
						 b_blocknr_t start,
						 b_blocknr_t finish, int min,
						 int amount_needed,
						 int prealloc_size)
{
	int rest = amount_needed;
	int nr_allocated;

	while (rest > 0 && start <= finish) {
		nr_allocated = scan_bitmap(hint->th, &start, finish, min,
					   rest + prealloc_size,
					   !hint->formatted_node, hint->block);

		if (nr_allocated == 0)	/* no new blocks allocated, return */
			break;

		/* fill free_blocknrs array first */
		while (rest > 0 && nr_allocated > 0) {
			*new_blocknrs++ = start++;
			rest--;
			nr_allocated--;
		}

		/* do we have something to fill prealloc. array also ? */
		if (nr_allocated > 0) {
			/* it means prealloc_size was greater than 0 and we do
			 * preallocation */
			list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
				 &SB_JOURNAL(hint->th->t_super)->
				 j_prealloc_list);
			REISERFS_I(hint->inode)->i_prealloc_block = start;
			REISERFS_I(hint->inode)->i_prealloc_count =
			    nr_allocated;
			break;
		}
	}

	return (amount_needed - rest);
}

static inline int blocknrs_and_prealloc_arrays_from_search_start
    (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
     int amount_needed)
{
	struct super_block *s = hint->th->t_super;
	b_blocknr_t start = hint->search_start;
	b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
	int passno = 0;
	int nr_allocated = 0;

	determine_prealloc_size(hint);
	if (!hint->formatted_node) {
		int quota_ret;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: allocating %d blocks id=%u",
			       amount_needed, hint->inode->i_uid);
#endif
		quota_ret =
		    DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
		if (quota_ret)	/* Quota exceeded? */
			return QUOTA_EXCEEDED;
		if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
			reiserfs_debug(s, REISERFS_DEBUG_CODE,
				       "reiserquota: allocating (prealloc) %d blocks id=%u",
				       hint->prealloc_size,
				       hint->inode->i_uid);
#endif
			quota_ret =
			    DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
							 hint->prealloc_size);
			if (quota_ret)
				hint->preallocate = hint->prealloc_size = 0;
		}
		/* for unformatted nodes, force large allocations */
	}

	do {
		switch (passno++) {
		case 0:	/* Search from hint->search_start to end of disk */
			start = hint->search_start;
			finish = SB_BLOCK_COUNT(s) - 1;
			break;
		case 1:	/* Search from hint->beg to hint->search_start */
			start = hint->beg;
			finish = hint->search_start;
			break;
		case 2:	/* Last chance: Search from 0 to hint->beg */
			start = 0;
			finish = hint->beg;
			break;
		default:
			/* We've tried searching everywhere, not enough space */
			/* Free the blocks */
			if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(s, REISERFS_DEBUG_CODE,
					       "reiserquota: freeing (nospace) %d blocks id=%u",
					       amount_needed +
					       hint->prealloc_size -
					       nr_allocated,
					       hint->inode->i_uid);
#endif
				/* Free not allocated blocks */
				DQUOT_FREE_BLOCK_NODIRTY(hint->inode,
							 amount_needed +
							 hint->prealloc_size -
							 nr_allocated);
			}
			while (nr_allocated--)
				reiserfs_free_block(hint->th, hint->inode,
						    new_blocknrs[nr_allocated],
						    !hint->formatted_node);

			return NO_DISK_SPACE;
		}
	} while ((nr_allocated += allocate_without_wrapping_disk(hint,
								  new_blocknrs +
								  nr_allocated,
								  start, finish,
								  1,
								  amount_needed -
								  nr_allocated,
								  hint->
								  prealloc_size))
		 < amount_needed);

	if (!hint->formatted_node &&
	    amount_needed + hint->prealloc_size >
	    nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
		/* Some of preallocation blocks were not allocated */
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: freeing (failed prealloc) %d blocks id=%u",
			       amount_needed + hint->prealloc_size -
			       nr_allocated -
			       REISERFS_I(hint->inode)->i_prealloc_count,
			       hint->inode->i_uid);
#endif
		DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
					 hint->prealloc_size - nr_allocated -
					 REISERFS_I(hint->inode)->
					 i_prealloc_count);
	}

	return CARRY_ON;
}

/* grab new blocknrs from preallocated list */
/* return amount still needed after using them */
static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
					      b_blocknr_t * new_blocknrs,
					      int amount_needed)
{
	struct inode *inode = hint->inode;

	if (REISERFS_I(inode)->i_prealloc_count > 0) {
		while (amount_needed) {

			*new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
			REISERFS_I(inode)->i_prealloc_count--;

			amount_needed--;

			if (REISERFS_I(inode)->i_prealloc_count <= 0) {
				list_del(&REISERFS_I(inode)->i_prealloc_list);
				break;
			}
		}
	}
	/* return amount still needed after using preallocated blocks */
	return amount_needed;
}

int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t * hint,
			       b_blocknr_t * new_blocknrs, int amount_needed,
			       int reserved_by_us /* Amount of blocks we have
						     already reserved */ )
{
	int initial_amount_needed = amount_needed;
	int ret;
	struct super_block *s = hint->th->t_super;

	/* Check if there is enough space, taking into account reserved space */
	if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
	    amount_needed - reserved_by_us)
		return NO_DISK_SPACE;
	/* should this be if !hint->inode && hint->preallocate? */
	/* do you mean hint->formatted_node can be removed ?
	   - Zam */
	/* hint->formatted_node cannot be removed because we try to access
	   inode information here, and there is often no inode associated with
	   metadata allocations - green */

	if (!hint->formatted_node && hint->preallocate) {
		amount_needed = use_preallocated_list_if_available
		    (hint, new_blocknrs, amount_needed);
		if (amount_needed == 0)	/* all blocknrs we need we got from
					   prealloc. list */
			return CARRY_ON;
		new_blocknrs += (initial_amount_needed - amount_needed);
	}

	/* find search start and save it in hint structure */
	determine_search_start(hint, amount_needed);
	if (hint->search_start >= SB_BLOCK_COUNT(s))
		hint->search_start = SB_BLOCK_COUNT(s) - 1;

	/* allocation itself; fill new_blocknrs and preallocation arrays */
	ret = blocknrs_and_prealloc_arrays_from_search_start
	    (hint, new_blocknrs, amount_needed);

	/* we used prealloc. list to fill (partially) new_blocknrs array.
	 * If final allocation fails we need to return blocks back to
	 * prealloc. list or just free them. -- Zam (I chose second variant) */

	if (ret != CARRY_ON) {
		while (amount_needed++ < initial_amount_needed) {
			reiserfs_free_block(hint->th, hint->inode,
					    *(--new_blocknrs), 1);
		}
	}
	return ret;
}

void reiserfs_cache_bitmap_metadata(struct super_block *sb,
				    struct buffer_head *bh,
				    struct reiserfs_bitmap_info *info)
{
	unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);

	/* The first bit must ALWAYS be 1 */
	BUG_ON(!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data));

	info->free_count = 0;

	while (--cur >= (unsigned long *)bh->b_data) {
		int i;

		/* 0 and ~0 are special, we can optimize for them */
		if (*cur == 0)
			info->free_count += BITS_PER_LONG;
		else if (*cur != ~0L)	/* A mix, investigate */
			for (i = BITS_PER_LONG - 1; i >= 0; i--)
				if (!reiserfs_test_le_bit(i, cur))
					info->free_count++;
	}
}

struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
					       unsigned int bitmap)
{
	b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
	struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
	struct buffer_head *bh;

	/* Way old format filesystems had the bitmaps packed up front.
	 * I doubt there are any of these left, but just in case... */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(sb)->s_properties))))
		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
	else if (bitmap == 0)
		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;

	bh = sb_bread(sb, block);
	if (bh == NULL)
		reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
				 "reading failed", __FUNCTION__, block);
	else {
		if (buffer_locked(bh)) {
			PROC_INFO_INC(sb, scan_bitmap.wait);
			__wait_on_buffer(bh);
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(atomic_read(&bh->b_count) == 0);

		if (info->free_count == UINT_MAX)
			reiserfs_cache_bitmap_metadata(sb, bh, info);
	}

	return bh;
}

int reiserfs_init_bitmap_cache(struct super_block *sb)
{
	struct reiserfs_bitmap_info *bitmap;
	unsigned int bmap_nr = reiserfs_bmap_count(sb);

	bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
	if (bitmap == NULL)
		return -ENOMEM;

	memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr);

	SB_AP_BITMAP(sb) = bitmap;

	return 0;
}

void reiserfs_free_bitmap_cache(struct super_block *sb)
{
	if (SB_AP_BITMAP(sb)) {
		vfree(SB_AP_BITMAP(sb));
		SB_AP_BITMAP(sb) = NULL;
	}
}
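
/*
 * Usage sketch (not part of the original bitmap.c): a minimal, hedged
 * illustration of how a caller might fill in a reiserfs_blocknr_hint_t and
 * request one data block from reiserfs_allocate_blocknrs(), assuming the
 * reiserfs headers that define the hint structure and the CARRY_ON /
 * NO_DISK_SPACE / QUOTA_EXCEEDED return codes are already included.  The
 * field names are the ones used by the functions above; the function name
 * sketch_allocate_one_block and the error-code mapping are hypothetical,
 * and the real call sites live elsewhere in the reiserfs tree/inode code.
 */
static int sketch_allocate_one_block(struct reiserfs_transaction_handle *th,
				     struct inode *inode,
				     struct in_core_key *key,
				     b_blocknr_t tree_position,
				     b_blocknr_t *blocknr)
{
	reiserfs_blocknr_hint_t hint;
	int ret;

	memset(&hint, 0, sizeof(hint));
	hint.th = th;			/* transaction this allocation belongs to */
	hint.inode = inode;		/* needed for quota and preallocation */
	hint.key = *key;		/* drives the dirid/oid placement policies */
	hint.block = tree_position;	/* logical position of the new block */
	hint.formatted_node = 0;	/* asking for an unformatted (data) block */
	hint.preallocate = 1;		/* allow the preallocation path */

	/* one block wanted; the caller has reserved nothing in advance */
	ret = reiserfs_allocate_blocknrs(&hint, blocknr, 1, 0);
	if (ret == CARRY_ON)
		return 0;
	return (ret == QUOTA_EXCEEDED) ? -EDQUOT : -ENOSPC;
}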