buffer.c
static inline struct buffer_head * find_buffer(kdev_t dev, int block, int size)
{
        struct buffer_head * tmp;

        for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
                if (tmp->b_blocknr == block && tmp->b_dev == dev)
                        if (tmp->b_size == size)
                                return tmp;
                        else {
                                printk("VFS: Wrong blocksize on device %s\n",
                                        kdevname(dev));
                                return NULL;
                        }
        return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block,size)))
                        return NULL;
                bh->b_count++;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block && bh->b_size == size)
                        return bh;
                bh->b_count--;
        }
}

void set_blocksize(kdev_t dev, int size)
{
        extern int *blksize_size[];
        int i, nlist;
        struct buffer_head * bh, *bhnext;

        if (!blksize_size[MAJOR(dev)])
                return;

        if (size > PAGE_SIZE)
                size = 0;
        switch (size) {
                default: panic("Invalid blocksize passed to set_blocksize");
                case 512: case 1024: case 2048: case 4096: case 8192: ;
        }

        if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
                blksize_size[MAJOR(dev)][MINOR(dev)] = size;
                return;
        }
        if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
                return;
        sync_buffers(dev, 2);
        blksize_size[MAJOR(dev)][MINOR(dev)] = size;

        /* We need to be quite careful how we do this - we are moving entries
           around on the free list, and we can get in a loop if we are not careful. */
        for (nlist = 0; nlist < NR_LIST; nlist++) {
                bh = lru_list[nlist];
                for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
                        if (!bh)
                                break;
                        bhnext = bh->b_next_free;
                        if (bh->b_dev != dev)
                                continue;
                        if (bh->b_size == size)
                                continue;
                        bhnext->b_count++;
                        wait_on_buffer(bh);
                        bhnext->b_count--;
                        if (bh->b_dev == dev && bh->b_size != size) {
                                clear_bit(BH_Dirty, &bh->b_state);
                                clear_bit(BH_Uptodate, &bh->b_state);
                                clear_bit(BH_Req, &bh->b_state);
                                bh->b_flushtime = 0;
                        }
                        remove_from_hash_queue(bh);
                }
        }
}

/* check if a buffer is OK to be reclaimed */
static inline int can_reclaim(struct buffer_head *bh, int size)
{
        if (bh->b_count ||
            buffer_protected(bh) ||
            buffer_locked(bh) ||
            mem_map[MAP_NR((unsigned long) bh->b_data)].count != 1 ||
            buffer_dirty(bh))
                return 0;
        if (bh->b_size != size)
                return 0;
        return 1;
}

/* find a candidate buffer to be reclaimed */
static struct buffer_head *find_candidate(struct buffer_head *bh,
                                          int *list_len, int size)
{
        int lookahead = 7;

        if (!bh)
                goto no_candidate;

        for (; (*list_len) > 0; bh = bh->b_next_free, (*list_len)--) {
                if (size != bh->b_size) {
                        /* this provides a mechanism for freeing blocks
                           of other sizes, this is necessary now that we
                           no longer have the lav code. */
                        try_to_free_buffer(bh,&bh,1);
                        if (!bh)
                                break;
                        lookahead = 7;
                        continue;
                } else if (buffer_locked(bh) &&
                           (bh->b_list == BUF_LOCKED || bh->b_list == BUF_LOCKED1)) {
                        if (!--lookahead) {
                                (*list_len) = 0;
                                goto no_candidate;
                        }
                } else if (can_reclaim(bh,size))
                        return bh;
        }

no_candidate:
        return NULL;
}

static void put_unused_buffer_head(struct buffer_head * bh)
{
        if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
                nr_buffer_heads--;
                kfree(bh);
                return;
        }
        memset(bh,0,sizeof(*bh));
        nr_unused_buffer_heads++;
        bh->b_next_free = unused_list;
        unused_list = bh;
        if (!waitqueue_active(&buffer_wait))
                return;
        wake_up(&buffer_wait);
}

/*
 * We can't put completed temporary IO buffer_heads directly onto the
 * unused_list when they become unlocked, since the device driver
 * end_request routines still expect access to the buffer_head's
 * fields after the final unlock.  So, the device driver puts them on
 * the reuse_list instead once IO completes, and we recover these to
 * the unused_list here.
 *
 * The reuse_list receives buffers from interrupt routines, so we need
 * to be IRQ-safe here (but note that interrupts only _add_ to the
 * reuse_list, never take away. So we don't need to worry about the
 * reuse_list magically emptying).
 */
static inline void recover_reusable_buffer_heads(void)
{
        if (reuse_list) {
                struct buffer_head *head;

                head = xchg(&reuse_list, NULL);
                do {
                        struct buffer_head *bh = head;
                        head = head->b_next_free;
                        put_unused_buffer_head(bh);
                } while (head);
        }
}

extern void allow_interrupts(void);

static void refill_freelist(int size)
{
        struct buffer_head * bh;
        struct buffer_head * candidate[BUF_DIRTY];
        extern struct task_struct *bdflush_tsk;
        unsigned int best_time, winner;
        int buffers[BUF_DIRTY];
        int i, limit = ((min_free_pages + free_pages_low) >> 1);
        int needed;

        refilled = 1;
        /* If there are too many dirty buffers, we wake up the update process
           now so as to ensure that there are still clean buffers available
           for user processes to use (and dirty) */

        /* We are going to try to locate this much memory */
        needed = bdf_prm.b_un.nrefill * size;

        while (nr_free_pages > min_free_pages*2 && needed > 0 &&
               grow_buffers(GFP_BUFFER, size)) {
                needed -= PAGE_SIZE;
        }

repeat:
        allow_interrupts();
        recover_reusable_buffer_heads();
        if (needed <= 0)
                return;

        /* OK, we cannot grow the buffer cache, now try to get some
           from the lru list */

        /* First set the candidate pointers to usable buffers.  This
           should be quick nearly all of the time. */

        for (i=0; i<BUF_DIRTY; i++) {
                buffers[i] = nr_buffers_type[i];
                candidate[i] = find_candidate(lru_list[i], &buffers[i], size);
        }

        /* Now see which candidate wins the election */

        winner = best_time = UINT_MAX;
        for (i=0; i<BUF_DIRTY; i++) {
                if (!candidate[i])
                        continue;
                if (candidate[i]->b_lru_time < best_time) {
                        best_time = candidate[i]->b_lru_time;
                        winner = i;
                }
        }

        /* If we have a winner, use it, and then get a new candidate from that list */
        if (winner != UINT_MAX) {
                i = winner;
                while (needed > 0 && (bh = candidate[i])) {
                        candidate[i] = bh->b_next_free;
                        if (candidate[i] == bh)
                                candidate[i] = NULL;  /* Got last one */
                        remove_from_queues(bh);
                        bh->b_dev = B_FREE;
                        put_last_free(bh);
                        needed -= bh->b_size;
                        buffers[i]--;
                        if (buffers[i] == 0)
                                candidate[i] = NULL;

                        if (candidate[i] && !can_reclaim(candidate[i],size))
                                candidate[i] = find_candidate(candidate[i], &buffers[i], size);
                }
                goto repeat;
        }

        /* Too bad, that was not enough. Try a little harder to grow some. */
        if (nr_free_pages > limit) {
                if (grow_buffers(GFP_BUFFER, size)) {
                        needed -= PAGE_SIZE;
                        goto repeat;
                };
        }

        /* If we are not bdflush we should wake up bdflush and try it again. */
        if (current != bdflush_tsk
            && (buffermem >> PAGE_SHIFT) > (MAP_NR(high_memory) >> 2)
            && nr_buffers_type[BUF_DIRTY] > bdf_prm.b_un.nref_dirt) {
                wakeup_bdflush(1);
                needed -= PAGE_SIZE;
                goto repeat;
        }

        /*
         * In order to protect our reserved pages,
         * return now if we got any buffers.
         */
        allow_interrupts();
        if (free_list[BUFSIZE_INDEX(size)])
                return;

        /* and repeat until we find something good */
        i = grow_buffers(GFP_BUFFER, size);

        if (current != bdflush_tsk && !i && nr_buffers_type[BUF_DIRTY] > 0)
                wakeup_bdflush(1);
        else if (!i)
                grow_buffers(GFP_IO, size);

        /* decrease needed even if there is no success */
        needed -= PAGE_SIZE;
        goto repeat;
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
        struct buffer_head * bh;
        int isize = BUFSIZE_INDEX(size);

        /* If there are too many dirty buffers, we wake up the update process
           now so as to ensure that there are still clean buffers available
           for user processes to use (and dirty) */
repeat:
        allow_interrupts();
        bh = get_hash_table(dev, block, size);
        if (bh) {
                if (!buffer_dirty(bh)) {
                        if (buffer_uptodate(bh))
                                put_last_lru(bh);
                        bh->b_flushtime = 0;
                }
                set_bit(BH_Touched, &bh->b_state);
                return bh;
        }

get_free:
        bh = free_list[isize];
        if (!bh)
                goto refill;
        remove_from_free_list(bh);

        /* OK, FINALLY we know that this buffer is the only one of its kind,
         * and that it's unused (b_count=0), unlocked (buffer_locked=0),
         * and clean */
        bh->b_count=1;
        bh->b_list=BUF_CLEAN;
        bh->b_flushtime=0;
        bh->b_state=(1<<BH_Touched);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;

refill:
        allow_interrupts();
        refill_freelist(size);
        if (!find_buffer(dev,block,size))
                goto get_free;
        goto repeat;
}

void set_writetime(struct buffer_head * buf, int flag)
{
        int newtime;

        if (buffer_dirty(buf)) {
                /* Move buffer to dirty list if jiffies is clear */
                newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
                                     bdf_prm.b_un.age_buffer);
                if (!buf->b_flushtime || buf->b_flushtime > newtime)
                        buf->b_flushtime = newtime;
        } else {
                buf->b_flushtime = 0;
        }
}

/*
 * A buffer may need to be moved from one buffer list to another
 * (e.g. in case it is not shared any more). Handle this.
 */
void refile_buffer(struct buffer_head * buf)
{
        int dispose;

        if (buf->b_dev == B_FREE) {
                printk("Attempt to refile free buffer\n");
                return;
        }
        if (buffer_dirty(buf))
                dispose = BUF_DIRTY;
        else if (buffer_locked(buf))
                dispose = BUF_LOCKED;
        else
                dispose = BUF_CLEAN;
        if (dispose == BUF_CLEAN)
                buf->b_lru_time = jiffies;
        if (dispose != buf->b_list) {
                if (dispose == BUF_DIRTY)
                        buf->b_lru_time = jiffies;
                if (dispose == BUF_LOCKED &&
                    (buf->b_flushtime - buf->b_lru_time) <= bdf_prm.b_un.age_super)
                        dispose = BUF_LOCKED1;
                remove_from_queues(buf);
                buf->b_list = dispose;
                insert_into_queues(buf);
                if (dispose == BUF_DIRTY) {
                        /* This buffer is dirty, maybe we need to start flushing. */
                        /* If too high a percentage of the buffers are dirty... */
                        if (nr_buffers_type[BUF_DIRTY] >
                            nr_buffers * bdf_prm.b_un.nfract/100)
                                wakeup_bdflush(0);
                        /* If this is a loop device, and
                         * more than half of the buffers are dirty... */
                        /* (Prevents no-free-buffers deadlock with loop device.) */
                        if (MAJOR(buf->b_dev) == LOOP_MAJOR &&
                            nr_buffers_type[BUF_DIRTY]*2 > nr_buffers)
                                wakeup_bdflush(1);
                }
        }
}

/*
 * Release a buffer head
 */
void __brelse(struct buffer_head * buf)
{
        wait_on_buffer(buf);

        /* If dirty, mark the time this buffer should be written back */
        set_writetime(buf, 0);
        refile_buffer(buf);

        if (buf->b_count) {
                buf->b_count--;
                return;
        }
        printk("VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it removes the buffer
 * from the hash-queues (so that it won't be re-used if it's
 * shared).
 */
void __bforget(struct buffer_head * buf)
{
        wait_on_buffer(buf);
        mark_buffer_clean(buf);
        clear_bit(BH_Protected, &buf->b_state);
        buf->b_count--;
        remove_from_hash_queue(buf);
        buf->b_dev = NODEV;
        refile_buffer(buf);
}
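For orientation, these primitives are normally consumed through the classic bread() pattern: getblk() looks up or allocates the buffer for (dev, block), the caller starts the read if the data is not already valid, waits for the I/O, and finally drops its reference with brelse(). The sketch below is a minimal reconstruction of that pattern, not code taken from this excerpt; the helper name read_block_sketch is hypothetical, and ll_rw_block(), brelse() and the READ constant come from elsewhere in the kernel rather than from the functions shown above.

/*
 * Minimal sketch of the usual getblk()/brelse() usage pattern (the
 * bread() shape). read_block_sketch is a hypothetical helper name;
 * ll_rw_block(), brelse() and READ are assumed from the rest of the
 * kernel and are not defined in the excerpt above.
 */
static struct buffer_head * read_block_sketch(kdev_t dev, int block, int size)
{
        struct buffer_head * bh;

        bh = getblk(dev, block, size);          /* find or allocate the buffer */
        if (buffer_uptodate(bh))
                return bh;                      /* cache hit: data already valid */

        ll_rw_block(READ, 1, &bh);              /* queue the read with the driver */
        wait_on_buffer(bh);                     /* sleep until the I/O completes */
        if (buffer_uptodate(bh))
                return bh;

        brelse(bh);                             /* I/O error: drop our reference */
        return NULL;
}

Note that getblk() in this version never returns NULL: when the free list for the requested size is empty it calls refill_freelist() and retries, so the only failure a caller has to handle is an I/O error reported through the uptodate bit.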