buffer.c
		bh->b_prev_free->b_next_free = bh->b_next_free;
		bh->b_next_free->b_prev_free = bh->b_prev_free;
		if (free_list[isize] == bh)
			free_list[isize] = bh->b_next_free;
	}
	bh->b_next_free = bh->b_prev_free = NULL;
}

static void remove_from_queues(struct buffer_head * bh)
{
	if (bh->b_dev == B_FREE) {
		remove_from_free_list(bh);
		/* Free list entries should not be in the hash queue */
		return;
	}
	nr_buffers_type[bh->b_list]--;
	remove_from_hash_queue(bh);
	remove_from_lru_list(bh);
}

static inline void put_last_free(struct buffer_head * bh)
{
	if (bh) {
		struct buffer_head **bhp = &free_list[BUFSIZE_INDEX(bh->b_size)];

		bh->b_dev = B_FREE;	/* So it is obvious we are on the free list. */

		/* Add to back of free list. */
		if (!*bhp) {
			*bhp = bh;
			bh->b_prev_free = bh;
		}

		bh->b_next_free = *bhp;
		bh->b_prev_free = (*bhp)->b_prev_free;
		(*bhp)->b_prev_free->b_next_free = bh;
		(*bhp)->b_prev_free = bh;
	}
}

static void insert_into_queues(struct buffer_head * bh)
{
	/* put at end of free list */
	if (bh->b_dev == B_FREE) {
		put_last_free(bh);
	} else {
		struct buffer_head **bhp = &lru_list[bh->b_list];

		if (!*bhp) {
			*bhp = bh;
			bh->b_prev_free = bh;
		}

		if (bh->b_next_free)
			panic("VFS: buffer LRU pointers corrupted");

		bh->b_next_free = *bhp;
		bh->b_prev_free = (*bhp)->b_prev_free;
		(*bhp)->b_prev_free->b_next_free = bh;
		(*bhp)->b_prev_free = bh;

		nr_buffers_type[bh->b_list]++;

		/* Put the buffer in new hash-queue if it has a device. */
		bh->b_next = NULL;
		bh->b_pprev = NULL;
		if (bh->b_dev) {
			struct buffer_head **bhp = &hash(bh->b_dev, bh->b_blocknr);
			struct buffer_head *next = *bhp;

			if (next) {
				bh->b_next = next;
				next->b_pprev = &bh->b_next;
			}
			*bhp = bh;
			bh->b_pprev = bhp;
		}
		nr_hashed_buffers++;
	}
}

struct buffer_head * find_buffer(kdev_t dev, int block, int size)
{
	struct buffer_head * next;

	next = hash(dev,block);
	for (;;) {
		struct buffer_head *tmp = next;
		if (!next)
			break;
		next = tmp->b_next;
		if (tmp->b_blocknr != block || tmp->b_size != size || tmp->b_dev != dev)
			continue;
		next = tmp;
		break;
	}
	return next;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	bh = find_buffer(dev,block,size);
	if (bh)
		bh->b_count++;
	return bh;
}

unsigned int get_hardblocksize(kdev_t dev)
{
	/*
	 * Get the hard sector size for the given device. If we don't know
	 * what it is, return 0.
	 */
	if (hardsect_size[MAJOR(dev)] != NULL) {
		int blksize = hardsect_size[MAJOR(dev)][MINOR(dev)];

		if (blksize != 0)
			return blksize;
	}

	/*
	 * We don't know what the hardware sector size for this device is.
	 * Return 0 indicating that we don't know.
	 */
	return 0;
}
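/*
 * Illustrative sketch, not part of the original buffer.c: how a walk of
 * one of the circular free lists maintained by put_last_free() above
 * could look.  The list is circular, so iteration starts at
 * free_list[isize] and stops when it wraps back to the head rather than
 * on a NULL pointer.  The function name and printk format are
 * hypothetical.
 */
static void walk_free_list_sketch(int isize)
{
	struct buffer_head *bh = free_list[isize];

	if (!bh)		/* an empty list has a NULL head */
		return;
	do {
		printk("free buffer: block %lu, size %lu\n",
		       (unsigned long) bh->b_blocknr,
		       (unsigned long) bh->b_size);
		bh = bh->b_next_free;
	} while (bh != free_list[isize]);
}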
void set_blocksize(kdev_t dev, int size)
{
	extern int *blksize_size[];
	int i, nlist;
	struct buffer_head * bh, *bhnext;

	if (!blksize_size[MAJOR(dev)])
		return;

	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || (size & (size-1)))
		panic("Invalid blocksize passed to set_blocksize");

	if (blksize_size[MAJOR(dev)][MINOR(dev)] == 0 && size == BLOCK_SIZE) {
		blksize_size[MAJOR(dev)][MINOR(dev)] = size;
		return;
	}
	if (blksize_size[MAJOR(dev)][MINOR(dev)] == size)
		return;
	sync_buffers(dev, 2);
	blksize_size[MAJOR(dev)][MINOR(dev)] = size;

	/* We need to be quite careful how we do this - we are moving entries
	 * around on the free list, and we can get in a loop if we are not
	 * careful.
	 */
	for (nlist = 0; nlist < NR_LIST; nlist++) {
		bh = lru_list[nlist];
		for (i = nr_buffers_type[nlist]*2 ; --i > 0 ; bh = bhnext) {
			if (!bh)
				break;

			bhnext = bh->b_next_free;
			if (bh->b_dev != dev)
				continue;
			if (bh->b_size == size)
				continue;

			bhnext->b_count++;
			wait_on_buffer(bh);
			bhnext->b_count--;

			if (bh->b_dev == dev && bh->b_size != size) {
				clear_bit(BH_Dirty, &bh->b_state);
				clear_bit(BH_Uptodate, &bh->b_state);
				clear_bit(BH_Req, &bh->b_state);
				bh->b_flushtime = 0;
			}
			remove_from_hash_queue(bh);
		}
	}
}

/*
 * We used to try various strange things. Let's not.
 */
static void refill_freelist(int size)
{
	if (!grow_buffers(size)) {
		wakeup_bdflush(1);
		current->policy |= SCHED_YIELD;
		schedule();
	}
}

void init_buffer(struct buffer_head *bh, kdev_t dev, int block,
		 bh_end_io_t *handler, void *dev_id)
{
	bh->b_count = 1;
	bh->b_list = BUF_CLEAN;
	bh->b_flushtime = 0;
	bh->b_dev = dev;
	bh->b_blocknr = block;
	bh->b_end_io = handler;
	bh->b_dev_id = dev_id;
}

static void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	mark_buffer_uptodate(bh, uptodate);
	unlock_buffer(bh);
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 *
 * 14.02.92: changed it to sync dirty buffers a bit: better performance
 * when the filesystem starts to get full of dirty blocks (I hope).
 */
struct buffer_head * getblk(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;
	int isize;

repeat:
	bh = get_hash_table(dev, block, size);
	if (bh) {
		if (!buffer_dirty(bh)) {
			bh->b_flushtime = 0;
		}
		return bh;
	}

	isize = BUFSIZE_INDEX(size);
get_free:
	bh = free_list[isize];
	if (!bh)
		goto refill;
	remove_from_free_list(bh);

	/* OK, FINALLY we know that this buffer is the only one of its kind,
	 * and that it's unused (b_count=0), unlocked, and clean.
	 */
	init_buffer(bh, dev, block, end_buffer_io_sync, NULL);
	bh->b_state = 0;
	insert_into_queues(bh);
	return bh;

	/*
	 * If we block while refilling the free list, somebody may
	 * create the buffer first ... search the hashes again.
	 */
refill:
	refill_freelist(size);
	if (!find_buffer(dev,block,size))
		goto get_free;
	goto repeat;
}
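/*
 * Illustrative sketch, not part of the original buffer.c: the usual
 * life cycle of a buffer obtained from getblk().  getblk() does not
 * return NULL (it sleeps in refill_freelist() until a buffer becomes
 * available), so the caller owns exactly one b_count reference and must
 * drop it with brelse().  The function name is hypothetical, and the
 * two-argument mark_buffer_dirty() is assumed from the same kernel
 * generation as this file.
 */
static void getblk_usage_sketch(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = getblk(dev, block, size);

	memset(bh->b_data, 0, size);	/* modify the cached block data */
	mark_buffer_uptodate(bh, 1);	/* contents are now meaningful */
	mark_buffer_dirty(bh, 0);	/* queue the buffer for write-back */
	brelse(bh);			/* drop our reference */
}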
void set_writetime(struct buffer_head * buf, int flag)
{
	int newtime;

	if (buffer_dirty(buf)) {
		/* Move buffer to dirty list if jiffies is clear. */
		newtime = jiffies + (flag ? bdf_prm.b_un.age_super :
				     bdf_prm.b_un.age_buffer);
		if (!buf->b_flushtime || buf->b_flushtime > newtime)
			buf->b_flushtime = newtime;
	} else {
		buf->b_flushtime = 0;
	}
}

/*
 * Put a buffer into the appropriate list, without side-effects.
 */
static inline void file_buffer(struct buffer_head *bh, int list)
{
	remove_from_queues(bh);
	bh->b_list = list;
	insert_into_queues(bh);
}

/*
 * A buffer may need to be moved from one buffer list to another
 * (e.g. in case it is not shared any more). Handle this.
 */
void refile_buffer(struct buffer_head * buf)
{
	int dispose;

	if (buf->b_dev == B_FREE) {
		printk("Attempt to refile free buffer\n");
		return;
	}
	if (buffer_dirty(buf))
		dispose = BUF_DIRTY;
	else if (buffer_locked(buf))
		dispose = BUF_LOCKED;
	else
		dispose = BUF_CLEAN;
	if (dispose != buf->b_list) {
		file_buffer(buf, dispose);
		if (dispose == BUF_DIRTY) {
			int too_many = (nr_buffers * bdf_prm.b_un.nfract/100);

			/* This buffer is dirty, maybe we need to start flushing.
			 * If too high a percentage of the buffers are dirty...
			 */
			if (nr_buffers_type[BUF_DIRTY] > too_many)
				wakeup_bdflush(1);

			/* If this is a loop device, and
			 * more than half of the buffers are dirty...
			 * (Prevents no-free-buffers deadlock with loop device.)
			 */
			if (MAJOR(buf->b_dev) == LOOP_MAJOR &&
			    nr_buffers_type[BUF_DIRTY]*2 > nr_buffers)
				wakeup_bdflush(1);
		}
	}
}

/*
 * Release a buffer head
 */
void __brelse(struct buffer_head * buf)
{
	/* If dirty, mark the time this buffer should be written back. */
	set_writetime(buf, 0);
	refile_buffer(buf);
	touch_buffer(buf);

	if (buf->b_count) {
		buf->b_count--;
		return;
	}
	printk("VFS: brelse: Trying to free free buffer\n");
}

/*
 * bforget() is like brelse(), except it puts the buffer on the
 * free list if it can.. We can NOT free the buffer if:
 *  - there are other users of it
 *  - it is locked and thus can have active IO
 */
void __bforget(struct buffer_head * buf)
{
	if (buf->b_count != 1 || buffer_locked(buf)) {
		__brelse(buf);
		return;
	}
	buf->b_count = 0;
	buf->b_state = 0;
	remove_from_queues(buf);
	put_last_free(buf);
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(kdev_t dev, int block, int size)
{
	struct buffer_head * bh;

	bh = getblk(dev, block, size);
	if (buffer_uptodate(bh))
		return bh;
	ll_rw_block(READ, 1, &bh);
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * Ok, breada can be used as bread, but additionally to mark other
 * blocks for reading as well. End the argument list with a negative
 * number.
 */

#define NBUF 16

struct buffer_head * breada(kdev_t dev, int block, int bufsize,
			    unsigned int pos, unsigned int filesize)
{
	struct buffer_head * bhlist[NBUF];
	unsigned int blocks;
	struct buffer_head * bh;
	int index;
	int i, j;

	if (pos >= filesize)
		return NULL;

	if (block < 0)
		return NULL;

	bh = getblk(dev, block, bufsize);
	index = BUFSIZE_INDEX(bh->b_size);

	if (buffer_uptodate(bh))
		return(bh);
	else
		ll_rw_block(READ, 1, &bh);

	blocks = (filesize - pos) >> (9+index);

	if (blocks < (read_ahead[MAJOR(dev)] >> index))
		blocks = read_ahead[MAJOR(dev)] >> index;
	if (blocks > NBUF)
		blocks = NBUF;

/*	if (blocks) printk("breada (new) %d blocks\n",blocks); */

	bhlist[0] = bh;
	j = 1;
	for (i=1; i<blocks; i++) {
		bh = getblk(dev,block+i,bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		}
		else bhlist[j++] = bh;
	}

	/* Request the read for these buffers, and then release them. */
	if (j>1)
		ll_rw_block(READA, (j-1), bhlist+1);
	for (i=1; i<j; i++)
		brelse(bhlist[i]);

	/* Wait for this buffer, and then continue on. */
	bh = bhlist[0];
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
 * Note: the caller should wake up the buffer_wait list if needed.
 */
static void put_unused_buffer_head(struct buffer_head * bh)
{
	if (nr_unused_buffer_heads >= MAX_UNUSED_BUFFERS) {
		nr_buffer_heads--;
		kmem_cache_free(bh_cachep, bh);
		return;
	}

	memset(bh,0,sizeof(*bh));
	nr_unused_buffer_heads++;
	bh->b_next_free = unused_list;
	unused_list = bh;
}

/*
 * We can't put completed temporary IO buffer_heads directly onto the
 * unused_list when they become unlocked, since the device driver
 * end_request routines still expect access to the buffer_head's
 * fields after the final unlock.  So, the device driver puts them on
 * the reuse_list instead once IO completes, and we recover these to
 * the unused_list here.
 */
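/*
 * Illustrative sketch appended to this excerpt, not part of the
 * original buffer.c: the standard bread()/brelse() pattern a filesystem
 * uses to read one metadata block.  bread() returns NULL only if the
 * block could not be read; on success the caller holds a reference
 * that must be dropped with brelse() once the data has been examined.
 * The function name is hypothetical.
 */
static int bread_usage_sketch(kdev_t dev, int block, int size)
{
	struct buffer_head *bh = bread(dev, block, size);

	if (!bh)
		return -EIO;	/* I/O error: the block was unreadable */
	/* ... inspect bh->b_data, which now holds the block's contents ... */
	brelse(bh);
	return 0;
}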