buffer.c
        bh->b_dev = dev;
        bh->b_blocknr = block;
        insert_into_queues(bh);
        return bh;
}
void brelse(struct buffer_head * buf)
{
        if (!buf)
                return;
        wait_on_buffer(buf);
        if (buf->b_count) {
                if (--buf->b_count)
                        return;
                wake_up(&buffer_wait);
                return;
        }
        printk("VFS: brelse: Trying to free free buffer\n");
}
/*
* bread() reads a specified block and returns the buffer that contains
* it. It returns NULL if the block was unreadable.
*/
struct buffer_head * bread(dev_t dev, int block, int size)
{
        struct buffer_head * bh;

        if (!(bh = getblk(dev, block, size))) {
                printk("VFS: bread: READ error on device %d/%d\n",
                        MAJOR(dev), MINOR(dev));
                return NULL;
        }
        if (bh->b_uptodate)
                return bh;
        ll_rw_block(READ, 1, &bh);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
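/*
 * Minimal usage sketch (not part of the original file; ROOT_DEV and the
 * block number are assumptions for illustration): read one block, use
 * its data, then drop the reference.
 */
#if 0   /* illustrative only */
static void bread_example(void)
{
        struct buffer_head * bh;

        bh = bread(ROOT_DEV, 1, BLOCK_SIZE);    /* e.g. block 1 of the root device */
        if (!bh)
                return;         /* bread returns NULL if the block was unreadable */
        /* bh->b_data points at BLOCK_SIZE bytes of up-to-date block data */
        brelse(bh);             /* balance the reference taken by getblk */
}
#endif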
/*
 * breada() can be used like bread(), but additionally marks other
 * blocks for reading as well. End the argument list with a negative
 * number.
 */
struct buffer_head * breada(dev_t dev, int first, ...)
{
        va_list args;
        unsigned int blocksize;
        struct buffer_head * bh, *tmp;

        va_start(args, first);
        blocksize = BLOCK_SIZE;
        if (blksize_size[MAJOR(dev)] && blksize_size[MAJOR(dev)][MINOR(dev)])
                blocksize = blksize_size[MAJOR(dev)][MINOR(dev)];
        if (!(bh = getblk(dev, first, blocksize))) {
                printk("VFS: breada: READ error on device %d/%d\n",
                        MAJOR(dev), MINOR(dev));
                return NULL;
        }
        if (!bh->b_uptodate)
                ll_rw_block(READ, 1, &bh);
        while ((first = va_arg(args, int)) >= 0) {
                tmp = getblk(dev, first, blocksize);
                if (tmp) {
                        if (!tmp->b_uptodate)
                                ll_rw_block(READA, 1, &tmp);
                        tmp->b_count--; /* release the extra reference without waiting */
                }
        }
        va_end(args);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
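/*
 * Call sketch (hypothetical block numbers): read block 10 synchronously
 * and queue read-ahead for blocks 11 and 12. The negative terminator is
 * what ends the variadic list, as the comment above requires.
 */
#if 0   /* illustrative only */
        bh = breada(ROOT_DEV, 10, 11, 12, -1);
        if (bh)
                brelse(bh);
#endif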
/*
* See fs/inode.c for the weird use of volatile..
*/
static void put_unused_buffer_head(struct buffer_head * bh)
{
        struct wait_queue * wait;

        wait = ((volatile struct buffer_head *) bh)->b_wait;
        memset((void *) bh, 0, sizeof(*bh));
        ((volatile struct buffer_head *) bh)->b_wait = wait;
        bh->b_next_free = unused_list;
        unused_list = bh;
}
static void get_more_buffer_heads(void)
{
        int i;
        struct buffer_head * bh;

        if (unused_list)
                return;
        if (!(bh = (struct buffer_head *) get_free_page(GFP_BUFFER)))
                return;
        /* carve the fresh page into buffer heads and chain them onto
           the unused list */
        i = PAGE_SIZE / sizeof(*bh);
        nr_buffer_heads += i;
        for ( ; i > 0 ; i--) {
                bh->b_next_free = unused_list;  /* only make link */
                unused_list = bh++;
        }
}
static struct buffer_head * get_unused_buffer_head(void)
{
        struct buffer_head * bh;

        get_more_buffer_heads();
        if (!unused_list)
                return NULL;
        bh = unused_list;
        unused_list = bh->b_next_free;
        bh->b_next_free = NULL;
        bh->b_data = NULL;
        bh->b_size = 0;
        bh->b_req = 0;
        return bh;
}
/*
 * Create the appropriate buffers when given a page for the data area and
 * the size of each buffer. Use the bh->b_this_page linked list to
 * follow the buffers created. Return NULL if unable to create more
 * buffers.
 */
static struct buffer_head * create_buffers(unsigned long page, unsigned long size)
{
        struct buffer_head *bh, *head;
        unsigned long offset;

        head = NULL;
        offset = PAGE_SIZE;
        /* offset is unsigned, so once it wraps below zero it compares
           >= PAGE_SIZE and the loop ends */
        while ((offset -= size) < PAGE_SIZE) {
                bh = get_unused_buffer_head();
                if (!bh)
                        goto no_grow;
                bh->b_this_page = head;
                head = bh;
                bh->b_data = (char *) (page + offset);
                bh->b_size = size;
        }
        return head;
/*
 * In case anything failed, we just free everything we got.
 */
no_grow:
        bh = head;
        while (bh) {
                head = bh;
                bh = bh->b_this_page;
                put_unused_buffer_head(head);
        }
        return NULL;
}
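/*
 * Layout note (assuming PAGE_SIZE 4096 and size 1024): the loop above
 * walks offset down 3072, 2048, 1024, 0 and pushes each new head onto
 * the front of the list, so the returned chain visits the page's buffers
 * in ascending address order: head->b_data == page, then page+1024, etc.
 */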
static void read_buffers(struct buffer_head * bh[], int nrbuf)
{
        int i;
        int bhnum = 0;
        struct buffer_head * bhr[8];

        for (i = 0 ; i < nrbuf ; i++) {
                if (bh[i] && !bh[i]->b_uptodate)
                        bhr[bhnum++] = bh[i];
        }
        if (bhnum)
                ll_rw_block(READ, bhnum, bhr);
        for (i = 0 ; i < nrbuf ; i++) {
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                }
        }
}
static unsigned long check_aligned(struct buffer_head * first, unsigned long address,
        dev_t dev, int *b, int size)
{
        struct buffer_head * bh[8];
        unsigned long page;
        unsigned long offset;
        int block;
        int nrbuf;

        page = (unsigned long) first->b_data;
        if (page & ~PAGE_MASK) {
                brelse(first);
                return 0;
        }
        mem_map[MAP_NR(page)]++;
        bh[0] = first;
        nrbuf = 1;
        for (offset = size ; offset < PAGE_SIZE ; offset += size) {
                block = *++b;
                if (!block)
                        goto no_go;
                first = get_hash_table(dev, block, size);
                if (!first)
                        goto no_go;
                bh[nrbuf++] = first;
                if (page + offset != (unsigned long) first->b_data)
                        goto no_go;
        }
        read_buffers(bh, nrbuf);        /* make sure they are actually read correctly */
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        free_page(address);
        ++current->min_flt;
        return page;
no_go:
        while (nrbuf-- > 0)
                brelse(bh[nrbuf]);
        free_page(page);
        return 0;
}
static unsigned long try_to_load_aligned(unsigned long address,
        dev_t dev, int b[], int size)
{
        struct buffer_head * bh, * tmp, * arr[8];
        unsigned long offset;
        int * p;
        int block;

        bh = create_buffers(address, size);
        if (!bh)
                return 0;
        /* do any of the buffers already exist? punt if so.. */
        p = b;
        for (offset = 0 ; offset < PAGE_SIZE ; offset += size) {
                block = *(p++);
                if (!block)
                        goto not_aligned;
                if (find_buffer(dev, block, size))
                        goto not_aligned;
        }
        tmp = bh;
        p = b;
        block = 0;
        while (1) {
                arr[block++] = bh;
                bh->b_count = 1;
                bh->b_dirt = 0;
                bh->b_uptodate = 0;
                bh->b_dev = dev;
                bh->b_blocknr = *(p++);
                nr_buffers++;
                insert_into_queues(bh);
                if (bh->b_this_page)
                        bh = bh->b_this_page;
                else
                        break;
        }
        buffermem += PAGE_SIZE;
        bh->b_this_page = tmp;  /* close the per-page list into a ring */
        mem_map[MAP_NR(address)]++;
        read_buffers(arr, block);
        while (block-- > 0)
                brelse(arr[block]);
        ++current->maj_flt;
        return address;
not_aligned:
        while ((tmp = bh) != NULL) {
                bh = bh->b_this_page;
                put_unused_buffer_head(tmp);
        }
        return 0;
}
/*
 * try_to_share_buffers() tries to minimize memory use by trying to keep
 * both code pages and the buffer area in the same page. This is done by
 * (a) checking if the buffers are already aligned correctly in memory and
 * (b) if none of the buffer heads are in memory at all, trying to load
 * them into memory the way we want them.
 *
 * This doesn't guarantee that the memory is shared, but should under most
 * circumstances work very well indeed (i.e. >90% sharing of code pages on
 * demand-loadable executables).
 */
static inline unsigned long try_to_share_buffers(unsigned long address,
        dev_t dev, int *b, int size)
{
        struct buffer_head * bh;
        int block;

        block = b[0];
        if (!block)
                return 0;
        bh = get_hash_table(dev, block, size);
        if (bh)
                return check_aligned(bh, address, dev, b, size);
        return try_to_load_aligned(address, dev, b, size);
}
#define COPYBLK(size,from,to) \
__asm__ __volatile__("rep ; movsl": \
        :"c" (((unsigned long) size) >> 2),"S" (from),"D" (to) \
        :"cx","di","si")
/*
 * bread_page reads a page's worth of buffers into memory at the desired
 * address. It's a function of its own, as there is some speed to be gained
 * by reading them all at the same time rather than waiting for one to be
 * read and then another, etc. This also allows us to optimize memory usage
 * by sharing code pages and filesystem buffers.
 */
unsigned long bread_page(unsigned long address, dev_t dev, int b[], int size, int prot)
{
        struct buffer_head * bh[8];
        unsigned long where;
        int i, j;

        if (!(prot & PAGE_RW)) {
                where = try_to_share_buffers(address, dev, b, size);
                if (where)
                        return where;
        }
        ++current->maj_flt;
        for (i = 0, j = 0 ; j < PAGE_SIZE ; i++, j += size) {
                bh[i] = NULL;
                if (b[i])
                        bh[i] = getblk(dev, b[i], size);
        }
        read_buffers(bh, i);
        where = address;
        for (i = 0, j = 0 ; j < PAGE_SIZE ; i++, j += size, address += size) {
                if (bh[i]) {
                        if (bh[i]->b_uptodate)
                                COPYBLK(size, (unsigned long) bh[i]->b_data, address);
                        brelse(bh[i]);
                }
        }
        return where;
}
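/*
 * Hypothetical call sketch (device, page, block numbers and flags are
 * assumptions for illustration): fill one 4kB page from four consecutive
 * 1kB blocks, passing PAGE_RW so the buffer-sharing path is skipped.
 */
#if 0   /* illustrative only */
        int b[4] = { 8, 9, 10, 11 };
        unsigned long where = bread_page(page, ROOT_DEV, b, 1024, PAGE_RW);
#endif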
/*
* Try to increase the number of buffers available: the size argument
* is used to determine what kind of buffers we want.
*/
static int grow_buffers(int pri, int size)
{
        unsigned long page;
        struct buffer_head *bh, *tmp;

        if ((size & 511) || (size > PAGE_SIZE)) {
                printk("VFS: grow_buffers: size = %d\n", size);
                return 0;
        }
        if (!(page = __get_free_page(pri)))
                return 0;
        bh = create_buffers(page, size);
        if (!bh) {
                free_page(page);
                return 0;
        }
        tmp = bh;
        while (1) {
                if (free_list) {
                        tmp->b_next_free = free_list;
                        tmp->b_prev_free = free_list->b_prev_free;
                        free_list->b_prev_free->b_next_free = tmp;
                        free_list->b_prev_free = tmp;
                } else {
                        tmp->b_prev_free = tmp;
                        tmp->b_next_free = tmp;
                }
                free_list = tmp;
                ++nr_buffers;
                if (tmp->b_this_page)
                        tmp = tmp->b_this_page;
                else
                        break;
        }
        tmp->b_this_page = bh;  /* close the per-page chain into a ring */
        buffermem += PAGE_SIZE;
        return 1;
}
/*
 * try_to_free() checks if all the buffers on this particular page
 * are unused, and frees the page if so.
 */
static int try_to_free(struct buffer_head * bh, struct buffer_head ** bhp)
{
        unsigned long page;
        struct buffer_head * tmp, * p;

        *bhp = bh;
        page = (unsigned long) bh->b_data;
        page &= PAGE_MASK;
        tmp = bh;
        do {
                if (!tmp)
                        return 0;
                if (tmp->b_count || tmp->b_dirt || tmp->b_lock || tmp->b_wait)
                        return 0;
                tmp = tmp->b_this_page;
        } while (tmp != bh);
        tmp = bh;
        do {
                p = tmp;
                tmp = tmp->b_this_page;
                nr_buffers--;
                if (p == *bhp)
                        *bhp = p->b_prev_free;
                remove_from_queues(p);
                put_unused_buffer_head(p);
        } while (tmp != bh);
        buffermem -= PAGE_SIZE;
        free_page(page);
        return !mem_map[MAP_NR(page)];
}
/*
* Try to free up some pages by shrinking the buffer-cache
*
* Priority tells the routine how hard to try to shrink the
* buffers: 3 means "don't bother too much", while a value
* of 0 means "we'd better get some free pages now".
*/
int shrink_buffers(unsigned int priority)
{
        struct buffer_head *bh;
        int i;

        if (priority < 2)
                sync_buffers(0, 0);
        bh = free_list;
        i = nr_buffers >> priority;
        for ( ; i-- > 0 ; bh = bh->b_next_free) {
                if (bh->b_count ||
                    (priority >= 5 &&
                     mem_map[MAP_NR((unsigned long) bh->b_data)] > 1)) {
                        put_last_free(bh);
                        continue;
                }
                if (!bh->b_this_page)
                        continue;
                if (bh->b_lock) {
                        if (priority)
                                continue;
                        else
                                wait_on_buffer(bh);
                }
                if (bh->b_dirt) {
                        bh->b_count++;
                        ll_rw_block(WRITEA, 1, &bh);
                        bh->b_count--;
                        continue;
                }
                if (try_to_free(bh, &bh))
                        return 1;
        }
        return 0;
}
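/*
 * Caller sketch matching the priority semantics described above: a
 * memory-pressure loop might start gentle and press harder only if
 * nothing was freed (hypothetical; not a caller in this file).
 */
#if 0   /* illustrative only */
        int priority;

        for (priority = 3; priority >= 0; priority--)
                if (shrink_buffers(priority))
                        break;
#endif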
void show_buffers(void)
{
        struct buffer_head * bh;
        int found = 0, locked = 0, dirty = 0, used = 0, lastused = 0;

        printk("Buffer memory: %6dkB\n", buffermem >> 10);
        printk("Buffer heads: %6d\n", nr_buffer_heads);
        printk("Buffer blocks: %6d\n", nr_buffers);
        bh = free_list;
        do {
                found++;
                if (bh->b_lock)
                        locked++;
                if (bh->b_dirt)
                        dirty++;
                if (bh->b_count)
                        used++, lastused = found;
                bh = bh->b_next_free;
        } while (bh != free_list);
        printk("Buffer mem: %d buffers, %d used (last=%d), %d locked, %d dirty\n",
                found, used, lastused, locked, dirty);
}
/*
 * This initializes the initial buffer free list. nr_buffers is set
 * to one less than the actual number of buffers, as a sop to backwards
 * compatibility --- the old code did this (I think unintentionally,
 * but I'm not sure), and programs in the ps package expect it.
 * - TYT 8/30/92
 */
void buffer_init(void)
{
        int i;

        if (high_memory >= 4*1024*1024)
                min_free_pages = 200;
        else
                min_free_pages = 20;
        for (i = 0 ; i < NR_HASH ; i++)
                hash_table[i] = NULL;
        free_list = 0;
        grow_buffers(GFP_KERNEL, BLOCK_SIZE);
        if (!free_list)
                panic("VFS: Unable to initialize buffer free list!");
        return;
}