📄 018_mm_shmem_c.html
字号:
flow: static(header); } /* used to insert page numbers */ div.google_header::before, div.google_footer::before { position: absolute; top: 0; } div.google_footer { flow: static(footer); } /* always consider this element at the start of the doc */ div#google_footer { flow: static(footer, start); } span.google_pagenumber { content: counter(page); } span.google_pagecount { content: counter(pages); } } @page { @top { content: flow(header); } @bottom { content: flow(footer); } } /* end default print css */ /* custom css *//* end custom css */ /* ui edited css */ body { font-family: Verdana; font-size: 10.0pt; line-height: normal; background-color: #ffffff; } .documentBG { background-color: #ffffff; } /* end ui edited css */</style> </head> <body revision="dcbsxfpf_59fh79wxfg:6"> <table align=center cellpadding=0 cellspacing=0 height=5716 width=768>
<tbody>
<tr>
<td height=5716 valign=top width=100%>
<pre>2006-8-3<br>mm/shmem.c<br> 实际上这是一个文件系统。 Resizable simple fs for linux,heavily base on<br>ramfs。<br> --- from code comment.<br> 有必要简单介绍一下,tmpfs 就象虚拟磁盘ramdisk,tmpfs 可以使用您的 RAM,但<br>它也可以使用您的交换分区。而且传统的虚拟磁盘是个块设备,并需要一个 mkfs 之类<br>的命令才能真正地使用它,tmpfs 则是一个文件系统,您只是安装它,它就可以使用了。<br> 并且tempfs不建立于任何设备之上,空间也是动态分配的。<br> 什么是tempfs?<br><br> 执行一下df命令可以看到如下信息:<br>======================================================<br><br> <br>文件系统 1k-块 已用 可用 % 挂接点<br>tmpfs 388116 0 388116 0% /dev/shm<br> <br>=======================================================<br><br> 这里的文件系统tmpfs就是shemem.c所实现的.但是在我们分析的这个版本中fs<br>的名字叫做"shm"(见717行 <br>static DECLARE_FSTYPE(shmem_fs_type, "shm", shmem_read_super, FS_LITTER);<br>)而不是"tmpfs",多少令人有些疑惑.其实上,2.4.20已经不是这样声明了,而是如下:<br><br>#ifdef CONFIG_TMPFS<br>/* type "shm" will be tagged obsolete in 2.5 */<br>static DECLARE_FSTYPE(shmem_fs_type, "shm", shmem_read_super, FS_LITTER);<br>static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER);<br>#else<br>static DECLARE_FSTYPE(tmpfs_fs_type, "tmpfs", shmem_read_super, FS_LITTER|FS_NOMOUNT);<br>#endif<br><br> <br> 第一部分: 外部接口<br><br><br> 已经明确说明shm这个名字就要过时了. 
文件系统有一个定义的很好的接口,所<br>以这里大部分函数和变量(函数表)都是静态的,并且是一个module.先来看看模块提<br>供的几个接口函数:<br> <br> <br> <br> 1)shmem_nopage :ipc/shm.c 使用了这个接口.和shemem.c一样都是作为标准接口<br>vm_operations_struct::nopage使用.在讨论filemap.c的时候已经讨论过这个接口了.<br>filemap.c, shemem.c, shm.c都提供了vm_operations_struct,作为页面调入的标准<br>接口,这个接口在内核的位置是memory.c ->handle_mm_fault->handle_pte_fault-><br>do_no_page.同时可以看到swap不是以vm_operations_struct的接口出现的,而是<br>handle_pte_fault->do_swap_page.<br><br><br><br><br><br><br> 2)module_init(init_shmem_fs) & module_exit(exit_shmem_fs):模块接口函数.<br>/*<br> *注册文件系统<br> *进行虚拟安装,以便内核自己使用此文件系统<br> */<br>static int __init init_shmem_fs(void)<br>{<br> int error;<br> struct vfsmount * res;<br><br> if ((error = register_filesystem(&shmem_fs_type))) {/*挂入list: file_systems*/<br> printk (KERN_ERR "Could not register shmem fs\n");<br> return error;<br> }<br><br> /*将此文件系统安装到虚拟的根:shmem_fs_type->kern_mnt*/<br> /*内核就可以通过此根访问这个文件系统*/<br> res = kern_mount(&shmem_fs_type);<br> if (IS_ERR (res)) {<br> printk (KERN_ERR "could not kern_mount shmem fs\n");<br> unregister_filesystem(&shmem_fs_type);<br> return PTR_ERR(res);<br> }<br><br> devfs_mk_dir (NULL, "shm", NULL); /*devfs文件系统相关*/<br> return 0;<br>}<br> 这里也看看kern_mount的作用:<br>/*<br> * 安装到自己,给内核自己使用的文件系统<br> * 记录在fs type->kern_mnt<br> */<br>struct vfsmount *kern_mount(struct file_system_type *type)<br>{<br> kdev_t dev = get_unnamed_dev();/*获得一个unnamed dev,专门为<br> 虚拟文件系统准备*/<br> struct super_block *sb;<br> struct vfsmount *mnt;<br> if (!dev)<br> return ERR_PTR(-EMFILE);<br><br> /*malloc sb, and type->read_super,like shmem_read_super*/<br> sb = read_super(dev, NULL, type, 0, NULL, 0);<br> if (!sb) {<br> put_unnamed_dev(dev);<br> return ERR_PTR(-EINVAL);<br> }<br><br> /*仅仅获得一个mnt 结构, 安装点是自己(自环), mnt_parent也是自己*/<br> mnt = add_vfsmnt(NULL, sb->s_root, NULL);<br> if (!mnt) {<br> kill_super(sb, 0);<br> return ERR_PTR(-ENOMEM);<br> }<br> <br> type->kern_mnt = mnt; /*内核可以使用此文件系统,对kernel 的user不可见*/<br> return mnt;<br>}<br> 
这里read_super->type->read_super->shmem_read_super<br><br>/* 解析安装参数<br> * 分配一个dir inode, 分配根节点的inode,并建立root和root inode的关系<br> * sb->root= new root, sb->s_op= &shmem_ops<br> */<br>static struct super_block *shmem_read_super(struct super_block * sb, void * data, int silent)<br>{<br> struct inode * inode;<br> struct dentry * root;<br> unsigned long blocks = ULONG_MAX; /* unlimited */<br> unsigned long inodes = ULONG_MAX; /* unlimited */<br> int mode = S_IRWXUGO | S_ISVTX;<br><br> if (shmem_parse_options (data, &mode, &blocks, &inodes)) {<br> printk(KERN_ERR "shmem fs invalid option\n");<br> return NULL;<br> }<br> spin_lock_init (&sb->u.shmem_sb.stat_lock);<br> .... //设置解析的参数,略<br> sb->s_op = &shmem_ops;<br><br> /*为根节点分配inode*/<br> inode = shmem_get_inode(sb, S_IFDIR | mode, 0);<br> if (!inode)<br> return NULL;<br><br> /*分配根'/' 的dentry,其parent 是null,并建立和inode的关系*/<br> root = d_alloc_root(inode);<br> if (!root) {<br> iput(inode);<br> return NULL;<br> }<br> sb->s_root = root;<br> return sb;<br>}<br><br>最后是shmem_get_inode:<br>/*<br> * malloc things for shmem inode<br> *<br> * 根据类型设置inode->i_op,inode->i_fop,或者是设备文件<br> * inode->i_mapping->a_ops = &shmem_aops; list_add (&inode->u.shmem_i.list, &shmem_inodes);<br> */<br>struct inode *shmem_get_inode(struct super_block *sb, int mode, int dev)<br>{<br> struct inode * inode;<br><br> .... 
//santi check, 是否还有inode,略<br><br> inode = new_inode(sb);<br> if (inode) {<br> ....//忽略简单变量设置<br> inode->i_mapping->a_ops = &shmem_aops;<br> ....<br> switch (mode & S_IFMT) {<br> default: /*也支持设备文件*/<br> init_special_inode(inode, mode, dev);<br> break;<br> case S_IFREG: /*普通文件*/<br> inode->i_op = &shmem_inode_operations;<br> inode->i_fop = &shmem_file_operations;<br> break;<br> case S_IFDIR: /*目录*/<br> inode->i_op = &shmem_dir_inode_operations;<br> inode->i_fop = &shmem_dir_operations;<br> break;<br> case S_IFLNK: /*符号连接*/<br> inode->i_op = &page_symlink_inode_operations;<br> break;<br> }<br> spin_lock (&shmem_ilock);<br> //加入shmem的inode 列表<br> list_add (&inode->u.shmem_i.list, &shmem_inodes);<br> spin_unlock (&shmem_ilock);<br> }<br> return inode;<br>}<br> 总值模块初始化时候建立了kern_mnt,sb,sb root, sb inode直接的相互关系,图<br>暂缺.以后补上.最好自己画一个. <br><br><br><br><br><br><br><br><br><br> 3) shmem_file_setup,shmem_zero_setup: <br> shm.c, 和mmap共同使用的一个接口. mmap.c已经讨论过了,这里要补充一个mmap的<br>一种用法:MAP_SHARED|MAP_ANONYMOUS,意思是采用共享匿名的映射.其含义逐步分析如下:<br>先是sys_i386.c do_mmap2--><br> ....<br> if (!(flags & MAP_ANONYMOUS)) { //如果不是匿名映射,必须指定文件<br> file = fget(fd);<br> if (!file)<br> goto out;<br> }<br><br>然后看do_mmap_pgoff(mm/mmap.c):<br> ..........<br> if (file) {<br> if (vma->vm_flags & VM_DENYWRITE) {<br> error = deny_write_access(file);/*禁止作为普通文件的写操作*/<br> if (error)<br> goto free_vma;<br> correct_wcount = 1;<br> }<br> vma->vm_file = file;<br> get_file(file);<br> error = file->f_op->mmap(file, vma);/*ext2就是generic_file_mmap<br> *就是设置vma->vm_ops从而使<br> *vma->vm_ops->readpage就是<br> *filemap_nopage<br> */<br> if (error)<br> goto unmap_and_free_vma;<br> } else if (flags & MAP_SHARED) {//匿名共享映射支持<br> error = shmem_zero_setup(vma); //为vma指定一个文件,dev/zero<br> if (error)<br> goto free_vma;<br> }<br> ..........<br><br> 7.20开始写此文件的分析,直到8.1号才到这里,因为有个问题困惑了近两周:既<br>然为vma指定了文件 dev/zero,ls /dev/shm为何不能看到这个文件?借此机会了解到<br>自己的无知--大部分文件系统重复安装会使用同一个mnt->mnt_root。而这里安装于<br>/dev/shm 的系统使用了不同的mnt root 
见kernel_mount,do_mount->get_sb_nodev.<br><br> kernel_mount和/dev/shm下的tmpfs不是同一个根目录,不过即便是同一个,dcache<br>_readdir也会不显示此文件,因为没有hash的dentry不能显示出来,详见dcache<br>_readdir和系列文章--019_using uml.txt。<br> <br> 闲话少叙,看:<br>int shmem_zero_setup(struct vm_area_struct *vma)<br>{<br> struct file *file;<br> loff_t size = vma->vm_end - vma->vm_start;<br><br> /*这里的文件"dev/zero" 仅仅存在于函数init_tmpfs 所安装的fs中*/<br> /*安装于/dev/shm 的系统使用了不同的mnt root 见 shmem_fill_super*/<br> file = shmem_file_setup("dev/zero", size);<br> if (IS_ERR(file))<br> return PTR_ERR(file);<br><br> if (vma->vm_file)<br> fput (vma->vm_file);<br> vma->vm_file = file;<br> vma->vm_ops = &shmem_shared_vm_ops;<br> return 0;<br>}<br><br><br>/*<br> * shmem_file_setup - get an unlinked file living in shmem fs<br> * @name: name for dentry (to be seen in /proc/<pid>/maps<br> * @size: size to be set for the file<br> * (是一个被删除了的文件)<br> * /dev/shm中不能被ls 看到dev/zero 不是因为它<br> * 已经被删除了。 实际上原因如下:<br> * 1.kern_mount 的tmpfs 和/dev/shm的tmpfs根本不<br> * 是同一个根<br> * 2.shmem 的readdir操作是dcache_readdir,需要将<br> * 条件 if (!list_empty(&de->d_hash) && de->d_inode) <br> * 删除,这两点线索可以让dev/zero 可见<br> * 3. 
/proc/<pid>/maps 文件中可以看到相关信<br> * 息 <br> */<br>struct file *shmem_file_setup(char * name, loff_t size)<br>{<br> int error;<br> struct file *file;<br> struct inode * inode;<br> struct dentry *dentry, *root;<br> struct qstr this;<br> int vm_enough_memory(long pages);<br><br> error = -ENOMEM;<br> if (!vm_enough_memory((size) >> PAGE_SHIFT))<br> goto out;<br><br> this.name = name;<br> this.len = strlen(name);<br> this.hash = 0; /* will go */<br> root = shmem_fs_type.kern_mnt->mnt_root;<br> dentry = d_alloc(root, &this); /*分配dentry,建立subdir和child的关系*/<br> if (!dentry)<br> goto out;<br><br> error = -ENFILE;<br> file = get_empty_filp(); /*分配file 结构,just malloc things*/<br> if (!file)<br> goto put_dentry;<br><br> error = -ENOSPC;<br> inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0);/*分配inode*/<br> if (!inode) <br> goto close_file;<br><br> d_instantiate(dentry, inode);/*建立inode 和dentry 的关系*/<br> dentry->d_inode->i_size = size;<br><br> /*建立file 的关键关系*/<br> file->f_vfsmnt = mntget(shmem_fs_type.kern_mnt);<br> file->f_dentry = dentry;<br> file->f_op = &shmem_file_operations;<br> file->f_mode = FMODE_WRITE | FMODE_READ;<br> inode->i_nlink = 0; /* It is unlinked */<br> return(file);<br><br>close_file:<br> put_filp(file);<br>put_dentry:<br> dput (dentry);<br>out:<br> return ERR_PTR(error); <br>}<br><br>这两个函数建立如下的结构:<br> file_system_type vfsmount <br> +------------+ +----------------------+ <br> | name | |mnt_root |----+root dentry <br> | read_super |-->shmem_read_super |mnt_mountpoint | +--------+ <br> | kern_mnt |--------------------->|mnt_parent | | | <br> +------------+ |list_ent mnt_instances| | | <br> |list mnt_clash | | | <br> |mnt_sb | | | <br> |list_head mnt_mounts | |d_subdir| <br> |list_head mnt_child | | / | <br> +---------/---------.--+ +-.----|-+ <br> 这里 | | | | <br> +-------------+ file | | | | <br> | vma->file |--------->+-----------------+ | | | | <br> | vma->vm_ops-|------+ | file->f_vfsmnt >------\ \--------<| | <br> +-------------+ | | file->f_op 
|-->&shmem_file_operations|| | <br> | | file->f_mode=rw | || | <br> | | file->f_dentry |------->+dentry+ || | <br> | +-----------------+ +-------------+ || | <br> | | | || | <br> | |list d_vfsmnt>>-\| | <br> | |d_parent----->>--\ | <br> + |list d_hash | | <br> shmem_shared_vm_ops |list d_child<---------\ <br> +---------------------+ |list d_subdir| <br> |nopage: shmem_nopage | |list d_alias----/ <br> +---------------------+ /----<d_inode | | <br> | |qstr d_name | | <br> | |d_op | | <br> | |d_sb | | <br> | |d_iname | | <br> | +-------------+ | <br> | | <br> inode \ | <br> +--------+ | <br> |i_sb | | <br> |i_dentry>>-------------------\ <br> | | <br> | | <br> +--------+ <br> <br><br><br><br><br>4) shmem_unuse:swapfile.c使用这个接口.<br> 看代码的注释即可明白了:<br>/*<br> * unuse_shmem() search for an eventually swapped out shmem page.<br> * 当系统swapoff的时候要释放swap设备<br> * swap entry 对应的数据已经加载到page<br> * 此函数要释放对应的swap entry,以便可以swapoff<br> */<br>void shmem_unuse(swp_entry_t entry, struct page *page)<br>{<br> struct list_head *p;<br> struct inode * inode;<br><br> spin_lock (&shmem_ilock);<br> list_for_each(p, &shmem_inodes) {//遍历shmme 的inode<br> inode = list_entry(p, struct inode, u.shmem_i.list);<br><br> //找到使用此swap entry的inode,并释放swap 空间<br> if (shmem_unuse_inode(inode, entry, page))<br> break;/*找到了就不用再遍历了*/<br> }<br> spin_unlock (&shmem_ilock);<br>}<br><br>/*<br> * 在inode中寻找引使用了entry的部分,释放swap 空间(如果有)<br> */<br>static int shmem_unuse_inode (struct inode *inode, swp_entry_t entry, struct page *page)<br>{<br> swp_entry_t **base, **ptr;<br> unsigned long idx;<br> int offset;<br> struct shmem_inode_info *info = &inode->u.shmem_i;<br> <br> idx = 0;<br> spin_lock (&info->lock);<br> /*在直接索引块搜索*/<br> if ((offset = shmem_clear_swp (entry,info->i_direct, SHMEM_NR_DIRECT)) >= 0)<br> goto found;<br><br> idx = SHMEM_NR_DIRECT;<br> if (!(base = info->i_indirect))<br> goto out;<br><br> /*搜索二级引用块*/<br> for (ptr = base; ptr < base + ENTRIES_PER_PAGE; ptr++) {<br> if (*ptr &&<br> (offset = 
shmem_clear_swp (entry, *ptr, ENTRIES_PER_PAGE)) >= 0)<br> goto found;<br> idx += ENTRIES_PER_PAGE;<br> }<br>out:<br> spin_unlock (&info->lock);<br> return 0;<br>found:<br> /*找到了,将对应page重新加入page cache ,不再需要swap 空间了*/<br> add_to_page_cache(page, inode->i_mapping, offset + idx);<br> set_page_dirty(page);<br> SetPageUptodate(page);<br> UnlockPage(page);<br> info->swapped--;<br> spin_unlock(&info->lock);<br> return 1;<br>}<br><br><br><br><br> <br> 第二部分 文件系统特性<br> 在第一部分中我们已经讨论了tmpfs作为文件系统的一些特性的实现,包括kernel<br>mount 相关的super block,inode ,dentry的一些函数 。 以及和mmap密切相关联系<br>的shmem_zero_setup,shmem_file_setup等。<br><br> 接下来讨论此文件系统的其他特性之实现。注意2.4.0的内核没有实现read,write<br>操作. 2.6.14已经实现.(不知道啥时候搞的)<br> <br>I) vm_operations 和 address_space operations (shmem_aops)<br> 这两个个接口和mmap/swap/filemap紧密相关,分别对应缺页中断和swap out/fsyn.<br>对应的说明应在函数中注明,包括shmem_writepage,shmem_nopage.<br> mmap的匿名共享影射通过如下路径使用shmem_nopage<br> memory.c ->handle_mm_fault->handle_pte_fault->do_no_page:vm_ops->nopage<br> <br> filemap/swap的情形如下:<br> page_launder:page->mapping->a_ops->writepage<br> filemap_fdatasync-> page->mapping->a_ops->writepage<br> <br> <br>/*<br> * shmem_nopage - either get the page from swap or allocate a new one<br> *<br> * If we allocate a new one we do not mark it dirty. That's up to the<br> * vm. 
If we swap it in we mark it dirty since we also free the swap<br> * entry since a page cannot live in both the swap and page cache<br> */<br>/* mmap的匿名共享影射通过如下路径使用此函数<br> * memory.c ->handle_mm_fault->handle_pte_fault->do_no_page:vm_ops->nopage<br> */<br>struct page * shmem_nopage(struct vm_area_struct * vma, unsigned long address, int no_share)<br>{<br> unsigned long size; /*以page size 为单位的文件大小*/<br> struct page * page;<br> unsigned int idx;<br> swp_entry_t *entry;<br> struct inode * inode = vma->vm_file->f_dentry->d_inode;<br> struct address_space * mapping = inode->i_mapping;<br> struct shmem_inode_info *info;<br><br> idx = (address - vma->vm_start) >> PAGE_SHIFT;/*vma 内的page 偏移*/<br> idx += vma->vm_pgoff; /*vma 地址在对应的文件内的偏移*/<br><br> down (&inode->i_sem);<br> size = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;/*文件大小*/<br> page = NOPAGE_SIGBUS;<br> if ((idx >= size) && (vma->vm_mm == current->mm))<br> goto out;<br><br> /* retry, we may have slept *//*睡眠过程中可能已经调入pagecache*/<br> page = __find_lock_page(mapping, idx, page_hash (mapping, idx));<br> if (page)<br> goto cached_page;<br><br> info = &inode->u.shmem_i;<br> entry = shmem_swp_entry (info, idx);/*找到tmpfs 文件对应的swap entry*/<br> if (!entry)<br> goto oom;<br> if (entry->val) {/*非0时代表一个swap entry*/<br> unsigned long flags;<br><br> /* Look it up and read it in.. 
*/<br> page = lookup_swap_cache(*entry);/*在swap cache 查找*/<br> if (!page) {/*未找到*/<br> lock_kernel();<br> swapin_readahead(*entry);/*swap in 异步预读*/<br> page = read_swap_cache(*entry); /*等待读入指定页面完成*/<br> unlock_kernel();<br> if (!page) <br> goto oom;<br> }<br><br> /* We have to this with page locked to prevent races */<br> spin_lock (&info->lock);<br> swap_free(*entry);/*不需要swap entry了*/<br> lock_page(page);<br> delete_from_swap_cache_nolock(page);/*从swap cache删除*/<br> *entry = (swp_entry_t) {0};<br> flags = page->flags & ~((1 << PG_uptodate) ...//line too long,cut<br> page->flags = flags | (1 << PG_dirty);<br> add_to_page_cache_locked(page, mapping, idx);/*加入page cache*/<br> info->swapped--;<br> spin_unlock (&info->lock);<br> } else {//为0代表根本没有交换到swap 设备,分配新页面即可<br> spin_lock (&inode->i_sb->u.shmem_sb.stat_lock);<br> if (inode->i_sb->u.shmem_sb.free_blocks == 0)<br> goto no_space;<br> inode->i_sb->u.shmem_sb.free_blocks--;<br> spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);<br> /* Ok, get a new page */<br> page = page_cache_alloc();/*分配新页面*/<br> if (!page)<br> goto oom;<br> clear_user_highpage(page, address);<br> inode->i_blocks++;<br> add_to_page_cache (page, mapping, idx); /*加入page cahche*/<br> }<br> /* We have the page */<br> SetPageUptodate (page);<br><br>cached_page:<br> UnlockPage (page);<br> up(&inode->i_sem);<br><br> if (no_share) {/*copy on write *//*见do_no_page,read 情形下no_share为0*/<br> struct page *new_page = page_cache_alloc();<br><br> if (new_page) {<br> copy_user_highpage(new_page, page, address);<br> flush_page_to_ram(new_page);<br> } else<br> new_page = NOPAGE_OOM;<br> page_cache_release(page);<br> return new_page;<br> }<br><br> flush_page_to_ram (page);<br> return(page);<br>no_space:<br> spin_unlock (&inode->i_sb->u.shmem_sb.stat_lock);<br>oom:<br> page = NOPAGE_OOM;<br>out:<br> up(&inode->i_sem);<br> return page;<br>}<br><br><br>/*<br> * Move the page from the page cache to the swap cache<br> * (未做真正写入,留给swap cache 写入)<br> */<br> /* 
page_launder:page->mapping->a_ops->writepage<br> * filemap_fdatasync-> page->mapping->a_ops->writepage<br> */<br>static int shmem_writepage(struct page * page)<br>{<br> int error;<br> struct shmem_inode_info *info;<br> swp_entry_t *entry, swap;<br><br> /*<br> * <br> */<br> info = &page->mapping->host->u.shmem_i;<br> if (info->locked)<br> return 1;<br> swap = __get_swap_page(2); /* 分配swap page*/<br> if (!swap.val)<br> return 1;<br><br> spin_lock(&info->lock);<br> /*寻找tmpfs内记录swap entry 的散列表*/<br> entry = shmem_swp_entry (info, page->index);<br> if (!entry) /* this had been allocted on page allocation */<br> BUG();<br> error = -EAGAIN;<br> if (entry->val) { /*已经有了swap entry与之对应*/<br> __swap_free(swap, 2);<br> goto out;<br> }<br><br> *entry = swap;<br> error = 0;<br> /* Remove the from the page cache */<br> lru_cache_del(page);<br> remove_inode_page(page);<br><br> /* Add it to the swap cache */<br> add_to_swap_cache(page, swap);<br> page_cache_release(page);<br> set_page_dirty(page);<br> info->swapped++;<br>out:<br> spin_unlock(&info->lock);<br> UnlockPage(page);<br> return error;<br>}<br><br> <br><br>II)普通文件操作函数<br> 因为没有读写,简单些。只支持fop mmap: shmem_mmap和inode ops truncate:<br>shmem_truncate。<br><br>shmem_mmap:将tmpfs 的文件进行非匿名mmap。 代码省略,就是设置vma->vm_ops.<br><br>shmem_truncate:删除或者截断文件的时候,遍历inode的直接索引和间接索引,释放<br> swap entry 以及可能存在的swap page.代码略。<br> <br><br>III)目录文件操作<br> fops:read: generic_read_dir(空函数)<br> readdir: dcache_readdir (遍历目录,见文章using_uml,搞定uml调试)<br><br> inode ops:<br> create: shmem_create, /*创建普通文件->shmem_mknod*/<br> mkdir: shmem_mkdir, /*创建目录文件->shmem_mknod*/<br> mknod: shmem_mknod, /*提供各种创建功能*/<br> lookup: shmem_lookup, /*fs/namei.c real_lookup:dir->i_op->lookup*/<br> link: shmem_link, /* fs/namei.c vfs_link*/<br> unlink: shmem_unlink, /* 取消link */<br> symlink: shmem_symlink,/*符号连接*/<br> rmdir: shmem_rmdir, /*同shmem_unlink*/<br> rename: shmem_rename, /*重命名功能*/<br><br>shmem_create, shmem_mkdir 都使用shmem_mknod,一个万能create 函数(^_^):<br>/*<br> * File creation. 
Allocate an inode, and we're done..<br> */<br>static int shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, int dev)<br>{<br> struct inode * inode = shmem_get_inode(dir->i_sb, mode, dev);<br> int error = -ENOSPC;<br><br> if (inode) {<br> d_instantiate(dentry, inode); //将dentry 挂入inode <br> dget(dentry); /* Extra count - pin the dentry in core */<br> error = 0;<br> }<br> return error;<br>}<br><br>shmem_link: Link a file..<br> inode->i_nlink++, 将dentry->d_alias 挂入inode->i_dentry<br> <br>shmem_symlink:(其他函数不再罗列)<br><br>sys_symlink->vfs_symlink:dir->i_op->symlink->shmem_symlink<br>/*<br> * 在目录dir下创建detnry,并符号连接到symname<br> */<br>static int shmem_symlink(struct inode * dir, struct dentry *dentry, const char * symname)<br>{<br> int error;<br><br> error = shmem_mknod(dir, dentry, S_IFLNK | S_IRWXUGO, 0);<br> if (!error) {<br> int l = strlen(symname)+1;<br> struct inode *inode = dentry->d_inode;<br> error = block_symlink(inode, symname, l);/*此inode 记录下symlink路径(memcp)*/<br> }<br> return error;<br>}<br><br><br> 不再分析其他函数了。理解之后,代码并不难。 <br></pre>
</td>
</tr>
</tbody>
</table></body></html>
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -