/* cifsfs.c */
#endif /* CONFIG_CIFS_EXPERIMENTAL */
};

const struct file_operations cifs_dir_ops = {
	.readdir = cifs_readdir,
	.release = cifs_closedir,
	.read = generic_read_dir,
#ifdef CONFIG_CIFS_EXPERIMENTAL
	.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
	.ioctl = cifs_ioctl,
};

#ifndef SLAB_MEM_SPREAD
#define SLAB_MEM_SPREAD 0
#endif

static void
cifs_init_once(void *inode, struct kmem_cache *cachep, unsigned long flags)
{
	struct cifsInodeInfo *cifsi = inode;

	inode_init_once(&cifsi->vfs_inode);
	INIT_LIST_HEAD(&cifsi->lockList);
}

static int
cifs_init_inodecache(void)
{
	cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
					      sizeof(struct cifsInodeInfo),
					      0, (SLAB_RECLAIM_ACCOUNT |
						  SLAB_MEM_SPREAD),
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
					      cifs_init_once);
#else
					      cifs_init_once, NULL);
#endif
	if (cifs_inode_cachep == NULL)
		return -ENOMEM;

	return 0;
}

static void
cifs_destroy_inodecache(void)
{
	kmem_cache_destroy(cifs_inode_cachep);
}
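/*
 * Overview of the request buffer setup below: two slab caches are
 * created, each backed by a mempool. The mempool is what guarantees
 * forward progress under memory pressure: mempool_create_slab_pool(min,
 * cachep) preallocates "min" objects, so a receive buffer can always be
 * obtained even when the slab allocator is failing. The large cache
 * holds full-size frames (CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) for
 * path-based requests; the small cache holds MAX_CIFS_SMALL_BUFFER_SIZE
 * frames for most handle-based requests and responses.
 *
 * The mask CIFSMaxBufSize &= 0x1FE00 rounds an in-range value down to a
 * multiple of 512 (0x200) by clearing the low nine bits; for example, a
 * (hypothetical) module parameter of 0x4104 (16644) becomes 0x4000
 * (16384).
 */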
static int
cifs_init_request_bufs(void)
{
	if (CIFSMaxBufSize < 8192) {
	/* Buffer size can not be smaller than 2 * PATH_MAX since maximum
	   Unicode path name has to fit in any SMB/CIFS path based frames */
		CIFSMaxBufSize = 8192;
	} else if (CIFSMaxBufSize > 1024*127) {
		CIFSMaxBufSize = 1024 * 127;
	} else {
		CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult */
	}
/*	cERROR(1, ("CIFSMaxBufSize %d 0x%x", CIFSMaxBufSize, CIFSMaxBufSize)); */
	cifs_req_cachep = kmem_cache_create("cifs_request",
					    CIFSMaxBufSize +
					    MAX_CIFS_HDR_SIZE, 0,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
					    SLAB_HWCACHE_ALIGN, NULL);
#else
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif
	if (cifs_req_cachep == NULL)
		return -ENOMEM;

	if (cifs_min_rcv < 1)
		cifs_min_rcv = 1;
	else if (cifs_min_rcv > 64) {
		cifs_min_rcv = 64;
		cERROR(1, ("cifs_min_rcv set to maximum (64)"));
	}

	cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
						  cifs_req_cachep);

	if (cifs_req_poolp == NULL) {
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses
	   and almost all handle based requests (but not write response, nor
	   is it sufficient for path based requests). A smaller size would
	   have been more efficient (compacting multiple slab items on one 4k
	   page) for the case in which debug was on, but this larger size
	   allows more SMBs to use small buffer alloc and is still much more
	   efficient to alloc 1 per page off the slab compared to 17K (5page)
	   alloc of large cifs buffers even when page debugging is on */
	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
					       MAX_CIFS_SMALL_BUFFER_SIZE,
					       0, SLAB_HWCACHE_ALIGN,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
					       NULL);
#else
					       NULL, NULL);
#endif
	if (cifs_sm_req_cachep == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		return -ENOMEM;
	}

	if (cifs_min_small < 2)
		cifs_min_small = 2;
	else if (cifs_min_small > 256) {
		cifs_min_small = 256;
		cFYI(1, ("cifs_min_small set to maximum (256)"));
	}

	cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
						     cifs_sm_req_cachep);

	if (cifs_sm_req_poolp == NULL) {
		mempool_destroy(cifs_req_poolp);
		kmem_cache_destroy(cifs_req_cachep);
		kmem_cache_destroy(cifs_sm_req_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_request_bufs(void)
{
	mempool_destroy(cifs_req_poolp);
	kmem_cache_destroy(cifs_req_cachep);
	mempool_destroy(cifs_sm_req_poolp);
	kmem_cache_destroy(cifs_sm_req_cachep);
}

static int
cifs_init_mids(void)
{
	cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
					    sizeof(struct mid_q_entry), 0,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
					    SLAB_HWCACHE_ALIGN, NULL);
#else
					    SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif
	if (cifs_mid_cachep == NULL)
		return -ENOMEM;

	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	cifs_oplock_cachep = kmem_cache_create("cifs_oplock_structs",
					       sizeof(struct oplock_q_entry), 0,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
					       SLAB_HWCACHE_ALIGN, NULL);
#else
					       SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif
	if (cifs_oplock_cachep == NULL) {
		mempool_destroy(cifs_mid_poolp);
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}

	return 0;
}

static void
cifs_destroy_mids(void)
{
	mempool_destroy(cifs_mid_poolp);
	kmem_cache_destroy(cifs_mid_cachep);
	kmem_cache_destroy(cifs_oplock_cachep);
}
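/*
 * Oplock break daemon, run as the "cifsoplockd" kthread. It sleeps
 * until an entry appears on GlobalOplock_Q, then writes back the dirty
 * pages of the affected inode (also waiting and invalidating the cache
 * if read caching was lost), and finally acknowledges the break to the
 * server with an SMB_LOCKING_ANDX request carrying
 * LOCKING_ANDX_OPLOCK_RELEASE, unless the tree connection needs to be
 * reconnected anyway.
 */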
static int cifs_oplock_thread(void *dummyarg)
{
	struct oplock_q_entry *oplock_item;
	struct cifsTconInfo *pTcon;
	struct inode *inode;
	__u16 netfid;
	int rc;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22)
	set_freezable();
#endif
	do {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12)
		if (try_to_freeze())
			continue;
#endif

		spin_lock(&GlobalMid_Lock);
		if (list_empty(&GlobalOplock_Q)) {
			spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(39*HZ);
		} else {
			oplock_item = list_entry(GlobalOplock_Q.next,
						 struct oplock_q_entry, qhead);
			if (oplock_item) {
				cFYI(1, ("found oplock item to write out"));
				pTcon = oplock_item->tcon;
				inode = oplock_item->pinode;
				netfid = oplock_item->netfid;
				spin_unlock(&GlobalMid_Lock);
				DeleteOplockQEntry(oplock_item);
				/* can not grab inode sem here since it would
				   deadlock when oplock received on delete
				   since vfs_unlink holds the i_mutex across
				   the call */
				/* mutex_lock(&inode->i_mutex);*/
				if (S_ISREG(inode->i_mode)) {
					rc = filemap_fdatawrite(inode->i_mapping);
					if (CIFS_I(inode)->clientCanCacheRead == 0) {
						filemap_fdatawait(inode->i_mapping);
						invalidate_remote_inode(inode);
					}
				} else
					rc = 0;
				/* mutex_unlock(&inode->i_mutex);*/
				if (rc)
					CIFS_I(inode)->write_behind_rc = rc;
				cFYI(1, ("Oplock flush inode %p rc %d",
					 inode, rc));

				/* releasing stale oplock after recent reconnect
				   of smb session using a now incorrect file
				   handle is not a data integrity issue but do
				   not bother sending an oplock release if
				   session to server still is disconnected
				   since oplock already released by the server
				   in that case */
				if (pTcon->tidStatus != CifsNeedReconnect) {
					rc = CIFSSMBLock(0, pTcon, netfid,
							 0 /* len */, 0 /* offset */,
							 0, 0,
							 LOCKING_ANDX_OPLOCK_RELEASE,
							 0 /* wait flag */);
					cFYI(1, ("Oplock release rc = %d", rc));
				}
			} else
				spin_unlock(&GlobalMid_Lock);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(1);  /* yield in case q were corrupt */
		}
	} while (!kthread_should_stop());

	return 0;
}

static int cifs_dnotify_thread(void *dummyarg)
{
	struct list_head *tmp;
	struct cifsSesInfo *ses;

	do {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 12)
		if (try_to_freeze())
			continue;
#endif
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(15*HZ);
		read_lock(&GlobalSMBSeslock);
		/* check if any stuck requests need to be woken up and wake
		   the response queue so the thread can wake up and error
		   out */
		list_for_each(tmp, &GlobalSMBSessionList) {
			ses = list_entry(tmp, struct cifsSesInfo,
					 cifsSessionList);
			if (ses && ses->server &&
			    atomic_read(&ses->server->inFlight))
				wake_up_all(&ses->server->response_q);
		}
		read_unlock(&GlobalSMBSeslock);
	} while (!kthread_should_stop());

	return 0;
}

static int __init
init_cifs(void)
{
	int rc = 0;
#ifdef CONFIG_PROC_FS
	cifs_proc_init();
#endif
/*	INIT_LIST_HEAD(&GlobalServerList);*/	/* BB not implemented yet */
	INIT_LIST_HEAD(&GlobalSMBSessionList);
	INIT_LIST_HEAD(&GlobalTreeConnectionList);
	INIT_LIST_HEAD(&GlobalOplock_Q);
#ifdef CONFIG_CIFS_EXPERIMENTAL
	INIT_LIST_HEAD(&GlobalDnotifyReqList);
	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
#endif
/*
 *  Initialize Global counters
 */
	atomic_set(&sesInfoAllocCount, 0);
	atomic_set(&tconInfoAllocCount, 0);
	atomic_set(&tcpSesAllocCount, 0);
	atomic_set(&tcpSesReconnectCount, 0);
	atomic_set(&tconInfoReconnectCount, 0);

	atomic_set(&bufAllocCount, 0);
	atomic_set(&smBufAllocCount, 0);
#ifdef CONFIG_CIFS_STATS2
	atomic_set(&totBufAllocCount, 0);
	atomic_set(&totSmBufAllocCount, 0);
#endif /* CONFIG_CIFS_STATS2 */

	atomic_set(&midCount, 0);
	GlobalCurrentXid = 0;
	GlobalTotalActiveXid = 0;
	GlobalMaxActiveXid = 0;
	memset(Local_System_Name, 0, 15);
	rwlock_init(&GlobalSMBSeslock);
	spin_lock_init(&GlobalMid_Lock);

	if (cifs_max_pending < 2) {
		cifs_max_pending = 2;
		cFYI(1, ("cifs_max_pending set to min of 2"));
	} else if (cifs_max_pending > 256) {
		cifs_max_pending = 256;
		cFYI(1, ("cifs_max_pending set to max of 256"));
	}

	rc = cifs_init_inodecache();
	if (rc)
		goto out_clean_proc;

	rc = cifs_init_mids();
	if (rc)
		goto out_destroy_inodecache;

	rc = cifs_init_request_bufs();
	if (rc)
		goto out_destroy_mids;

	rc = register_filesystem(&cifs_fs_type);
	if (rc)
		goto out_destroy_request_bufs;

	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
	if (IS_ERR(oplockThread)) {
		rc = PTR_ERR(oplockThread);
		cERROR(1, ("error %d create oplock thread", rc));
		goto out_unregister_filesystem;
	}

	dnotifyThread = kthread_run(cifs_dnotify_thread, NULL, "cifsdnotifyd");
	if (IS_ERR(dnotifyThread)) {
		rc = PTR_ERR(dnotifyThread);
		cERROR(1, ("error %d create dnotify thread", rc));
		goto out_stop_oplock_thread;
	}

	return 0;

 out_stop_oplock_thread:
	kthread_stop(oplockThread);
 out_unregister_filesystem:
	unregister_filesystem(&cifs_fs_type);
 out_destroy_request_bufs:
	cifs_destroy_request_bufs();
 out_destroy_mids:
	cifs_destroy_mids();
 out_destroy_inodecache:
	cifs_destroy_inodecache();
 out_clean_proc:
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	return rc;
}
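/*
 * Module unload path: undoes init_cifs() - proc entries, filesystem
 * registration, the inode/mid/request caches, and finally the two
 * kernel threads started at init time.
 */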
static void __exit
exit_cifs(void)
{
	cFYI(0, ("exit_cifs"));
#ifdef CONFIG_PROC_FS
	cifs_proc_clean();
#endif
	unregister_filesystem(&cifs_fs_type);
	cifs_destroy_inodecache();
	cifs_destroy_mids();
	cifs_destroy_request_bufs();
	kthread_stop(oplockThread);
	kthread_stop(dnotifyThread);
}

MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
MODULE_LICENSE("GPL");	/* combination of LGPL + GPL source behaves as GPL */
MODULE_DESCRIPTION
    ("VFS to access servers complying with the SNIA CIFS Specification "
     "e.g. Samba and Windows");
MODULE_VERSION(CIFS_VERSION);
module_init(init_cifs)
module_exit(exit_cifs)
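/*
 * Usage note (assumption: the module_param() declarations for these
 * tunables appear earlier in this file, as in mainline cifs): the
 * limits enforced in init_cifs() and cifs_init_request_bufs()
 * correspond to load-time parameters, e.g.
 *
 *	modprobe cifs CIFSMaxBufSize=16384 cifs_min_small=30
 *
 * Out-of-range values are clamped with a log message rather than
 * rejected.
 */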