
rw.c

lustre 1.6.5 source code
Language: C
Page 1 of 5
                llap_write_pending(inode, llap);
                GOTO(out, 0);
        }

        llap->llap_write_queued = 0;
        rc = oig_init(&oig);
        if (rc)
                GOTO(out, rc);

        /* make full-page requests if we are not at EOF (bug 4410) */
        if (to != CFS_PAGE_SIZE && llap->llap_page->index < size_index) {
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write before EOF: size_index %lu, to %d\n",
                               size_index, to);
                to = CFS_PAGE_SIZE;
        } else if (to != CFS_PAGE_SIZE && llap->llap_page->index == size_index) {
                int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
                LL_CDEBUG_PAGE(D_PAGE, llap->llap_page,
                               "sync write at EOF: size_index %lu, to %d/%d\n",
                               size_index, to, size_to);
                if (to < size_to)
                        to = size_to;
        }

        /* compare the checksum once before the page leaves llite */
        if (unlikely((sbi->ll_flags & LL_SBI_LLITE_CHECKSUM) &&
                     llap->llap_checksum != 0)) {
                __u32 csum;
                struct page *page = llap->llap_page;
                char *kaddr = kmap_atomic(page, KM_USER0);

                csum = init_checksum(OSC_DEFAULT_CKSUM);
                csum = compute_checksum(csum, kaddr, CFS_PAGE_SIZE,
                                        OSC_DEFAULT_CKSUM);
                kunmap_atomic(kaddr, KM_USER0);
                if (llap->llap_checksum == csum) {
                        CDEBUG(D_PAGE, "page %p cksum %x confirmed\n",
                               page, csum);
                } else {
                        CERROR("page %p old cksum %x != new cksum %x!\n",
                               page, llap->llap_checksum, csum);
                }
        }

        rc = obd_queue_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig,
                                llap->llap_cookie, OBD_BRW_WRITE | noquot,
                                0, to, 0, ASYNC_READY | ASYNC_URGENT |
                                ASYNC_COUNT_STABLE | ASYNC_GROUP_SYNC);
        if (rc)
                GOTO(free_oig, rc);

        rc = obd_trigger_group_io(exp, ll_i2info(inode)->lli_smd, NULL, oig);
        if (rc)
                GOTO(free_oig, rc);

        rc = oig_wait(oig);

        if (!rc && async_flags & ASYNC_READY) {
                unlock_page(llap->llap_page);
                if (PageWriteback(llap->llap_page)) {
                        end_page_writeback(llap->llap_page);
                }
        }

        LL_CDEBUG_PAGE(D_PAGE, llap->llap_page, "sync write returned %d\n", rc);

free_oig:
        oig_release(oig);
out:
        RETURN(rc);
}

/* update our write count to account for i_size increases that may have
 * happened since we've queued the page for io. */

/* be careful not to return success without setting the page Uptodate or
 * the next pass through prepare_write will read in stale data from disk.
 */
int ll_commit_write(struct file *file, struct page *page, unsigned from,
                    unsigned to)
{
        struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
        struct inode *inode = page->mapping->host;
        struct ll_inode_info *lli = ll_i2info(inode);
        struct lov_stripe_md *lsm = lli->lli_smd;
        struct obd_export *exp;
        struct ll_async_page *llap;
        loff_t size;
        struct lustre_handle *lockh = NULL;
        int rc = 0;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(inode == file->f_dentry->d_inode);
        LASSERT(PageLocked(page));

        CDEBUG(D_INODE, "inode %p is writing page %p from %d to %d at %lu\n",
               inode, page, from, to, page->index);

        if (fd->fd_flags & LL_FILE_GROUP_LOCKED)
                lockh = &fd->fd_cwlockh;

        llap = llap_from_page_with_lockh(page, LLAP_ORIGIN_COMMIT_WRITE, lockh);
        if (IS_ERR(llap))
                RETURN(PTR_ERR(llap));

        exp = ll_i2obdexp(inode);
        if (exp == NULL)
                RETURN(-EINVAL);

        llap->llap_ignore_quota = capable(CAP_SYS_RESOURCE);

        /* queue a write for some time in the future the first time we
         * dirty the page */
        if (!PageDirty(page)) {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_MISSES, 1);

                rc = queue_or_sync_write(exp, inode, llap, to, 0);
                if (rc)
                        GOTO(out, rc);
        } else {
                ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_DIRTY_HITS, 1);
        }

        /* put the page in the page cache, from now on ll_removepage is
         * responsible for cleaning up the llap.
         * only set page dirty when it's queued to be write out */
        if (llap->llap_write_queued)
                set_page_dirty(page);

out:
        size = (((obd_off)page->index) << CFS_PAGE_SHIFT) + to;
        ll_inode_size_lock(inode, 0);
        if (rc == 0) {
                lov_stripe_lock(lsm);
                obd_adjust_kms(exp, lsm, size, 0);
                lov_stripe_unlock(lsm);
                if (size > i_size_read(inode))
                        i_size_write(inode, size);
                SetPageUptodate(page);
        } else if (size > i_size_read(inode)) {
                /* this page beyond the pales of i_size, so it can't be
                 * truncated in ll_p_r_e during lock revoking. we must
                 * teardown our book-keeping here.
                 */
                ll_removepage(page);
        }
        ll_inode_size_unlock(inode, 0);
        RETURN(rc);
}

static unsigned long ll_ra_count_get(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;
        unsigned long ret;
        ENTRY;

        spin_lock(&sbi->ll_lock);
        ret = min(ra->ra_max_pages - ra->ra_cur_pages, len);
        ra->ra_cur_pages += ret;
        spin_unlock(&sbi->ll_lock);

        RETURN(ret);
}

static void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
{
        struct ll_ra_info *ra = &sbi->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        LASSERTF(ra->ra_cur_pages >= len, "r_c_p %lu len %lu\n",
                 ra->ra_cur_pages, len);
        ra->ra_cur_pages -= len;
        spin_unlock(&sbi->ll_lock);
}

/* called for each page in a completed rpc. */
int ll_ap_completion(void *data, int cmd, struct obdo *oa, int rc)
{
        struct ll_async_page *llap;
        struct page *page;
        int ret = 0;
        ENTRY;

        llap = LLAP_FROM_COOKIE(data);
        page = llap->llap_page;
        LASSERT(PageLocked(page));
        LASSERT(CheckWriteback(page, cmd));

        LL_CDEBUG_PAGE(D_PAGE, page, "completing cmd %d with %d\n", cmd, rc);

        if (cmd & OBD_BRW_READ && llap->llap_defer_uptodate)
                ll_ra_count_put(ll_i2sbi(page->mapping->host), 1);

        if (rc == 0) {
                if (cmd & OBD_BRW_READ) {
                        if (!llap->llap_defer_uptodate)
                                SetPageUptodate(page);
                } else {
                        llap->llap_write_queued = 0;
                }
                ClearPageError(page);
        } else {
                if (cmd & OBD_BRW_READ) {
                        llap->llap_defer_uptodate = 0;
                }
                SetPageError(page);
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
                if (rc == -ENOSPC)
                        set_bit(AS_ENOSPC, &page->mapping->flags);
                else
                        set_bit(AS_EIO, &page->mapping->flags);
#else
                page->mapping->gfp_mask |= AS_EIO_MASK;
#endif
        }

        unlock_page(page);

        if (cmd & OBD_BRW_WRITE) {
                llap_write_complete(page->mapping->host, llap);
                ll_try_done_writing(page->mapping->host);
        }

        if (PageWriteback(page)) {
                end_page_writeback(page);
        }
        page_cache_release(page);

        RETURN(ret);
}

static void __ll_put_llap(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct obd_export *exp;
        struct ll_async_page *llap;
        struct ll_sb_info *sbi = ll_i2sbi(inode);
        int rc;
        ENTRY;

        exp = ll_i2obdexp(inode);
        if (exp == NULL) {
                CERROR("page %p ind %lu gave null export\n", page, page->index);
                EXIT;
                return;
        }

        llap = llap_from_page(page, LLAP_ORIGIN_REMOVEPAGE);
        if (IS_ERR(llap)) {
                CERROR("page %p ind %lu couldn't find llap: %ld\n", page,
                       page->index, PTR_ERR(llap));
                EXIT;
                return;
        }

        //llap_write_complete(inode, llap);
        rc = obd_teardown_async_page(exp, ll_i2info(inode)->lli_smd, NULL,
                                     llap->llap_cookie);
        if (rc != 0)
                CERROR("page %p ind %lu failed: %d\n", page, page->index, rc);

        /* this unconditional free is only safe because the page lock
         * is providing exclusivity to memory pressure/truncate/writeback..*/
        __clear_page_ll_data(page);

        spin_lock(&sbi->ll_lock);
        if (!list_empty(&llap->llap_pglist_item))
                list_del_init(&llap->llap_pglist_item);
        sbi->ll_pglist_gen++;
        sbi->ll_async_page_count--;
        spin_unlock(&sbi->ll_lock);
        OBD_SLAB_FREE(llap, ll_async_page_slab, ll_async_page_slab_size);
        EXIT;
}

/* the kernel calls us here when a page is unhashed from the page cache.
 * the page will be locked and the kernel is holding a spinlock, so
 * we need to be careful.  we're just tearing down our book-keeping
 * here. */
void ll_removepage(struct page *page)
{
        struct ll_async_page *llap = llap_cast_private(page);
        ENTRY;

        LASSERT(!in_interrupt());

        /* sync pages or failed read pages can leave pages in the page
         * cache that don't have our data associated with them anymore */
        if (page_private(page) == 0) {
                EXIT;
                return;
        }

        LASSERT(!llap->llap_lockless_io_page);
        LASSERT(!llap->llap_nocache);
        LL_CDEBUG_PAGE(D_PAGE, page, "being evicted\n");

        __ll_put_llap(page);
        EXIT;
}

static int ll_issue_page_read(struct obd_export *exp,
                              struct ll_async_page *llap,
                              struct obd_io_group *oig, int defer)
{
        struct page *page = llap->llap_page;
        int rc;

        page_cache_get(page);
        llap->llap_defer_uptodate = defer;
        llap->llap_ra_used = 0;
        rc = obd_queue_group_io(exp, ll_i2info(page->mapping->host)->lli_smd,
                                NULL, oig, llap->llap_cookie, OBD_BRW_READ, 0,
                                CFS_PAGE_SIZE, 0, ASYNC_COUNT_STABLE |
                                ASYNC_READY | ASYNC_URGENT);
        if (rc) {
                LL_CDEBUG_PAGE(D_ERROR, page, "read queue failed: rc %d\n", rc);
                page_cache_release(page);
        }
        RETURN(rc);
}

static void ll_ra_stats_inc_unlocked(struct ll_ra_info *ra, enum ra_stat which)
{
        LASSERTF(which >= 0 && which < _NR_RA_STAT, "which: %u\n", which);
        ra->ra_stats[which]++;
}

static void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
{
        struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
        struct ll_ra_info *ra = &ll_i2sbi(mapping->host)->ll_ra_info;

        spin_lock(&sbi->ll_lock);
        ll_ra_stats_inc_unlocked(ra, which);
        spin_unlock(&sbi->ll_lock);
}

void ll_ra_accounting(struct ll_async_page *llap, struct address_space *mapping)
{
        if (!llap->llap_defer_uptodate || llap->llap_ra_used)
                return;

        ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
}

#define RAS_CDEBUG(ras) \
        CDEBUG(D_READA,                                                      \
               "lrp %lu cr %lu cp %lu ws %lu wl %lu nra %lu r %lu ri %lu"    \
               "csr %lu sf %lu sp %lu sl %lu \n",                            \
               ras->ras_last_readpage, ras->ras_consecutive_requests,        \
               ras->ras_consecutive_pages, ras->ras_window_start,            \
               ras->ras_window_len, ras->ras_next_readahead,                 \
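
The entry points in this listing are not called directly by llite users: the VM reaches ll_commit_write() and, later, ll_removepage() through the inode's address_space_operations table, which lustre 1.6 defines elsewhere in llite (rw26.c on 2.6 kernels). The sketch below is not part of rw.c; it only illustrates that wiring, assuming the pre-2.6.24 aops field names and treating ll_readpage() and ll_prepare_write() as companion hooks declared elsewhere in llite.

/* Illustrative sketch only -- not taken from rw.c. */
#include <linux/fs.h>

extern int ll_readpage(struct file *file, struct page *page);
extern int ll_prepare_write(struct file *file, struct page *page,
                            unsigned from, unsigned to);
extern int ll_commit_write(struct file *file, struct page *page,
                           unsigned from, unsigned to);

static struct address_space_operations ll_aops_sketch = {
        .readpage      = ll_readpage,      /* queues async reads (ll_issue_page_read) */
        .prepare_write = ll_prepare_write, /* sets up the llap before the copy */
        .commit_write  = ll_commit_write,  /* queues or syncs the dirtied page;
                                            * ll_removepage() runs later, when
                                            * the page is unhashed from the
                                            * page cache. */
};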
