aops.c
来自「Linux Kernel 2.6.9 for OMAP1710」· C语言 代码 · 共 2,031 行 · 第 1/4 页
C
2,031 行
		unlock_page(page);
		return 0;
	}

	/* NOTE: Different naming scheme to ntfs_read_block()! */

	/* The first block in the page. */
	block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);

	/* The first out of bounds block for the data size. */
	dblock = (vi->i_size + blocksize - 1) >> blocksize_bits;

	/* The last (fully or partially) initialized block. */
	iblock = ni->initialized_size >> blocksize_bits;

	/*
	 * Be very careful.  We have no exclusion from __set_page_dirty_buffers
	 * here, and the (potentially unmapped) buffers may become dirty at
	 * any time.  If a buffer becomes dirty here after we've inspected it
	 * then we just miss that fact, and the page stays dirty.
	 *
	 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
	 * handle that here by just cleaning them.
	 */

	/*
	 * Loop through all the buffers in the page, mapping all the dirty
	 * buffers to disk addresses and handling any aliases from the
	 * underlying block device's mapping.
	 */
	/* rl caches the runlist pointer; non-NULL means runlist.lock is held. */
	rl = NULL;
	err = 0;
	do {
		BOOL is_retry = FALSE;

		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.  The contents of such buffers
			 * were zeroed by ntfs_writepage().
			 *
			 * FIXME: What about the small race window where
			 * ntfs_writepage() has not done any clearing because
			 * the page was within i_size but before we get here,
			 * vmtruncate() modifies i_size?
			 */
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}

		/* Clean buffers are not written out, so no need to map them. */
		if (!buffer_dirty(bh))
			continue;

		/* Make sure we have enough initialized size. */
		if (unlikely((block >= iblock) &&
				(ni->initialized_size < vi->i_size))) {
			/*
			 * If this page is fully outside initialized size, zero
			 * out all pages between the current initialized size
			 * and the current page.  Just use ntfs_readpage() to do
			 * the zeroing transparently.
			 */
			if (block > iblock) {
				// TODO:
				// For each page do:
				// - read_cache_page()
				// Again for each page do:
				// - wait_on_page_locked()
				// - Check (PageUptodate(page) &&
				//   !PageError(page))
				// Update initialized size in the attribute and
				// in the inode.
				// Again, for each page do:
				//	__set_page_dirty_buffers();
				// page_cache_release()
				// We don't need to wait on the writes.
				// Update iblock.
			}
			/*
			 * The current page straddles initialized size.  Zero
			 * all non-uptodate buffers and set them uptodate (and
			 * dirty?).  Note, there aren't any non-uptodate buffers
			 * if the page is uptodate.
			 * FIXME: For an uptodate page, the buffers may need to
			 * be written out because they were not initialized on
			 * disk before.
			 */
			if (!PageUptodate(page)) {
				// TODO:
				// Zero any non-uptodate buffers up to i_size.
				// Set them uptodate and dirty.
			}
			// TODO:
			// Update initialized size in the attribute and in the
			// inode (up to i_size).
			// Update iblock.
			// FIXME: This is inefficient. Try to batch the two
			// size changes to happen in one go.
			ntfs_error(vol->sb, "Writing beyond initialized size "
					"is not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
			// Do NOT set_buffer_new() BUT DO clear buffer range
			// outside write request range.
			// set_buffer_uptodate() on complete buffers as well as
			// set_buffer_dirty().
		}

		/* No need to map buffers that are already mapped. */
		if (buffer_mapped(bh))
			continue;

		/* Unmapped, dirty buffer.  Need to map it. */
		bh->b_bdev = vol->sb->s_bdev;

		/* Convert block into corresponding vcn and offset. */
		vcn = (VCN)block << blocksize_bits >> vol->cluster_size_bits;
		vcn_ofs = ((VCN)block << blocksize_bits) &
				vol->cluster_size_mask;
		/* Take the runlist lock on first use (and after a remap). */
		if (!rl) {
lock_retry_remap:
			down_read(&ni->runlist.lock);
			rl = ni->runlist.rl;
		}
		if (likely(rl != NULL)) {
			/* Seek to element containing target vcn. */
			while (rl->length && rl[1].vcn <= vcn)
				rl++;
			lcn = ntfs_vcn_to_lcn(rl, vcn);
		} else
			lcn = (LCN)LCN_RL_NOT_MAPPED;

		/* Successful remap. */
		if (lcn >= 0) {
			/* Setup buffer head to point to correct block. */
			bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
					vcn_ofs) >> blocksize_bits;
			set_buffer_mapped(bh);
			continue;
		}

		/* It is a hole, need to instantiate it. */
		if (lcn == LCN_HOLE) {
			// TODO: Instantiate the hole.
			// clear_buffer_new(bh);
			// unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
			ntfs_error(vol->sb, "Writing into sparse regions is "
					"not supported yet. Sorry.");
			err = -EOPNOTSUPP;
			break;
		}

		/* If first try and runlist unmapped, map and retry. */
		if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
			is_retry = TRUE;
			/*
			 * Attempt to map runlist, dropping lock for
			 * the duration.
			 */
			up_read(&ni->runlist.lock);
			err = ntfs_map_runlist(ni, vcn);
			if (likely(!err))
				goto lock_retry_remap;
			/*
			 * Mapping failed; rl stays NULL so we do not
			 * up_read() an unheld lock below.
			 */
			rl = NULL;
		}

		/* Failed to map the buffer, even after retrying. */
		bh->b_blocknr = -1UL;	/* Mark block number invalid. */
		ntfs_error(vol->sb, "ntfs_vcn_to_lcn(vcn = 0x%llx) failed "
				"with error code 0x%llx%s.",
				(unsigned long long)vcn,
				(unsigned long long)-lcn,
				is_retry ? " even after retrying" : "");
		// FIXME: Depending on vol->on_errors, do something.
		if (!err)
			err = -EIO;
		break;
	} while (block++, (bh = bh->b_this_page) != head);

	/* Release the lock if we took it. */
	if (rl)
		up_read(&ni->runlist.lock);

	/* For the error case, need to reset bh to the beginning. */
	bh = head;

	/* Just an optimization, so ->readpage() isn't called later. */
	if (unlikely(!PageUptodate(page))) {
		int uptodate = 1;
		do {
			if (!buffer_uptodate(bh)) {
				uptodate = 0;
				bh = head;
				break;
			}
		} while ((bh = bh->b_this_page) != head);
		if (uptodate)
			SetPageUptodate(page);
	}

	/* Setup all mapped, dirty buffers for async write i/o. */
	do {
		get_bh(bh);
		if (buffer_mapped(bh) && buffer_dirty(bh)) {
			lock_buffer(bh);
			if (test_clear_buffer_dirty(bh)) {
				BUG_ON(!buffer_uptodate(bh));
				mark_buffer_async_write(bh);
			} else
				unlock_buffer(bh);
		} else if (unlikely(err)) {
			/*
			 * For the error case.  The buffer may have been set
			 * dirty during attachment to a dirty page.
			 */
			if (err != -ENOMEM)
				clear_buffer_dirty(bh);
		}
	} while ((bh = bh->b_this_page) != head);

	if (unlikely(err)) {
		// TODO: Remove the -EOPNOTSUPP check later on...
		if (unlikely(err == -EOPNOTSUPP))
			err = 0;
		else if (err == -ENOMEM) {
			ntfs_warning(vol->sb, "Error allocating memory. "
					"Redirtying page so we try again "
					"later.");
			/*
			 * Put the page back on mapping->dirty_pages, but
			 * leave its buffer's dirty state as-is.
			 */
			redirty_page_for_writepage(wbc, page);
			err = 0;
		} else
			SetPageError(page);
	}

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);	/* Keeps try_to_free_buffers() away. */
	unlock_page(page);

	/*
	 * Submit the prepared buffers for i/o.  Note the page is unlocked,
	 * and the async write i/o completion handler can end_page_writeback()
	 * at any time after the *first* submit_bh().  So the buffers can then
	 * disappear...
	 */
	need_end_writeback = TRUE;
	do {
		struct buffer_head *next = bh->b_this_page;
		if (buffer_async_write(bh)) {
			submit_bh(WRITE, bh);
			need_end_writeback = FALSE;
		}
		put_bh(bh);
		bh = next;
	} while (bh != head);

	/* If no i/o was started, need to end_page_writeback(). */
	if (unlikely(need_end_writeback))
		end_page_writeback(page);

	ntfs_debug("Done.");
	return err;
}

/* Shared tail for user-visible "not implemented yet" error messages. */
static const char *ntfs_please_email = "Please email "
		"linux-ntfs-dev@lists.sourceforge.net and say that you saw "
		"this message. Thank you.";

/**
 * ntfs_write_mst_block - write a @page to the backing store
 * @wbc:	writeback control structure
 * @page:	page cache page to write out
 *
 * This function is for writing pages belonging to non-resident, mst protected
 * attributes to their backing store.  The only supported attribute is the
 * index allocation attribute.  Both directory inodes and index inodes are
 * supported.
 *
 * The page must remain locked for the duration of the write because we apply
 * the mst fixups, write, and then undo the fixups, so if we were to unlock the
 * page before undoing the fixups, any other user of the page will see the
 * page contents as corrupt.
 *
 * Return 0 on success and -errno on error.
 *
 * Based on ntfs_write_block(), ntfs_mft_writepage(), and
 * write_mft_record_nolock().
 */
static int ntfs_write_mst_block(struct writeback_control *wbc,
		struct page *page)
{
	sector_t block, dblock, rec_block;
	struct inode *vi = page->mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	u8 *kaddr;
	unsigned int bh_size = 1 << vi->i_blkbits;
	unsigned int rec_size;
	struct buffer_head *bh, *head;
	/* One bh per device block; VLA sized by the page/block geometry. */
	int max_bhs = PAGE_CACHE_SIZE / bh_size;
	struct buffer_head *bhs[max_bhs];
	int i, nr_recs, nr_bhs, bhs_per_rec, err;
	unsigned char bh_size_bits;
	BOOL rec_is_dirty;

	ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
			"0x%lx.", vi->i_ino, ni->type, page->index);
	/* Only mst protected, non-resident index allocation is supported. */
	BUG_ON(!NInoNonResident(ni));
	BUG_ON(!NInoMstProtected(ni));
	BUG_ON(!(S_ISDIR(vi->i_mode) ||
			(NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
	BUG_ON(PageWriteback(page));
	BUG_ON(!PageUptodate(page));
	BUG_ON(!max_bhs);

	/* Make sure we have mapped buffers. */
	if (unlikely(!page_has_buffers(page))) {
no_buffers_err_out:
		ntfs_error(vol->sb, "Writing ntfs records without existing "
				"buffers is not implemented yet. %s",
				ntfs_please_email);
		err = -EOPNOTSUPP;
		goto err_out;
	}
	bh = head = page_buffers(page);
	if (unlikely(!bh))
		goto no_buffers_err_out;
	bh_size_bits = vi->i_blkbits;
	rec_size = ni->itype.index.block_size;
	nr_recs = PAGE_CACHE_SIZE / rec_size;
	BUG_ON(!nr_recs);
	bhs_per_rec = rec_size >> bh_size_bits;
	BUG_ON(!bhs_per_rec);

	/* The first block in the page. */
	rec_block = block = (s64)page->index <<
			(PAGE_CACHE_SHIFT - bh_size_bits);

	/* The first out of bounds block for the data size. */
	dblock = (vi->i_size + bh_size - 1) >> bh_size_bits;

	err = nr_bhs = 0;
	/* Need this to silence a stupid gcc warning. */
	rec_is_dirty = FALSE;
	/*
	 * Collect the buffer heads of every dirty ntfs record into bhs[].
	 * A record is written all-or-nothing: rec_is_dirty, decided at the
	 * record's first block, must agree with the dirty state of every
	 * following block in the same record (enforced by the BUG_ONs).
	 */
	do {
		if (unlikely(block >= dblock)) {
			/*
			 * Mapped buffers outside i_size will occur, because
			 * this page can be outside i_size when there is a
			 * truncate in progress.  The contents of such buffers
			 * were zeroed by ntfs_writepage().
			 *
			 * FIXME: What about the small race window where
			 * ntfs_writepage() has not done any clearing because
			 * the page was within i_size but before we get here,
			 * vmtruncate() modifies i_size?
			 */
			clear_buffer_dirty(bh);
			continue;
		}
		if (rec_block == block) {
			/* This block is the first one in the record. */
			rec_block += rec_size >> bh_size_bits;
			if (!buffer_dirty(bh)) {
				/* Clean buffers are not written out. */
				rec_is_dirty = FALSE;
				continue;
			}
			rec_is_dirty = TRUE;
		} else {
			/* This block is not the first one in the record. */
			if (!buffer_dirty(bh)) {
				/* Clean buffers are not written out. */
				BUG_ON(rec_is_dirty);
				continue;
			}
			BUG_ON(!rec_is_dirty);
		}
		/* Attempting to write outside the initialized size is a bug. */
		BUG_ON(((block + 1) << bh_size_bits) > ni->initialized_size);
		if (!buffer_mapped(bh)) {
			ntfs_error(vol->sb, "Writing ntfs records without "
					"existing mapped buffers is not "
					"implemented yet. %s",
					ntfs_please_email);
			clear_buffer_dirty(bh);
			err = -EOPNOTSUPP;
			goto cleanup_out;
		}
		if (!buffer_uptodate(bh)) {
			ntfs_error(vol->sb, "Writing ntfs records without "
					"existing uptodate buffers is not "
					"implemented yet. %s",
					ntfs_please_email);
			clear_buffer_dirty(bh);
			err = -EOPNOTSUPP;
			goto cleanup_out;
		}
		bhs[nr_bhs++] = bh;
		BUG_ON(nr_bhs > max_bhs);
	} while (block++, (bh = bh->b_this_page) != head);

	/* If there were no dirty buffers, we are done. */
	if (!nr_bhs)
		goto done;

	/*
	 * Apply the mst protection fixups.  bhs[] holds whole records, so
	 * every bhs_per_rec-th entry is the start of a record.
	 */
	kaddr = page_address(page);
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec)) {
			err = pre_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(bhs[i])), rec_size);
			if (err) {
				ntfs_error(vol->sb, "Failed to apply mst "
						"fixups (inode 0x%lx, "
						"attribute type 0x%x, page "
						"index 0x%lx)! Umount and "
						"run chkdsk.", vi->i_ino,
						ni->type, page->index);
				/* Only records before i have fixups applied. */
				nr_bhs = i;
				goto mst_cleanup_out;
			}
		}
	}
	flush_dcache_page(page);

	/* Lock buffers and start synchronous write i/o on them. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		if (unlikely(test_set_buffer_locked(tbh)))
			BUG();
		if (unlikely(!test_clear_buffer_dirty(tbh))) {
			unlock_buffer(tbh);
			continue;
		}
		BUG_ON(!buffer_uptodate(tbh));
		BUG_ON(!buffer_mapped(tbh));
		get_bh(tbh);
		tbh->b_end_io = end_buffer_write_sync;
		submit_bh(WRITE, tbh);
	}

	/* Wait on i/o completion of buffers. */
	for (i = 0; i < nr_bhs; i++) {
		struct buffer_head *tbh = bhs[i];

		wait_on_buffer(tbh);
		if (unlikely(!buffer_uptodate(tbh))) {
			err = -EIO;
			/*
			 * Set the buffer uptodate so the page & buffer states
			 * don't become out of sync.
			 */
			if (PageUptodate(page))
				set_buffer_uptodate(tbh);
		}
	}

	/*
	 * Remove the mst protection fixups again, so in-memory users of the
	 * page never see the on-disk (fixed-up) byte layout.
	 */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec))
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(bhs[i])));
	}
	flush_dcache_page(page);
	if (unlikely(err)) {
		/* I/O error during writing.  This is really bad! */
		ntfs_error(vol->sb, "I/O error while writing ntfs record "
				"(inode 0x%lx, attribute type 0x%x, page "
				"index 0x%lx)! Umount and run chkdsk.",
				vi->i_ino, ni->type, page->index);
		goto err_out;
	}
done:
	/*
	 * The write was synchronous, so enter and leave the writeback state
	 * back-to-back to keep the page state machinery consistent.
	 */
	set_page_writeback(page);
	unlock_page(page);
	end_page_writeback(page);
	if (!err)
		ntfs_debug("Done.");
	return err;
mst_cleanup_out:
	/* Remove the mst protection fixups again. */
	for (i = 0; i < nr_bhs; i++) {
		if (!(i % bhs_per_rec))
			post_write_mst_fixup((NTFS_RECORD*)(kaddr +
					bh_offset(bhs[i])));
	}
cleanup_out:
	/* Clean the buffers. */
	for (i = 0; i < nr_bhs; i++)
		clear_buffer_dirty(bhs[i]);
err_out:
	SetPageError(page);
	goto done;
}

/**
 * ntfs_writepage - write a @page to the backing store
 * @page:	page cache page to write out
 * @wbc:	writeback control structure
 *
 * For non-resident attributes, ntfs_writepage() writes the @page by calling
 * the ntfs version of the generic block_write_full_page() function,
 * ntfs_write_block(), which in turn if necessary creates and writes the
 * buffers associated with the page asynchronously.
* * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying * the data to the mft record (which at this stage is most likely in memory).
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?