file.c
	initialized_size = ni->initialized_size;
	BUG_ON(end > ni->allocated_size);
	read_unlock_irqrestore(&ni->size_lock, flags);
	BUG_ON(initialized_size != i_size);
	if (end > initialized_size) {
		write_lock_irqsave(&ni->size_lock, flags);
		ni->initialized_size = end;
		i_size_write(vi, end);
		write_unlock_irqrestore(&ni->size_lock, flags);
	}
	/* Mark the mft record dirty, so it gets written back. */
	flush_dcache_mft_record_page(ctx->ntfs_ino);
	mark_mft_record_dirty(ctx->ntfs_ino);
	ntfs_attr_put_search_ctx(ctx);
	unmap_mft_record(base_ni);
	ntfs_debug("Done.");
	return 0;
err_out:
	if (err == -ENOMEM) {
		ntfs_warning(vi->i_sb, "Error allocating memory required to "
				"commit the write.");
		if (PageUptodate(page)) {
			ntfs_warning(vi->i_sb, "Page is uptodate, setting "
					"dirty so the write will be retried "
					"later on by the VM.");
			/*
			 * Put the page on mapping->dirty_pages, but leave its
			 * buffers' dirty state as-is.
			 */
			__set_page_dirty_nobuffers(page);
			err = 0;
		} else
			ntfs_error(vi->i_sb, "Page is not uptodate. Written "
					"data has been lost.");
	} else {
		ntfs_error(vi->i_sb, "Resident attribute commit write failed "
				"with error %i.", err);
		NVolSetErrors(ni->vol);
	}
	if (ctx)
		ntfs_attr_put_search_ctx(ctx);
	if (m)
		unmap_mft_record(base_ni);
	return err;
}
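/*
 * Note: an ntfs attribute keeps three sizes, with the invariant
 * initialized_size <= data_size (i_size) <= allocated_size.  Bytes between
 * initialized_size and data_size read back as zeroes, which is why the
 * commit path above raises initialized_size (and with it i_size) as soon as
 * a write extends past it.
 */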
/**
 * ntfs_file_buffered_write - write data to a file via the page cache
 *
 * Locking: The vfs is holding ->i_mutex on the inode.
 */
static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs,
		loff_t pos, loff_t *ppos, size_t count)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *vi = mapping->host;
	ntfs_inode *ni = NTFS_I(vi);
	ntfs_volume *vol = ni->vol;
	struct page *pages[NTFS_MAX_PAGES_PER_CLUSTER];
	struct page *cached_page = NULL;
	char __user *buf = NULL;
	s64 end, ll;
	VCN last_vcn;
	LCN lcn;
	unsigned long flags;
	size_t bytes, iov_ofs = 0;	/* Offset in the current iovec. */
	ssize_t status, written;
	unsigned nr_pages;
	int err;
	struct pagevec lru_pvec;

	ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
			"pos 0x%llx, count 0x%lx.", vi->i_ino,
			(unsigned)le32_to_cpu(ni->type),
			(unsigned long long)pos, (unsigned long)count);
	if (unlikely(!count))
		return 0;
	BUG_ON(NInoMstProtected(ni));
	/*
	 * If the attribute is not an index root and it is encrypted or
	 * compressed, we cannot write to it yet.  Note we need to check for
	 * AT_INDEX_ALLOCATION since this is the type of both directory and
	 * index inodes.
	 */
	if (ni->type != AT_INDEX_ALLOCATION) {
		/* If file is encrypted, deny access, just like NT4. */
		if (NInoEncrypted(ni)) {
			/*
			 * Reminder for later: Encrypted files are _always_
			 * non-resident so that the content can always be
			 * encrypted.
			 */
			ntfs_debug("Denying write access to encrypted file.");
			return -EACCES;
		}
		if (NInoCompressed(ni)) {
			/* Only unnamed $DATA attribute can be compressed. */
			BUG_ON(ni->type != AT_DATA);
			BUG_ON(ni->name_len);
			/*
			 * Reminder for later: If resident, the data is not
			 * actually compressed.  Only on the switch to non-
			 * resident does compression kick in.  This is in
			 * contrast to encrypted files (see above).
			 */
			ntfs_error(vi->i_sb, "Writing to compressed files is "
					"not implemented yet.  Sorry.");
			return -EOPNOTSUPP;
		}
	}
	/*
	 * If a previous ntfs_truncate() failed, repeat it and abort if it
	 * fails again.
	 */
	if (unlikely(NInoTruncateFailed(ni))) {
		down_write(&vi->i_alloc_sem);
		err = ntfs_truncate(vi);
		up_write(&vi->i_alloc_sem);
		if (err || NInoTruncateFailed(ni)) {
			if (!err)
				err = -EIO;
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"ntfs_truncate() failed (error code "
					"%i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			return err;
		}
	}
	/* The first byte after the write. */
	end = pos + count;
	/*
	 * If the write goes beyond the allocated size, extend the allocation
	 * to cover the whole of the write, rounded up to the nearest cluster.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->allocated_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (end > ll) {
		/* Extend the allocation without changing the data size. */
		ll = ntfs_attr_extend_allocation(ni, end, -1, pos);
		if (likely(ll >= 0)) {
			BUG_ON(pos >= ll);
			/* If the extension was partial truncate the write. */
			if (end > ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"the allocation was only "
						"partially extended.",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type));
				end = ll;
				count = ll - pos;
			}
		} else {
			err = ll;
			read_lock_irqsave(&ni->size_lock, flags);
			ll = ni->allocated_size;
			read_unlock_irqrestore(&ni->size_lock, flags);
			/* Perform a partial write if possible or fail. */
			if (pos < ll) {
				ntfs_debug("Truncating write to inode 0x%lx, "
						"attribute type 0x%x, because "
						"extending the allocation "
						"failed (error code %i).",
						vi->i_ino, (unsigned)
						le32_to_cpu(ni->type), err);
				end = ll;
				count = ll - pos;
			} else {
				ntfs_error(vol->sb, "Cannot perform write to "
						"inode 0x%lx, attribute type "
						"0x%x, because extending the "
						"allocation failed (error "
						"code %i).", vi->i_ino,
						(unsigned)
						le32_to_cpu(ni->type), err);
				return err;
			}
		}
	}
	pagevec_init(&lru_pvec, 0);
	written = 0;
	/*
	 * If the write starts beyond the initialized size, extend it up to
	 * the beginning of the write and initialize all non-sparse space
	 * between the old initialized size and the new one.  This
	 * automatically also increments the vfs inode->i_size to keep it
	 * above or equal to the initialized_size.
	 */
	read_lock_irqsave(&ni->size_lock, flags);
	ll = ni->initialized_size;
	read_unlock_irqrestore(&ni->size_lock, flags);
	if (pos > ll) {
		err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
				&lru_pvec);
		if (err < 0) {
			ntfs_error(vol->sb, "Cannot perform write to inode "
					"0x%lx, attribute type 0x%x, because "
					"extending the initialized size "
					"failed (error code %i).", vi->i_ino,
					(unsigned)le32_to_cpu(ni->type), err);
			status = err;
			goto err_out;
		}
	}
	/*
	 * Determine the number of pages per cluster for non-resident
	 * attributes.
	 */
	nr_pages = 1;
	if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni))
		nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT;
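	/*
	 * For example, with 64kiB clusters and 4kiB pages this gives
	 * nr_pages = 65536 >> 12 = 16, i.e. a write into a hole must grab
	 * and lock all sixteen pages of the cluster at once (see the
	 * LCN_HOLE handling below), while for cluster sizes up to the page
	 * size only the single page being written is locked.
	 */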
	/* Finally, perform the actual write. */
	last_vcn = -1;
	if (likely(nr_segs == 1))
		buf = iov->iov_base;
	do {
		VCN vcn;
		pgoff_t idx, start_idx;
		unsigned ofs, do_pages, u;
		size_t copied;

		start_idx = idx = pos >> PAGE_CACHE_SHIFT;
		ofs = pos & ~PAGE_CACHE_MASK;
		bytes = PAGE_CACHE_SIZE - ofs;
		do_pages = 1;
		if (nr_pages > 1) {
			vcn = pos >> vol->cluster_size_bits;
			if (vcn != last_vcn) {
				last_vcn = vcn;
				/*
				 * Get the lcn of the vcn the write is in.  If
				 * it is a hole, need to lock down all pages in
				 * the cluster.
				 */
				down_read(&ni->runlist.lock);
				lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
						vol->cluster_size_bits, false);
				up_read(&ni->runlist.lock);
				if (unlikely(lcn < LCN_HOLE)) {
					status = -EIO;
					if (lcn == LCN_ENOMEM)
						status = -ENOMEM;
					else
						ntfs_error(vol->sb, "Cannot "
							"perform write to "
							"inode 0x%lx, "
							"attribute type 0x%x, "
							"because the attribute "
							"is corrupt.",
							vi->i_ino, (unsigned)
							le32_to_cpu(ni->type));
					break;
				}
				if (lcn == LCN_HOLE) {
					start_idx = (pos & ~(s64)
							vol->cluster_size_mask)
							>> PAGE_CACHE_SHIFT;
					bytes = vol->cluster_size - (pos &
							vol->cluster_size_mask);
					do_pages = nr_pages;
				}
			}
		}
		if (bytes > count)
			bytes = count;
		/*
		 * Bring in the user page(s) that we will copy from _first_.
		 * Otherwise there is a nasty deadlock on copying from the
		 * same page(s) as we are writing to, without it/them being
		 * marked up-to-date.  Note, at present there is nothing to
		 * stop the pages being swapped out between us bringing them
		 * into memory and doing the actual copying.
		 */
		if (likely(nr_segs == 1))
			ntfs_fault_in_pages_readable(buf, bytes);
		else
			ntfs_fault_in_pages_readable_iovec(iov, iov_ofs,
					bytes);
		/* Get and lock @do_pages starting at index @start_idx. */
		status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
				pages, &cached_page, &lru_pvec);
		if (unlikely(status))
			break;
		/*
		 * For non-resident attributes, we need to fill any holes with
		 * actual clusters and ensure all buffers are mapped.  We also
		 * need to bring uptodate any buffers that are only partially
		 * being written to.
		 */
		if (NInoNonResident(ni)) {
			status = ntfs_prepare_pages_for_non_resident_write(
					pages, do_pages, pos, bytes);
			if (unlikely(status)) {
				loff_t i_size;

				do {
					unlock_page(pages[--do_pages]);
					page_cache_release(pages[do_pages]);
				} while (do_pages);
				/*
				 * The write preparation may have instantiated
				 * allocated space outside i_size.  Trim this
				 * off again.  We can ignore any errors in
				 * this case as we will just be wasting a bit
				 * of allocated space, which is not a
				 * disaster.
				 */
				i_size = i_size_read(vi);
				if (pos + bytes > i_size)
					vmtruncate(vi, i_size);
				break;
			}
		}
		u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index;
		if (likely(nr_segs == 1)) {
			copied = ntfs_copy_from_user(pages + u, do_pages - u,
					ofs, buf, bytes);
			buf += copied;
		} else
			copied = ntfs_copy_from_user_iovec(pages + u,
					do_pages - u, ofs, &iov, &iov_ofs,
					bytes);
		ntfs_flush_dcache_pages(pages + u, do_pages - u);
		status = ntfs_commit_pages_after_write(pages, do_pages, pos,
				bytes);
		if (likely(!status)) {
			written += copied;
			count -= copied;
			pos += copied;
			if (unlikely(copied != bytes))
				status = -EFAULT;
		}
		do {
			unlock_page(pages[--do_pages]);
			mark_page_accessed(pages[do_pages]);
			page_cache_release(pages[do_pages]);
		} while (do_pages);
		if (unlikely(status))
			break;
		balance_dirty_pages_ratelimited(mapping);
		cond_resched();
	} while (count);
err_out:
	*ppos = pos;
	if (cached_page)
		page_cache_release(cached_page);
	/* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
	if (likely(!status)) {
		if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
			if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
				status = generic_osync_inode(vi, mapping,
						OSYNC_METADATA|OSYNC_DATA);
		}
	}
	pagevec_lru_add(&lru_pvec);
	ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
			written ? "written" : "status", (unsigned long)written,
			(long)status);
	return written ? written : status;
}

/**
 * ntfs_file_aio_write_nolock - the lockless core of ntfs_file_aio_write()
 */
static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t *ppos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos;
	size_t count;		/* after file limit checks */
	ssize_t written, err;

	count = 0;
	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;
	pos = *ppos;
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
	/* We can write back this queue in page reclaim. */
	current->backing_dev_info = mapping->backing_dev_info;
	written = 0;
	err = generic_write_checks(file, &pos, &count,
			S_ISBLK(inode->i_mode));
	if (err)
		goto out;
	if (!count)
		goto out;
	err = remove_suid(file->f_path.dentry);
	if (err)
		goto out;
	file_update_time(file);
	written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
			count);
out:
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/**
 * ntfs_file_aio_write - write data to a file, taking i_mutex and syncing
 */
static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	mutex_lock(&inode->i_mutex);
	ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
	mutex_unlock(&inode->i_mutex);
	if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
		int err = sync_page_range(inode, mapping, pos, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}
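/*
 * Illustrative sketch (not part of the original file): how a plain,
 * synchronous write(2) would reach ntfs_file_aio_write() above in a 2.6-era
 * kernel.  This is modelled on the VFS do_sync_write() helper of that
 * period and is simplified (e.g. the -EIOCBRETRY handling is omitted); the
 * function name is made up for the example.
 */
#if 0
static ssize_t example_sync_write(struct file *filp, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	kiocb.ki_pos = *ppos;
	ret = ntfs_file_aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
	/* A queued aiocb must be waited upon to obtain the final result. */
	if (ret == -EIOCBQUEUED)
		ret = wait_on_sync_kiocb(&kiocb);
	*ppos = kiocb.ki_pos;
	return ret;
}
#endif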
/**
 * ntfs_file_writev -
 *
 * Basically the same as generic_file_writev() ex