ntvfs_generic.c
                        return NT_STATUS_NO_MEMORY;
                }

                cl->close.level         = RAW_CLOSE_CLOSE;
                cl->close.in.file.ntvfs = wr->writeclose.in.file.ntvfs;
                cl->close.in.write_time = wr->writeclose.in.mtime;

                if (wr2->generic.in.count != 0) {
                        /* do the close sync for now */
                        state = req->async_states->state;
                        req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC;
                        status = ntvfs->ops->close(ntvfs, req, cl);
                        req->async_states->state = state;
                }
                break;

        case RAW_WRITE_SPLWRITE:
                break;

        case RAW_WRITE_SMB2:
                wr->smb2.out._pad     = 0;
                wr->smb2.out.nwritten = wr2->generic.out.nwritten;
                wr->smb2.out.unknown1 = 0;
                break;

        default:
                return NT_STATUS_INVALID_LEVEL;
        }

        return status;
}

/*
  NTVFS write generic to any mapper
*/
NTSTATUS ntvfs_map_write(struct ntvfs_module_context *ntvfs,
                         struct ntvfs_request *req,
                         union smb_write *wr)
{
        union smb_write *wr2;
        NTSTATUS status;

        wr2 = talloc(req, union smb_write);
        if (wr2 == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        status = ntvfs_map_async_setup(ntvfs, req, wr, wr2,
                                       (second_stage_t)ntvfs_map_write_finish);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        wr2->writex.level = RAW_WRITE_GENERIC;

        switch (wr->generic.level) {
        case RAW_WRITE_WRITEX:
                status = NT_STATUS_INVALID_LEVEL;
                break;

        case RAW_WRITE_WRITE:
                wr2->writex.in.file.ntvfs = wr->write.in.file.ntvfs;
                wr2->writex.in.offset     = wr->write.in.offset;
                wr2->writex.in.wmode      = 0;
                wr2->writex.in.remaining  = wr->write.in.remaining;
                wr2->writex.in.count      = wr->write.in.count;
                wr2->writex.in.data       = wr->write.in.data;
                status = ntvfs->ops->write(ntvfs, req, wr2);
                break;

        case RAW_WRITE_WRITEUNLOCK:
                wr2->writex.in.file.ntvfs = wr->writeunlock.in.file.ntvfs;
                wr2->writex.in.offset     = wr->writeunlock.in.offset;
                wr2->writex.in.wmode      = 0;
                wr2->writex.in.remaining  = wr->writeunlock.in.remaining;
                wr2->writex.in.count      = wr->writeunlock.in.count;
                wr2->writex.in.data       = wr->writeunlock.in.data;
                status = ntvfs->ops->write(ntvfs, req, wr2);
                break;

        case RAW_WRITE_WRITECLOSE:
                wr2->writex.in.file.ntvfs = wr->writeclose.in.file.ntvfs;
                wr2->writex.in.offset     = wr->writeclose.in.offset;
                wr2->writex.in.wmode      = 0;
                wr2->writex.in.remaining  = 0;
                wr2->writex.in.count      = wr->writeclose.in.count;
                wr2->writex.in.data       = wr->writeclose.in.data;
                status = ntvfs->ops->write(ntvfs, req, wr2);
                break;

        case RAW_WRITE_SPLWRITE:
                wr2->writex.in.file.ntvfs = wr->splwrite.in.file.ntvfs;
                wr2->writex.in.offset     = 0;
                wr2->writex.in.wmode      = 0;
                wr2->writex.in.remaining  = 0;
                wr2->writex.in.count      = wr->splwrite.in.count;
                wr2->writex.in.data       = wr->splwrite.in.data;
                status = ntvfs->ops->write(ntvfs, req, wr2);
                break;

        case RAW_WRITE_SMB2:
                wr2->writex.in.file.ntvfs = wr->smb2.in.file.ntvfs;
                wr2->writex.in.offset     = wr->smb2.in.offset;
                wr2->writex.in.wmode      = 0;
                wr2->writex.in.remaining  = 0;
                wr2->writex.in.count      = wr->smb2.in.data.length;
                wr2->writex.in.data       = wr->smb2.in.data.data;
                status = ntvfs->ops->write(ntvfs, req, wr2);
        }

        return ntvfs_map_async_finish(req, status);
}

/*
  NTVFS read generic to any mapper - finish the out mapping
*/
static NTSTATUS ntvfs_map_read_finish(struct ntvfs_module_context *ntvfs,
                                      struct ntvfs_request *req,
                                      union smb_read *rd,
                                      union smb_read *rd2,
                                      NTSTATUS status)
{
        switch (rd->generic.level) {
        case RAW_READ_READ:
                rd->read.out.nread       = rd2->generic.out.nread;
                break;
        case RAW_READ_READBRAW:
                rd->readbraw.out.nread   = rd2->generic.out.nread;
                break;
        case RAW_READ_LOCKREAD:
                rd->lockread.out.nread   = rd2->generic.out.nread;
                break;
        case RAW_READ_SMB2:
                rd->smb2.out.data.length = rd2->generic.out.nread;
                rd->smb2.out.remaining   = 0;
                rd->smb2.out.reserved    = 0;
                break;
        default:
                return NT_STATUS_INVALID_LEVEL;
        }

        return status;
}

/*
  NTVFS read* to readx mapper
*/
NTSTATUS ntvfs_map_read(struct ntvfs_module_context *ntvfs,
                        struct ntvfs_request *req,
                        union smb_read *rd)
{
        union smb_read *rd2;
        union smb_lock *lck;
        NTSTATUS status;
        uint_t state;

        rd2 = talloc(req, union smb_read);
        if (rd2 == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        status = ntvfs_map_async_setup(ntvfs, req, rd, rd2,
                                       (second_stage_t)ntvfs_map_read_finish);
        if (!NT_STATUS_IS_OK(status)) {
                return status;
        }

        rd2->readx.level = RAW_READ_READX;
        rd2->readx.in.read_for_execute = false;

        switch (rd->generic.level) {
        case RAW_READ_READX:
                status = NT_STATUS_INVALID_LEVEL;
                break;

        case RAW_READ_READ:
                rd2->readx.in.file.ntvfs = rd->read.in.file.ntvfs;
                rd2->readx.in.offset     = rd->read.in.offset;
                rd2->readx.in.mincnt     = rd->read.in.count;
                rd2->readx.in.maxcnt     = rd->read.in.count;
                rd2->readx.in.remaining  = rd->read.in.remaining;
                rd2->readx.out.data      = rd->read.out.data;
                status = ntvfs->ops->read(ntvfs, req, rd2);
                break;

        case RAW_READ_READBRAW:
                rd2->readx.in.file.ntvfs = rd->readbraw.in.file.ntvfs;
                rd2->readx.in.offset     = rd->readbraw.in.offset;
                rd2->readx.in.mincnt     = rd->readbraw.in.mincnt;
                rd2->readx.in.maxcnt     = rd->readbraw.in.maxcnt;
                rd2->readx.in.remaining  = 0;
                rd2->readx.out.data      = rd->readbraw.out.data;
                status = ntvfs->ops->read(ntvfs, req, rd2);
                break;

        case RAW_READ_LOCKREAD:
                /* do the initial lock sync for now */
                state = req->async_states->state;
                req->async_states->state &= ~NTVFS_ASYNC_STATE_MAY_ASYNC;

                lck = talloc(rd2, union smb_lock);
                if (lck == NULL) {
                        status = NT_STATUS_NO_MEMORY;
                        goto done;
                }
                lck->lock.level         = RAW_LOCK_LOCK;
                lck->lock.in.file.ntvfs = rd->lockread.in.file.ntvfs;
                lck->lock.in.count      = rd->lockread.in.count;
                lck->lock.in.offset     = rd->lockread.in.offset;

                status = ntvfs->ops->lock(ntvfs, req, lck);
                req->async_states->state = state;

                rd2->readx.in.file.ntvfs = rd->lockread.in.file.ntvfs;
                rd2->readx.in.offset     = rd->lockread.in.offset;
                rd2->readx.in.mincnt     = rd->lockread.in.count;
                rd2->readx.in.maxcnt     = rd->lockread.in.count;
                rd2->readx.in.remaining  = rd->lockread.in.remaining;
                rd2->readx.out.data      = rd->lockread.out.data;

                if (NT_STATUS_IS_OK(status)) {
                        status = ntvfs->ops->read(ntvfs, req, rd2);
                }
                break;

        case RAW_READ_SMB2:
                rd2->readx.in.file.ntvfs = rd->smb2.in.file.ntvfs;
                rd2->readx.in.offset     = rd->smb2.in.offset;
                rd2->readx.in.mincnt     = rd->smb2.in.min_count;
                rd2->readx.in.maxcnt     = rd->smb2.in.length;
                rd2->readx.in.remaining  = 0;
                rd2->readx.out.data      = rd->smb2.out.data.data;
                status = ntvfs->ops->read(ntvfs, req, rd2);
                break;
        }

done:
        return ntvfs_map_async_finish(req, status);
}

/*
  NTVFS close generic to any mapper
*/
static NTSTATUS ntvfs_map_close_finish(struct ntvfs_module_context *ntvfs,
                                       struct ntvfs_request *req,
                                       union smb_close *cl,
                                       union smb_close *cl2,
                                       NTSTATUS status)
{
        NT_STATUS_NOT_OK_RETURN(status);

        switch (cl->generic.level) {
        case RAW_CLOSE_SMB2:
                cl->smb2.out.flags       = cl2->generic.out.flags;
                cl->smb2.out._pad        = 0;
                cl->smb2.out.create_time = cl2->generic.out.create_time;
                cl->smb2.out.access_time = cl2->generic.out.access_time;
                cl->smb2.out.write_time  = cl2->generic.out.write_time;
                cl->smb2.out.change_time = cl2->generic.out.change_time;
                cl->smb2.out.alloc_size  = cl2->generic.out.alloc_size;
                cl->smb2.out.size        = cl2->generic.out.size;
                cl->smb2.out.file_attr   = cl2->generic.out.file_attr;
                break;
        default:
                break;
        }

        return status;
}

/*
  NTVFS close generic to any mapper
*/
NTSTATUS ntvfs_map_close(struct ntvfs_module_context *ntvfs,
                         struct ntvfs_request *req,
                         union smb_close *cl)
{
        union smb_close *cl2;
        NTSTATUS status;

        cl2 = talloc(req, union smb_close);
        if (cl2 == NULL) {
                return NT_STATUS_NO_MEMORY;
        }

        switch (cl->generic.level) {
        case RAW_CLOSE_GENERIC:
                return NT_STATUS_INVALID_LEVEL;

        case RAW_CLOSE_CLOSE:
                cl2->generic.level         = RAW_CLOSE_GENERIC;
                cl2->generic.in.file       = cl->close.in.file;
                cl2->generic.in.write_time = cl->close.in.write_time;
                cl2->generic.in.flags      = 0;
                break;

        case RAW_CLOSE_SPLCLOSE:
                cl2->generic.level         = RAW_CLOSE_GENERIC;
                cl2->generic.in.file       = cl->splclose.in.file;
                cl2->generic.in.write_time = 0;
                cl2->generic.in.flags      = 0;
                break;

        case RAW_CLOSE_SMB2:
                cl2->generic.level         = RAW_CLOSE_GENERIC;
                cl2->generic.in.file       = cl->smb2.in.file;
                cl2->generic.in.write_time = 0;
                cl2->generic.in.flags      = cl->smb2.in.flags;
                break;
        }

        status = ntvfs_map_async_setup(ntvfs, req, cl, cl2,
                                       (second_stage_t)ntvfs_map_close_finish);
        NT_STATUS_NOT_OK_RETURN(status);

        status = ntvfs->ops->close(ntvfs, req, cl2);

        return ntvfs_map_async_finish(req, status);
}

/*
  NTVFS notify generic to any mapper
*/
static NTSTATUS ntvfs_map_notify_finish(struct ntvfs_module_context *ntvfs,
                                        struct ntvfs_request *req,
                                        union smb_notify *nt,
                                        union smb_notify *nt2,
                                        NTSTATUS status)
{
        NT_STATUS_NOT_OK_RETURN(status);

        switch (nt->nttrans.level) {
        case RAW_NOTIFY_SMB2:
                if (nt2->nttrans.out.num_changes == 0) {
                        return STATUS_NOTIFY_ENUM_DIR;
                }
                nt->smb2.out.num_changes = nt2->nttrans.out.num_changes;
                nt->smb2.out.changes     = talloc_steal(req, nt2->nttrans.out.changes);
                break;

        default:
                return NT_STATUS_INVALID_LEVEL;
        }

        return status;
}

/*
  NTVFS notify generic to any mapper
*/
NTSTATUS ntvfs_map_notify(struct ntvfs_module_context *ntvfs,
                          struct ntvfs_request *req,
                          union smb_notify *nt)
{
        union smb_notify *nt2;
        NTSTATUS status;

        nt2 = talloc(req, union smb_notify);
        NT_STATUS_HAVE_NO_MEMORY(nt2);

        status = ntvfs_map_async_setup(ntvfs, req, nt, nt2,
                                       (second_stage_t)ntvfs_map_notify_finish);
        NT_STATUS_NOT_OK_RETURN(status);

        nt2->nttrans.level = RAW_NOTIFY_NTTRANS;

        switch (nt->nttrans.level) {
        case RAW_NOTIFY_NTTRANS:
                status = NT_STATUS_INVALID_LEVEL;
                break;

        case RAW_NOTIFY_SMB2:
                nt2->nttrans.in.file.ntvfs        = nt->smb2.in.file.ntvfs;
                nt2->nttrans.in.buffer_size       = nt->smb2.in.buffer_size;
                nt2->nttrans.in.completion_filter = nt->smb2.in.completion_filter;
                nt2->nttrans.in.recursive         = nt->smb2.in.recursive;
                status = ntvfs->ops->notify(ntvfs, req, nt2);
                break;
        }

        return ntvfs_map_async_finish(req, status);
}
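/*
  Illustrative sketch only; not part of ntvfs_generic.c or the Samba tree.
  All demo_* names are hypothetical.  It shows, in miniature, the two-stage
  pattern the mappers above rely on: the mapper translates the caller's
  request level into the one generic level the backend implements, calls the
  backend, and a registered "finish" callback copies the generic output back
  into the caller's original level.
*/
#include <stddef.h>
#include <stdio.h>

typedef int demo_status_t;                /* stands in for NTSTATUS */
#define DEMO_OK 0

struct demo_write_old { size_t count; size_t nwritten; }; /* legacy level  */
struct demo_write_x   { size_t count; size_t nwritten; }; /* generic level */

union demo_write {
        struct demo_write_old oldw;
        struct demo_write_x   writex;
};

/* plays the role of second_stage_t: maps generic output back to the caller */
typedef demo_status_t (*demo_second_stage_t)(union demo_write *orig,
                                             union demo_write *mapped,
                                             demo_status_t status);

/* the backend only speaks the generic (writex) level */
static demo_status_t demo_backend_write(union demo_write *wr)
{
        wr->writex.nwritten = wr->writex.count; /* pretend everything was written */
        return DEMO_OK;
}

/* finish stage: on success, copy generic output into the old level */
static demo_status_t demo_map_write_finish(union demo_write *orig,
                                           union demo_write *mapped,
                                           demo_status_t status)
{
        if (status != DEMO_OK) {
                return status;
        }
        orig->oldw.nwritten = mapped->writex.nwritten;
        return status;
}

/* the mapper: translate in, call the backend, run the finish stage */
static demo_status_t demo_map_write(union demo_write *orig)
{
        demo_second_stage_t finish = demo_map_write_finish;
        union demo_write mapped;
        demo_status_t status;

        mapped.writex.count = orig->oldw.count; /* old level -> writex */
        status = demo_backend_write(&mapped);
        return finish(orig, &mapped, status);   /* writex output -> old level */
}

int main(void)
{
        union demo_write wr = { .oldw = { .count = 42, .nwritten = 0 } };

        if (demo_map_write(&wr) == DEMO_OK) {
                printf("nwritten = %zu\n", wr.oldw.nwritten);
        }
        return 0;
}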