📄 reply.c
			io->readx.in.maxcnt |= high_part << 16;
		}
	}

	/* the 64 bit variant */
	if (req->in.wct == 12) {
		uint32_t offset_high = IVAL(req->in.vwv, VWV(10));
		io->readx.in.offset |= (((uint64_t)offset_high) << 32);
	}

	/* setup the reply packet assuming the maximum possible read */
	smbsrv_setup_reply(req, 12, 1 + io->readx.in.maxcnt);

	/* tell the backend where to put the data. Notice the pad byte. */
	if (io->readx.in.maxcnt != 0xFFFF &&
	    io->readx.in.mincnt != 0xFFFF) {
		io->readx.out.data = req->out.data + 1;
	} else {
		io->readx.out.data = req->out.data;
	}

	SMBSRV_CHECK_FILE_HANDLE(io->readx.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_read(req->ntvfs, io));
}

/****************************************************************************
 Reply to a writebraw (core+ or LANMAN1.0 protocol).
****************************************************************************/
void smbsrv_reply_writebraw(struct smbsrv_request *req)
{
	smbsrv_send_error(req, NT_STATUS_DOS(ERRSRV, ERRuseSTD));
}

/****************************************************************************
 Reply to a writeunlock (async reply)
****************************************************************************/
static void reply_writeunlock_send(struct ntvfs_request *ntvfs)
{
	struct smbsrv_request *req;
	union smb_write *io;

	SMBSRV_CHECK_ASYNC_STATUS(io, union smb_write);

	/* construct reply */
	smbsrv_setup_reply(req, 1, 0);

	SSVAL(req->out.vwv, VWV(0), io->writeunlock.out.nwritten);

	smbsrv_send_reply(req);
}

/****************************************************************************
 Reply to a writeunlock (core+).
****************************************************************************/
void smbsrv_reply_writeunlock(struct smbsrv_request *req)
{
	union smb_write *io;

	SMBSRV_CHECK_WCT(req, 5);
	SMBSRV_TALLOC_IO_PTR(io, union smb_write);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_writeunlock_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->writeunlock.level = RAW_WRITE_WRITEUNLOCK;
	io->writeunlock.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	io->writeunlock.in.count = SVAL(req->in.vwv, VWV(1));
	io->writeunlock.in.offset = IVAL(req->in.vwv, VWV(2));
	io->writeunlock.in.remaining = SVAL(req->in.vwv, VWV(4));
	io->writeunlock.in.data = req->in.data + 3;

	/* make sure they gave us the data they promised */
	if (io->writeunlock.in.count+3 > req->in.data_size) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	/* make sure the data block is big enough */
	if (SVAL(req->in.data, 1) < io->writeunlock.in.count) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	SMBSRV_CHECK_FILE_HANDLE(io->writeunlock.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_write(req->ntvfs, io));
}

/****************************************************************************
 Reply to a write (async reply)
****************************************************************************/
static void reply_write_send(struct ntvfs_request *ntvfs)
{
	struct smbsrv_request *req;
	union smb_write *io;

	SMBSRV_CHECK_ASYNC_STATUS(io, union smb_write);

	/* construct reply */
	smbsrv_setup_reply(req, 1, 0);

	SSVAL(req->out.vwv, VWV(0), io->write.out.nwritten);

	smbsrv_send_reply(req);
}
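/*
 * A note on the parameter-word access used by the parsers below
 * (illustrative, assuming the usual Samba byteorder.h and VWV()
 * definitions rather than quoting them -- they are not part of this
 * excerpt):
 *
 *   VWV(n)          byte offset of parameter word n, i.e. n * 2
 *   SVAL(buf, ofs)  little-endian uint16_t load from buf + ofs
 *   IVAL(buf, ofs)  little-endian uint32_t load from buf + ofs
 *   SSVAL/SIVAL     the matching little-endian stores used for replies
 *
 * So IVAL(req->in.vwv, VWV(2)), for example, reads a 32-bit offset out
 * of parameter words 2 and 3 of the incoming request.
 */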
/****************************************************************************
 Reply to a write
****************************************************************************/
void smbsrv_reply_write(struct smbsrv_request *req)
{
	union smb_write *io;

	SMBSRV_CHECK_WCT(req, 5);
	SMBSRV_TALLOC_IO_PTR(io, union smb_write);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_write_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->write.level = RAW_WRITE_WRITE;
	io->write.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	io->write.in.count = SVAL(req->in.vwv, VWV(1));
	io->write.in.offset = IVAL(req->in.vwv, VWV(2));
	io->write.in.remaining = SVAL(req->in.vwv, VWV(4));
	io->write.in.data = req->in.data + 3;

	/* make sure they gave us the data they promised */
	if (req_data_oob(&req->in.bufinfo, io->write.in.data, io->write.in.count)) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	/* make sure the data block is big enough */
	if (SVAL(req->in.data, 1) < io->write.in.count) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	SMBSRV_CHECK_FILE_HANDLE(io->write.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_write(req->ntvfs, io));
}

/****************************************************************************
 Reply to a write and X (async reply)
****************************************************************************/
static void reply_write_and_X_send(struct ntvfs_request *ntvfs)
{
	struct smbsrv_request *req;
	union smb_write *io;

	SMBSRV_CHECK_ASYNC_STATUS(io, union smb_write);

	/* construct reply */
	smbsrv_setup_reply(req, 6, 0);

	SSVAL(req->out.vwv, VWV(0), SMB_CHAIN_NONE);
	SSVAL(req->out.vwv, VWV(1), 0);
	SSVAL(req->out.vwv, VWV(2), io->writex.out.nwritten & 0xFFFF);
	SSVAL(req->out.vwv, VWV(3), io->writex.out.remaining);
	SSVAL(req->out.vwv, VWV(4), io->writex.out.nwritten >> 16);
	SMBSRV_VWV_RESERVED(5, 1);

	smbsrv_chain_reply(req);
}

/****************************************************************************
 Reply to a write and X.
****************************************************************************/
void smbsrv_reply_write_and_X(struct smbsrv_request *req)
{
	union smb_write *io;

	if (req->in.wct != 14) {
		SMBSRV_CHECK_WCT(req, 12);
	}

	SMBSRV_TALLOC_IO_PTR(io, union smb_write);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_write_and_X_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->writex.level = RAW_WRITE_WRITEX;
	io->writex.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(2));
	io->writex.in.offset = IVAL(req->in.vwv, VWV(3));
	io->writex.in.wmode = SVAL(req->in.vwv, VWV(7));
	io->writex.in.remaining = SVAL(req->in.vwv, VWV(8));
	io->writex.in.count = SVAL(req->in.vwv, VWV(10));
	io->writex.in.data = req->in.hdr + SVAL(req->in.vwv, VWV(11));

	if (req->in.wct == 14) {
		uint32_t offset_high = IVAL(req->in.vwv, VWV(12));
		uint16_t count_high = SVAL(req->in.vwv, VWV(9));
		io->writex.in.offset |= (((uint64_t)offset_high) << 32);
		io->writex.in.count |= ((uint32_t)count_high) << 16;
	}

	/* make sure the data is in bounds */
	if (req_data_oob(&req->in.bufinfo, io->writex.in.data, io->writex.in.count)) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	SMBSRV_CHECK_FILE_HANDLE(io->writex.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_write(req->ntvfs, io));
}

/****************************************************************************
 Reply to a lseek (async reply)
****************************************************************************/
static void reply_lseek_send(struct ntvfs_request *ntvfs)
{
	struct smbsrv_request *req;
	union smb_seek *io;

	SMBSRV_CHECK_ASYNC_STATUS(io, union smb_seek);

	/* construct reply */
	smbsrv_setup_reply(req, 2, 0);

	SIVALS(req->out.vwv, VWV(0), io->lseek.out.offset);

	smbsrv_send_reply(req);
}
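/*
 * A note on the req_data_oob() checks above (based on how the call is
 * used here; its definition is not part of this excerpt): the write
 * handlers take a client-supplied count and a data pointer derived from
 * client-supplied fields, so before anything is passed to the ntvfs
 * backend they verify that the whole range [data, data + count) lies
 * inside the received request buffer. A request that advertises more
 * bytes than it actually carried is rejected with an error rather than
 * letting the server read past the end of the packet. The same check
 * reappears in smbsrv_reply_writeclose() below.
 */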
/****************************************************************************
 Reply to a lseek.
****************************************************************************/
void smbsrv_reply_lseek(struct smbsrv_request *req)
{
	union smb_seek *io;

	SMBSRV_CHECK_WCT(req, 4);
	SMBSRV_TALLOC_IO_PTR(io, union smb_seek);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_lseek_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->lseek.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	io->lseek.in.mode = SVAL(req->in.vwv, VWV(1));
	io->lseek.in.offset = IVALS(req->in.vwv, VWV(2));

	SMBSRV_CHECK_FILE_HANDLE(io->lseek.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_seek(req->ntvfs, io));
}

/****************************************************************************
 Reply to a flush.
****************************************************************************/
void smbsrv_reply_flush(struct smbsrv_request *req)
{
	union smb_flush *io;
	uint16_t fnum;

	/* parse request */
	SMBSRV_CHECK_WCT(req, 1);
	SMBSRV_TALLOC_IO_PTR(io, union smb_flush);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_simple_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	fnum = SVAL(req->in.vwv, VWV(0));

	if (fnum == 0xFFFF) {
		io->flush_all.level = RAW_FLUSH_ALL;
	} else {
		io->flush.level = RAW_FLUSH_FLUSH;
		io->flush.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
		SMBSRV_CHECK_FILE_HANDLE(io->flush.in.file.ntvfs);
	}

	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_flush(req->ntvfs, io));
}

/****************************************************************************
 Reply to a close
 Note that this has to deal with closing a directory opened by NT SMB's.
****************************************************************************/
void smbsrv_reply_close(struct smbsrv_request *req)
{
	union smb_close *io;

	/* parse request */
	SMBSRV_CHECK_WCT(req, 3);
	SMBSRV_TALLOC_IO_PTR(io, union smb_close);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_simple_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->close.level = RAW_CLOSE_CLOSE;
	io->close.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	io->close.in.write_time = srv_pull_dos_date3(req->smb_conn, req->in.vwv + VWV(1));

	SMBSRV_CHECK_FILE_HANDLE(io->close.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_close(req->ntvfs, io));
}

/****************************************************************************
 Reply to a writeclose (async reply)
****************************************************************************/
static void reply_writeclose_send(struct ntvfs_request *ntvfs)
{
	struct smbsrv_request *req;
	union smb_write *io;

	SMBSRV_CHECK_ASYNC_STATUS(io, union smb_write);

	/* construct reply */
	smbsrv_setup_reply(req, 1, 0);

	SSVAL(req->out.vwv, VWV(0), io->write.out.nwritten);

	smbsrv_send_reply(req);
}

/****************************************************************************
 Reply to a writeclose (Core+ protocol).
****************************************************************************/
void smbsrv_reply_writeclose(struct smbsrv_request *req)
{
	union smb_write *io;

	/* this one is pretty weird - the wct can be 6 or 12 */
	if (req->in.wct != 12) {
		SMBSRV_CHECK_WCT(req, 6);
	}

	SMBSRV_TALLOC_IO_PTR(io, union smb_write);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_writeclose_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	io->writeclose.level = RAW_WRITE_WRITECLOSE;
	io->writeclose.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	io->writeclose.in.count = SVAL(req->in.vwv, VWV(1));
	io->writeclose.in.offset = IVAL(req->in.vwv, VWV(2));
	io->writeclose.in.mtime = srv_pull_dos_date3(req->smb_conn, req->in.vwv + VWV(4));
	io->writeclose.in.data = req->in.data + 1;

	/* make sure they gave us the data they promised */
	if (req_data_oob(&req->in.bufinfo, io->writeclose.in.data, io->writeclose.in.count)) {
		smbsrv_send_error(req, NT_STATUS_FOOBAR);
		return;
	}

	SMBSRV_CHECK_FILE_HANDLE(io->writeclose.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_write(req->ntvfs, io));
}
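/*
 * The flush, close, lock and unlock handlers all complete through
 * reply_simple_send(), which is defined earlier in reply.c and is not
 * part of this excerpt; presumably it only checks the async status
 * returned by the backend and sends a reply with no parameter words,
 * which is why these parsers need no dedicated _send callback.
 */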
/****************************************************************************
 Reply to a lock.
****************************************************************************/
void smbsrv_reply_lock(struct smbsrv_request *req)
{
	union smb_lock *lck;

	/* parse request */
	SMBSRV_CHECK_WCT(req, 5);
	SMBSRV_TALLOC_IO_PTR(lck, union smb_lock);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_simple_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	lck->lock.level = RAW_LOCK_LOCK;
	lck->lock.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	lck->lock.in.count = IVAL(req->in.vwv, VWV(1));
	lck->lock.in.offset = IVAL(req->in.vwv, VWV(3));

	SMBSRV_CHECK_FILE_HANDLE(lck->lock.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_lock(req->ntvfs, lck));
}

/****************************************************************************
 Reply to a unlock.
****************************************************************************/
void smbsrv_reply_unlock(struct smbsrv_request *req)
{
	union smb_lock *lck;

	/* parse request */
	SMBSRV_CHECK_WCT(req, 5);
	SMBSRV_TALLOC_IO_PTR(lck, union smb_lock);
	SMBSRV_SETUP_NTVFS_REQUEST(reply_simple_send, NTVFS_ASYNC_STATE_MAY_ASYNC);

	lck->unlock.level = RAW_LOCK_UNLOCK;
	lck->unlock.in.file.ntvfs = smbsrv_pull_fnum(req, req->in.vwv, VWV(0));
	lck->unlock.in.count = IVAL(req->in.vwv, VWV(1));
	lck->unlock.in.offset = IVAL(req->in.vwv, VWV(3));

	SMBSRV_CHECK_FILE_HANDLE(lck->unlock.in.file.ntvfs);
	SMBSRV_CALL_NTVFS_BACKEND(ntvfs_lock(req->ntvfs, lck));
}

/****************************************************************************
 Reply to a tdis.
****************************************************************************/
void smbsrv_reply_tdis(struct smbsrv_request *req)
{
	struct smbsrv_handle *h, *nh;

	SMBSRV_CHECK_WCT(req, 0);

	/*
	 * TODO: cancel all pending requests on this tcon
	 */

	/*
	 * close all handles on this tcon
	 */
	for (h=req->tcon->handles.list; h; h=nh) {
		nh = h->next;
		talloc_free(h);
	}

	/* finally destroy the tcon */
	talloc_free(req->tcon);
	req->tcon = NULL;

	smbsrv_setup_reply(req, 0, 0);
	smbsrv_send_reply(req);
}

/****************************************************************************
 Reply to a echo.
 This is one of the few calls that is handled directly (the backends
 don't see it at all)