// atapipci.cpp
}
//
// Setup an interrupt vector.
//
if (!InterruptInitialize(pController->m_dwSysIntr, pPort->m_hIRQEvent, NULL, 0) )
{
DEBUGMSG( ZONE_INIT, (TEXT("ATAPIPCI: InterruptInitialize failed (DevId %x) at SysIntr=%ld!!!\r\n"),m_dwDeviceId, pController->m_dwSysIntr));
return FALSE;
}
pPort->m_dwSysIntr = pController->m_dwSysIntr;
m_pPort = pPort;
#ifdef DEBUG
pPort->PrintInfo();
#endif
CopyDiskInfoFromPort();
return TRUE;
}
BOOL CPCIDisk::TranslateAddress (PDWORD pdwAddr)
{
// Translate from a system address to a bus-relative address for the DMA bus controller
//
PHYSICAL_ADDRESS SystemLogicalAddress, TransLogicalAddress;
DWORD dwBus;
if(m_pPort->m_pCNTRL != NULL) {
dwBus = m_pPort->m_pCNTRL->m_dwBus;
} else {
dwBus = 0;
}
SystemLogicalAddress.HighPart = 0;
SystemLogicalAddress.LowPart = *pdwAddr;
if (!HalTranslateSystemAddress(PCIBus, dwBus, SystemLogicalAddress, &TransLogicalAddress))
return FALSE;
*pdwAddr = TransLogicalAddress.LowPart;
return TRUE;
}
//--------------------------------------------------------------------------
BOOL CPCIDisk::SetupDMA( PSG_BUF pSgBuf, DWORD dwSgCount, BOOL fRead)
{
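// SetupDMA builds the physical region descriptor (PRD) table that the bus-master
// IDE engine walks. Aligned scatter/gather buffers are locked in place and described
// directly; unaligned buffers are bounced through driver-owned physical pages
// (m_pPhysList), with m_pSGCopy recording what must be copied back after a read.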
DWORD dwAlignMask = ALIGN_VALUE - 1;
DWORD dwPageMask = UserKInfo[KINX_PAGESIZE] - 1;
DWORD iPage = 0, iPFN, iBuffer;
BOOL fUnalign = FALSE;
DEBUGMSG( ZONE_DMA, (TEXT("ATAPI:SetupDMA Request = %s SgCount=%ld\r\n"), fRead ? TEXT("Read") : TEXT("Write"), dwSgCount));
WriteBMCommand(0); // Turn off BusMaster in case it is already on !!!
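// Allocate the PRD table once and translate its physical address to a bus-relative
// address for the controller. The 0x0000FFFF alignment mask presumably forces a
// 64 KB aligned allocation, since the bus-master IDE (SFF-8038i) model requires the
// descriptor table to be dword aligned and not to cross a 64 KB boundary.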
if (!m_pPRD) {
m_pPRD = (PDMATable)AllocPhysMem( UserKInfo[KINX_PAGESIZE], PAGE_READWRITE, 0x0000FFFF, 0, &m_pPRDPhys);
if (!m_pPRD) {
goto ExitFailure;
}
if (!TranslateAddress (&m_pPRDPhys)) {
goto ExitFailure;
}
}
// The m_pPhysList keeps track of pages used for DMA buffers when the SG buffer is unaligned.
if (!m_pPhysList) {
m_pPhysList = (PPhysTable)VirtualAlloc( m_pStartMemory, UserKInfo[KINX_PAGESIZE], MEM_COMMIT, PAGE_READWRITE);
if (!m_pPhysList) {
goto ExitFailure;
}
// Allocate the minimum number of fixed pages.
for (DWORD i = 0; i < MIN_PHYS_PAGES; i++) {
m_pPhysList[i].pVirtualAddress = (LPBYTE)AllocPhysMem( UserKInfo[KINX_PAGESIZE],
PAGE_READWRITE, 0x00000FFF, 0, (LPDWORD)&m_pPhysList[i].pPhysicalAddress);
if (!m_pPhysList[i].pVirtualAddress || !TranslateAddress ((PDWORD)&m_pPhysList[i].pPhysicalAddress)) {
goto ExitFailure;
}
}
}
m_dwPhysCount = 0;
// The m_pSGCopy table keeps track of the mapping between SG buffers and DMA buffers when the SG buffer is unaligned and
// we are reading, so we can copy the read data back to the SG buffer. When the SG buffer is aligned, it keeps track of the
// SG buffers for a particular DMA session so we can unlock the buffers on completion.
if (!m_pSGCopy) {
m_pSGCopy = (PSGCopyTable)VirtualAlloc( m_pStartMemory + UserKInfo[KINX_PAGESIZE], UserKInfo[KINX_PAGESIZE], MEM_COMMIT, PAGE_READWRITE);
if (!m_pSGCopy) {
goto ExitFailure;
}
}
m_dwSGCount = 0;
if (!m_pPFNs) {
m_pPFNs = (PDWORD)VirtualAlloc( m_pStartMemory + 2*UserKInfo[KINX_PAGESIZE], UserKInfo[KINX_PAGESIZE], MEM_COMMIT, PAGE_READWRITE);
if (!m_pPFNs) {
goto ExitFailure;
}
}
// Check if either the buffer or the buffer length is unaligned.
for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {
if (((DWORD)pSgBuf[iBuffer].sb_buf & dwAlignMask) || ((DWORD)pSgBuf[iBuffer].sb_len & dwAlignMask)) {
fUnalign = TRUE;
break;
}
}
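// Unaligned path: stage the data through driver-owned physical pages. On reads we
// also record, per fragment, where the data must be copied back once DMA completes.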
if (fUnalign) {
DWORD dwCurPageOffset = 0;
for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {
LPBYTE pBuffer = (PBYTE)MapPtrToProcess((LPVOID)pSgBuf[iBuffer].sb_buf, GetCallerProcess());
DWORD dwBufferLeft = pSgBuf[iBuffer].sb_len;
while (dwBufferLeft) {
DWORD dwBytesInCurPage = UserKInfo[KINX_PAGESIZE] - dwCurPageOffset;
DWORD dwBytesToTransfer = (dwBufferLeft > dwBytesInCurPage) ? dwBytesInCurPage : dwBufferLeft;
// Allocate new page if necessary
if ((dwCurPageOffset == 0) && (m_dwPhysCount >= MIN_PHYS_PAGES)) {
m_pPhysList[m_dwPhysCount].pVirtualAddress = (LPBYTE)AllocPhysMem( UserKInfo[KINX_PAGESIZE],
PAGE_READWRITE, 0x00000FFF, 0, (LPDWORD)&m_pPhysList[m_dwPhysCount].pPhysicalAddress);
if (!m_pPhysList[m_dwPhysCount].pVirtualAddress || !TranslateAddress ((PDWORD)&m_pPhysList[m_dwPhysCount].pPhysicalAddress)) {
goto ExitFailure;
}
}
if (fRead) {
// Set up a SG copy entry on a read, so we can copy data from DMA buffer to the SG
// buffer after the DMA is complete
m_pSGCopy[m_dwSGCount].pSrcAddress = m_pPhysList[m_dwPhysCount].pVirtualAddress + dwCurPageOffset;
m_pSGCopy[m_dwSGCount].pDstAddress = pBuffer;
m_pSGCopy[m_dwSGCount].dwSize = dwBytesToTransfer;
m_dwSGCount++;
} else {
memcpy( m_pPhysList[m_dwPhysCount].pVirtualAddress + dwCurPageOffset, pBuffer, dwBytesToTransfer);
}
// If the buffer is larger than the space left in the page, then finish off
// this page by setting dwCurPageOffset = 0.
if (dwBufferLeft >= dwBytesInCurPage) {
dwCurPageOffset = 0;
} else {
dwCurPageOffset += dwBytesToTransfer;
}
// We have finished a page if the offset was reset to 0 or this is the last buffer. Add this to the PRD table.
if ((dwCurPageOffset == 0) || (iBuffer == (dwSgCount - 1))) {
m_pPRD[m_dwPhysCount].physAddr = (DWORD)m_pPhysList[m_dwPhysCount].pPhysicalAddress;
m_pPRD[m_dwPhysCount].size = dwCurPageOffset ? (USHORT)dwCurPageOffset : (USHORT)UserKInfo[KINX_PAGESIZE];
m_pPRD[m_dwPhysCount].EOTpad = 0;
m_dwPhysCount++;
}
dwBufferLeft -= dwBytesToTransfer;
pBuffer += dwBytesToTransfer;
}
}
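// Bit 15 of the last PRD entry is the EOT (end of table) marker that tells the
// bus master to stop after this descriptor.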
m_pPRD[m_dwPhysCount-1].EOTpad = 0x8000;
} else {
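// Aligned path: lock each SG buffer in place and build PRD entries straight from
// its physical page frame numbers, one entry per whole or partial page.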
DWORD dwTotalBytes = 0;
for (iBuffer = 0; iBuffer < dwSgCount; iBuffer++) {
LPBYTE pBuffer = (PBYTE)MapPtrToProcess((LPVOID)pSgBuf[iBuffer].sb_buf, GetCallerProcess());
// Total bytes from this SG buffer that still need PRD entries.
dwTotalBytes = pSgBuf[iBuffer].sb_len;
if (!LockPages (pBuffer, dwTotalBytes, m_pPFNs, fRead ? LOCKFLAG_WRITE : LOCKFLAG_READ)) {
goto ExitFailure;
}
// Add an SG copy entry for the area we lock, so that we can Unlock it at the end.
m_pSGCopy[m_dwSGCount].pSrcAddress = pBuffer;
m_pSGCopy[m_dwSGCount].pDstAddress = 0;
m_pSGCopy[m_dwSGCount].dwSize = dwTotalBytes;
m_dwSGCount++;
iPFN = 0;
while (dwTotalBytes) {
DWORD dwBytesToTransfer = UserKInfo[KINX_PAGESIZE];
if ((DWORD)pBuffer & dwPageMask) {
// Buffer is not page aligned, use up to the next page boundary
dwBytesToTransfer = UserKInfo[KINX_PAGESIZE] - ((DWORD)pBuffer & dwPageMask);
}
if (dwTotalBytes < dwBytesToTransfer) {
// Use what is left
dwBytesToTransfer = dwTotalBytes;
}
// Convert the locked page frame number back to a physical address
// (PA = PFN << PFN_SHIFT) and add the buffer's offset within the page.
m_pPRD[iPage].physAddr = (m_pPFNs[iPFN] << UserKInfo[KINX_PFN_SHIFT]) + ((DWORD)pBuffer & dwPageMask);
if (!TranslateAddress (&m_pPRD[iPage].physAddr)) {
goto ExitFailure;
}
m_pPRD[iPage].size = (USHORT)dwBytesToTransfer;
m_pPRD[iPage].EOTpad = 0;
iPage++;
iPFN++;
pBuffer += dwBytesToTransfer;
dwTotalBytes -= dwBytesToTransfer;
}
}
m_dwPhysCount = 0;
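// As in the unaligned path, flag the final descriptor with the EOT bit.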
m_pPRD[iPage-1].EOTpad = 0x8000;
}
return TRUE;
ExitFailure:
// Cleanup !!!
ASSERT(0);
//FreeDMABuffers();
return FALSE;
}
//--------------------------------------------------------------------------
BOOL CPCIDisk::BeginDMA(BOOL fRead)
{
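// Program the bus-master engine: hand it the PRD table, clear any stale status
// bits, select the transfer direction and then set the start bit.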
BYTE bStatus, bCommand;
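// Write back and discard cached data so the DMA engine and the CPU see a
// coherent view of the buffers.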
CacheSync(CACHE_SYNC_DISCARD);
WriteBMCommand(0);
WriteBMTable( m_pPRDPhys);
bStatus = ReadBMStatus();
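// Status register bits 1 (error) and 2 (interrupt) are write-one-to-clear, so
// writing the value back with 0x06 ORed in clears anything left over from the
// previous transfer.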
bStatus |= 0x06;
// bStatus |= 0x66;
WriteBMStatus(bStatus);
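// Command register: bit 0 starts the engine, bit 3 selects the direction
// (1 = device-to-memory, i.e. a disk read).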
if (fRead)
bCommand = 0x08 | 0x01;
else
bCommand = 0x00 | 0x01;
WriteBMCommand( bCommand);
bStatus = ReadBMStatus();
return TRUE;
}
//--------------------------------------------------------------------------
BOOL CPCIDisk::EndDMA()
{
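// Interpret the interrupt/active pair from the bus-master status register:
// interrupt with the engine still active typically means the device finished before
// the PRD list was exhausted, interrupt alone is a normal completion, and active
// without interrupt means the interrupt is late, so poll briefly before failing.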
BYTE bStatus = ReadBMStatus();
if ((bStatus & BM_STATUS_INTR) && (bStatus & BM_STATUS_ACTIVE)) {
DEBUGMSG( ZONE_DMA, (TEXT(" ATAPI:EndDMA This is an active state... status=%02x\n"), bStatus));
} else if ((bStatus & BM_STATUS_INTR) && !(bStatus & BM_STATUS_ACTIVE))
{
DEBUGMSG( ZONE_DMA, (TEXT(" ATAPI:EndDMA This is an inactive state... status=%02x\n"), bStatus));
}
else if (!(bStatus & BM_STATUS_INTR) && (bStatus & BM_STATUS_ACTIVE))
{
DEBUGMSG( ZONE_ERROR | ZONE_DMA, (TEXT(" ATAPI:EndDMA Interrupt Delayed... status=%02x\n"), bStatus));
DWORD dwCount = 0;
while (TRUE) {
StallExecution(100);
dwCount++;
bStatus = ReadBMStatus();
if ((bStatus & BM_STATUS_INTR) && !(bStatus & BM_STATUS_ACTIVE)) {
DEBUGMSG( ZONE_DMA, (TEXT(" ATAPI:EndDMA DMA Done after wait... status=%02x\n"), bStatus));
break;
} else {
DEBUGMSG( ZONE_ERROR | ZONE_DMA, (TEXT(" ATAPI:EndDMA Interrupt still delayed... status=%02x\n"), bStatus));
if (dwCount > 10) {
WriteBMCommand(0);
return FALSE;
}
}
}
}
else
{
if (bStatus & BM_STATUS_ERROR) {
DEBUGMSG( ZONE_ERROR | ZONE_DMA, (TEXT(" ATAPI:EndDMA Error... status=%02x\n"), bStatus));
ASSERT(0);
return FALSE;
}
}
WriteBMCommand(0);
return TRUE;
}
//--------------------------------------------------------------------------
BOOL CPCIDisk::AbortDMA()
{
DWORD i;
WriteBMCommand(0);
for (i = 0; i < m_dwSGCount; i++) {
if (!m_pSGCopy[i].pDstAddress) {
// This memory region needs to be unlocked
UnlockPages (m_pSGCopy[i].pSrcAddress, m_pSGCopy[i].dwSize);
}
}
// Free all but the first MIN_PHYS_PAGES pages. The first MIN_PHYS_PAGES pages are fixed.
for (i = MIN_PHYS_PAGES; i < m_dwPhysCount; i++) {
FreePhysMem (m_pPhysList[i].pVirtualAddress);
}
return FALSE;
}
//--------------------------------------------------------------------------
BOOL CPCIDisk::CompleteDMA(PSG_BUF pSgBuf, DWORD dwSgCount, BOOL fRead)
{
DWORD i;
for (i = 0; i < m_dwSGCount; i++) {
if (m_pSGCopy[i].pDstAddress) {
// This corresponds to an unaligned region. Copy it back to the SG buffer.
memcpy (m_pSGCopy[i].pDstAddress, m_pSGCopy[i].pSrcAddress, m_pSGCopy[i].dwSize);
}
else {
// This memory region needs to be unlocked
UnlockPages (m_pSGCopy[i].pSrcAddress, m_pSGCopy[i].dwSize);
}
}
// Free all but the first MIN_PHYS_PAGES pages. The first MIN_PHYS_PAGES pages are fixed.
for (i = MIN_PHYS_PAGES; i < m_dwPhysCount; i++) {
FreePhysMem (m_pPhysList[i].pVirtualAddress);
}
return TRUE;
}