/* rf_kintf.c - RAIDframe kernel interface */
    return;

    if (raidID >= NRAIDFRAME || !raidPtrs[raidID]) {
        bp->b_error = ENODEV;
        bp->b_flags |= B_ERROR;
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return;
    }
    raidPtr = raidPtrs[raidID];
    if (!raidPtr->valid) {
        bp->b_error = ENODEV;
        bp->b_flags |= B_ERROR;
        bp->b_resid = bp->b_bcount;
        biodone(bp);
        return;
    }
#if DKUSAGE > 0
    {
        int s = splbio();
        dku_start_io(DKU_RAIDFRAME_BUS, raidID, 0);
        splx(s);
    }
#endif /* DKUSAGE > 0 */
    bp->b_error = rf_DoAccessKernel(raidPtrs[raidID], bp, NULL, NULL, NULL);
    if (bp->b_error) {
        bp->b_flags |= B_ERROR;
    }
}

/* currently, the driver will never send a single I/O that is larger than one
 * stripe unit to any disk. So, as long as the SU is less than the max transfer
 * size of the device, there's nothing to do here.
 *
 * If I implement coalescing of I/Os in the disk queues, this needs to change.
 */
void rf_minphys(bp)
    struct buf *bp;
{
    /* printf("rf_minphys: bp->b_un.b_addr=%lx\n", bp->b_un.b_addr); */
}
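/*
 * If coalescing were ever added, rf_minphys would have to clamp each transfer
 * to one stripe unit, the same way an ordinary driver minphys clamps b_bcount
 * to its max transfer size. The sketch below is illustrative only, not part
 * of the driver: it assumes the stripe-unit size is reachable through
 * raidPtr->Layout.sectorsPerStripeUnit and that RF_DEV2RAIDID() is valid on
 * bp->b_dev at this point, neither of which this routine relies on today.
 */
#if 0
void rf_minphys_coalesced(bp)
    struct buf *bp;
{
    unsigned int raidID = RF_DEV2RAIDID(bp->b_dev);   /* assumed valid here */
    RF_Raid_t *raidPtr = raidPtrs[raidID];
    long maxBytes = raidPtr->Layout.sectorsPerStripeUnit <<
        raidPtr->logBytesPerSector;                   /* one SU, in bytes */

    if (bp->b_bcount > maxBytes)
        bp->b_bcount = maxBytes;                      /* clamp to one SU */
}
#endif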
int rf_read(dev, uio)
    dev_t dev;
    struct uio *uio;
{
    struct buf *bp;
    int ret_val;

    if (rf_kbooted != RFK_BOOT_GOOD) {
        return(EINVAL);
    }
    bp = ubc_bufget();
    if (bp == NULL)
        return(ENOMEM);
    ret_val = physio(rf_strategy, bp, dev, B_READ, rf_minphys, uio);
    ubc_buffree(bp);
    return(ret_val);
}

int rf_write(dev, uio)
    dev_t dev;
    struct uio *uio;
{
    struct buf *bp;
    int ret_val;

    if (rf_kbooted != RFK_BOOT_GOOD) {
        return(EINVAL);
    }
    bp = ubc_bufget();
    if (bp == NULL)
        return(ENOMEM);
    ret_val = physio(rf_strategy, bp, dev, B_WRITE, rf_minphys, uio);
    ubc_buffree(bp);
    return(ret_val);
}

/* returns the size of the RAID device. Returning -1 is what cdisk_size does
 * in the error case.
 */
int rf_size(dev)
    dev_t dev;
{
    unsigned int raidID = RF_DEV2RAIDID(dev);

    if (raidID >= NRAIDFRAME || !raidPtrs[raidID] || !raidPtrs[raidID]->valid)
        return(-1);
    return(raidPtrs[raidID]->totalSectors);
}

int rf_ioctl(dev, cmd, data, flag)
    dev_t dev;          /* major/minor number */
    int cmd;            /* ioctl command */
    caddr_t data;       /* user data buffer - already copied in */
    int flag;           /* unused */
{
    /* NEVER stack-allocate a config struct in the kernel. It's too big
     * and causes stack overflow.
     */
    RF_Config_t *k_cfg, *u_cfg;
    u_char *specific_buf;
    int retcode = 0, nbytes, spl, rw, row;
    struct rf_test_acc *ta;
    struct buf *bp;
    unsigned int raidID = RF_DEV2RAIDID(dev);
    struct rf_recon_req *rrcopy, *rr;
    RF_SparetWait_t *waitreq;
    int i;
    struct rf_test_acc *ta_p, *ta_copy;

    if (rf_kbooted != RFK_BOOT_GOOD) {
        return(EINVAL);
    }
    if (raidID >= NRAIDFRAME || !raidPtrs[raidID])
        return(EINVAL);
    db2_printf(("rf_ioctl: raidID=%d\n", raidID));
#if DKUSAGE > 0
    {
        minor_t minor;
        int error;

        minor = ((DKU_RAIDFRAME_BUS << BUS_SHIFT) | (raidID << TARGET_SHIFT)) << 6;
        error = dkusage_ioctl(makedev(0, minor), cmd, data, flag,
            raidPtrs[raidID]->valid);
        if (error >= 0)
            return(error);
    }
#endif /* DKUSAGE > 0 */
    db5_printf(("rf_ioctl: not dkusage\n"));

    /* the ioctls in this first switch are executed without looking up the
     * OS-specific device information */
    switch (cmd) {

    /* configure the system */
    case RAIDFRAME_CONFIGURE:
        db3_printf(("rf_ioctl: RAIDFRAME_CONFIGURE\n"));
        /* copy-in the configuration information */
        /* data points to a pointer to the configuration structure */
        u_cfg = *((RF_Config_t **) data);
        RF_Malloc(k_cfg, sizeof(RF_Config_t), (RF_Config_t *));
        if (k_cfg == NULL) {
            db3_printf(("rf_ioctl: ENOMEM for config\n"));
            return(ENOMEM);
        }
        retcode = copyin((caddr_t) u_cfg, (caddr_t) k_cfg, sizeof(RF_Config_t));
        if (retcode) {
            RF_Free(k_cfg, sizeof(RF_Config_t));  /* don't leak the config on failure */
            db3_printf(("rf_ioctl: retcode=%d copyin.1\n", retcode));
            return(retcode);
        }
        /* allocate a buffer for the layout-specific data, and copy it in */
        if (k_cfg->layoutSpecificSize) {
            if (k_cfg->layoutSpecificSize > 10000) {  /* sanity check */
                RF_Free(k_cfg, sizeof(RF_Config_t));
                db3_printf(("rf_ioctl: EINVAL\n"));
                return(EINVAL);
            }
            RF_Malloc(specific_buf, k_cfg->layoutSpecificSize, (u_char *));
            if (specific_buf == NULL) {
                RF_Free(k_cfg, sizeof(RF_Config_t));
                db3_printf(("rf_ioctl: ENOMEM\n"));
                return(ENOMEM);
            }
            retcode = copyin(k_cfg->layoutSpecific, (caddr_t) specific_buf,
                k_cfg->layoutSpecificSize);
            if (retcode) {
                RF_Free(specific_buf, k_cfg->layoutSpecificSize);
                RF_Free(k_cfg, sizeof(RF_Config_t));
                db3_printf(("rf_ioctl: retcode=%d copyin.2\n", retcode));
                return(retcode);
            }
        } else
            specific_buf = NULL;
        k_cfg->layoutSpecific = specific_buf;

        /* should do some kind of sanity check on the configuration.
         * Store the sum of all the bytes in the last byte? */

        /* configure the system */
        rf_pending_testaccs = 0;
        retcode = rf_Configure(raidPtrs[raidID], k_cfg);
#if DKUSAGE > 0
        if (retcode == 0) {
            /*
             * let dkusage see a bogus I/O so it'll know
             * we exist (so group device queries on the
             * raidframe device won't fail)
             */
            int s = splbio();
            dku_start_io(DKU_RAIDFRAME_BUS, raidID, 0);
            dku_end_io(DKU_RAIDFRAME_BUS, raidID, 0, CAM_DIR_NONE, 0);
            splx(s);
        }
#endif /* DKUSAGE > 0 */
        raidPtrs[raidID]->raidid = raidID;

        /* free the buffers. No return code here. */
        if (k_cfg->layoutSpecificSize) {
            RF_Free(specific_buf, k_cfg->layoutSpecificSize);
        }
        RF_Free(k_cfg, sizeof(RF_Config_t));
        db3_printf(("rf_ioctl: retcode=%d RAIDFRAME_CONFIGURE\n", retcode));
        return(retcode);
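        /*
         * Note on the calling convention above: "data points to a pointer"
         * means the caller hands the ioctl the address of its RF_Config_t
         * pointer, not the struct itself. A hedged user-level sketch of
         * driving RAIDFRAME_CONFIGURE follows; the device path is
         * hypothetical and the fields shown are only examples of what
         * RF_Config_t carries:
         */
#if 0
        /* user code, not kernel code; needs <fcntl.h> and <sys/ioctl.h> */
        RF_Config_t cfg;                 /* stack allocation is fine in user space */
        RF_Config_t *cfg_ptr = &cfg;
        int fd = open("/dev/raidframe", O_RDWR);   /* hypothetical device node */

        bzero((char *) &cfg, sizeof(cfg));
        /* ... fill in numRow, numCol, devnames, parityConfig, etc. ... */
        if (ioctl(fd, RAIDFRAME_CONFIGURE, &cfg_ptr) < 0)
            perror("RAIDFRAME_CONFIGURE");
#endif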
    /* shutdown the system */
    case RAIDFRAME_SHUTDOWN:
        /* the intention here was to disallow shutdowns while raidframe is
         * mounted, but it doesn't work because the shutdown ioctl calls
         * rf_open */
        if (rf_pending_testaccs > 0) {
            printf("RAIDFRAME: Can't shutdown because there are %d pending test accs\n",
                rf_pending_testaccs);
            return(EINVAL);
        }
        if (rf_debugKernelAccess) {
            printf("call shutdown\n");
        }
        retcode = rf_Shutdown(raidPtrs[raidID]);
        return(retcode);

    /* initialize all parity */
    case RAIDFRAME_REWRITEPARITY:
        if (raidPtrs[raidID]->Layout.map->faultsTolerated == 0)
            return(EINVAL);   /* can't rewrite parity if there is none */
        /* borrow the thread of the requesting process */
        retcode = rf_RewriteParity(raidPtrs[raidID]);
        if (retcode)
            retcode = EIO;    /* return I/O Error if the parity rewrite fails */
        return(retcode);

    /* issue a test-unit-ready through raidframe to the indicated device */
    case RAIDFRAME_TUR:
        retcode = rf_SCSI_DoTUR(0, 0, 0, 0, *(dev_t *) data);  /* debug only */
        return(retcode);

    /* issue a test access through raidframe.
     * DO NOT USE THIS CALL UNLESS YOU KNOW WHAT YOU'RE DOING.
     * THIS CALL RETURNS BEFORE THE I/O IS COMPLETE.
     * IF THE CALLING PROGRAM EXITS WHILE ANY I/Os ARE PENDING HERE, THE
     * KERNEL WILL PANIC BECAUSE IT WILL FIND WIRED PAGES IN A TERMINATING
     * THREAD.
     */
    case RAIDFRAME_TEST_ACC:
    {
        RF_Raid_t *raid = raidPtrs[raidID];

        ta = (struct rf_test_acc *) data;
        retcode = 0;
        if (!raid->valid)
            return(ENODEV);
        /* make a copy of the descriptor */
        ta_copy = (void *) rf_DupTestAccDesc(ta);
        if (!ta_copy)
            return(ENOMEM);
        if (ta->type == RF_IO_TYPE_READ || ta->type == RF_IO_TYPE_WRITE) {
            nbytes = ta->numSector << raid->logBytesPerSector;

            /* check accessibility of the user buffer */
            if (ta->type == RF_IO_TYPE_READ)
                rw = B_WRITE;
            else
                rw = B_READ;
            if (!(retcode = useracc(ta->buf, (u_int) nbytes, rw))) {
                printf("RAIDFRAME: useracc says no: retcode = %d\n", retcode);
                rf_FreeTestAccDesc(ta_copy);
                return(EFAULT);
            }
            /* wire the user buffer */
            if ((retcode = vm_map_pageable(current_task()->map,
                trunc_page(ta->buf), round_page(ta->buf + nbytes),
                VM_PROT_WRITE | VM_PROT_READ)) != KERN_SUCCESS) {
                printf("RAIDFRAME: vm_map_pageable says no: retcode = %d\n", retcode);
                rf_FreeTestAccDesc(ta_copy);
                return(EFAULT);
            }
            /* create a buf struct to describe this RAID I/O */
            bp = ubc_bufget();
            if (bp == NULL) {
                rf_FreeTestAccDesc(ta_copy);
                return(ENOMEM);
            }
            InitBP(bp, (ta->type == RF_IO_TYPE_WRITE) ? B_WRITE : B_READ,
                dev, ta->startSector, ta->numSector, ta->buf,
                NULL, NULL, raidPtrs[raidID]->logBytesPerSector, u.u_procp);
            ta_copy->bp = (void *) bp;

            /* fire off the access & return without blocking */
            rf_pending_testaccs++;
            retcode = rf_DoAccessKernel(raidPtrs[raidID], bp,
                RF_DAG_TEST_ACCESS, rf_AsyncTestAccCallbackFunc, ta_copy);
        } else {
            /* mark this NOP as done */
            rf_AsyncTestAccCallbackFunc(ta_copy);
        }

        /* piggyback the return of up to 10 completed testacc descriptors
         * on this I/O */
        RF_LOCK_MUTEX(rf_async_done_q_mutex);
        for (i = 0; i < 10 && rf_async_done_qh; i++) {
            /* yank descriptor from FIFO queue */
            ta_p = rf_async_done_qh;
            rf_async_done_qh = ta_p->next;
            if (!rf_async_done_qh)
                rf_async_done_qt = NULL;
            ta_p->next = NULL;
            /* install user pointer in this I/O descriptor */
            ta->returnBufs[i] = ta_p->myaddr;
            if (ta_p->type == RF_IO_TYPE_READ || ta_p->type == RF_IO_TYPE_WRITE) {
                rf_pending_testaccs--;
                rf_WrapUpTestAcc(ta_p, raid);  /* free memory */
            }
            rf_FreeTestAccDesc(ta_p);          /* release the copy we made */
        }
        if (i < 10)
            ta->returnBufs[i] = NULL;          /* mark end of list */
        RF_UNLOCK_MUTEX(rf_async_done_q_mutex);
        return(retcode);
    }
        break;
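        /*
         * Reaping those piggybacked completions from user space: every
         * RAIDFRAME_TEST_ACC ioctl hands back up to 10 finished descriptors
         * in returnBufs[], with a NULL entry marking the end of the list
         * when fewer than 10 were pending. A hedged user-level sketch
         * follows; polling with a NOP-typed request mirrors the
         * non-read/write branch above, but the surrounding code is
         * illustrative only:
         */
#if 0
        /* user code, not kernel code */
        struct rf_test_acc req;
        int i;

        bzero((char *) &req, sizeof(req));
        req.type = RF_IO_TYPE_NOP;    /* anything but read/write is a NOP */
        if (ioctl(fd, RAIDFRAME_TEST_ACC, &req) == 0) {
            for (i = 0; i < 10 && req.returnBufs[i] != NULL; i++) {
                /* returnBufs[i] is the user-space address of an earlier
                 * rf_test_acc whose I/O has now completed */
            }
        }
#endif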
    case RAIDFRAME_GET_INFO:
    {
        RF_Raid_t *raid = raidPtrs[raidID];
        RF_DeviceConfig_t *cfg, **ucfgp;
        int i, j, d;

        if (!raid->valid)
            return(ENODEV);
        ucfgp = (RF_DeviceConfig_t **) data;
        RF_Malloc(cfg, sizeof(RF_DeviceConfig_t), (RF_DeviceConfig_t *));
        if (cfg == NULL)
            return(ENOMEM);
        bzero((char *) cfg, sizeof(RF_DeviceConfig_t));
        cfg->rows = raid->numRow;
        cfg->cols = raid->numCol;
        cfg->ndevs = raid->numRow * raid->numCol;
        if (cfg->ndevs >= RF_MAX_DISKS) {
            cfg->ndevs = 0;
            RF_Free(cfg, sizeof(RF_DeviceConfig_t));  /* don't leak on the error path */
            return(ENOMEM);
        }
        cfg->nspares = raid->numSpare;
        if (cfg->nspares >= RF_MAX_DISKS) {
            cfg->nspares = 0;
            RF_Free(cfg, sizeof(RF_DeviceConfig_t));
            return(ENOMEM);
        }
        cfg->maxqdepth = raid->maxQueueDepth;
        d = 0;
        for (i = 0; i < cfg->rows; i++) {
            for (j = 0; j < cfg->cols; j++) {
                cfg->devs[d] = raid->Disks[i][j];
                d++;
            }
        }
        for (j = cfg->cols, i = 0; i < cfg->nspares; i++, j++) {
            cfg->spares[i] = raid->Disks[0][j];
        }
        retcode = copyout((caddr_t) cfg, (caddr_t) *ucfgp,
            sizeof(RF_DeviceConfig_t));
        RF_Free(cfg, sizeof(RF_DeviceConfig_t));
        return(retcode);
    }
        break;

    case RAIDFRAME_RESET_ACCTOTALS:
    {
        RF_Raid_t *raid = raidPtrs[raidID];

        bzero(&raid->acc_totals, sizeof(raid->acc_totals));
        return(0);
    }
        break;

    case RAIDFRAME_GET_ACCTOTALS:
    {
        RF_AccTotals_t *totals = (RF_AccTotals_t *) data;
        RF_Raid_t *raid = raidPtrs[raidID];

        *totals = raid->acc_totals;
        return(0);
    }
        break;

    case RAIDFRAME_KEEP_ACCTOTALS:
    {
        RF_Raid_t *raid = raidPtrs[raidID];
        int *keep = (int *) data;

        raid->keep_acc_totals = *keep;
        return(0);
    }
        break;

    case RAIDFRAME_GET_SIZE:
        *(int *) data = raidPtrs[raidID]->totalSectors;
        return(0);

#if DFSTRACE > 0
    /* start/stop tracing accesses */
    case RAIDFRAME_START_ATRACE:
        rf_DFSTraceAccesses = 1;
        return(0);
    case RAIDFRAME_STOP_ATRACE:
        rf_DFSTraceAccesses = 0;
        return(0);
#endif /* DFSTRACE > 0 */

#if RAIDFRAME_RECON > 0
    /* fail a disk & optionally start reconstruction */
    case RAIDFRAME_FAIL_DISK:
        rr = (struct rf_recon_req *) data;
        if (rr->row < 0 || rr->row >= raidPtrs[raidID]->numRow ||
            rr->col < 0 || rr->col >= raidPtrs[raidID]->numCol)
            return(EINVAL);