
📄 viopath.c

📁 Source code of the linux-2.4.29 operating system
💻 C
📖 Page 1 of 2
	if (vio_handler[subtype] == NULL)
		return -EAGAIN;
	vio_handler[subtype] = NULL;
	return 0;
}

static void handleConfig(struct HvLpEvent *event)
{
	if (!event)
		return;
	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		printk(KERN_WARNING_VIO
		       "unexpected config request from partition %d",
		       event->xSourceLp);

		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	up((struct semaphore *) event->xCorrelationToken);
}

/* Initialization of the hosting partition */
void vio_set_hostlp(void)
{
	/* If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/* Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp);

	/* If we have a valid hosting LP, create a proc file system entry
	 * for config information
	 */
	if (viopath_hostLp != HvLpIndexInvalid) {
		iSeries_proc_callback(&vio_proc_init);
		vio_setHandler(viomajorsubtype_config, handleConfig);
	}
}

static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
{
	HvLpIndex remoteLp;
	int subtype =
	    (event->xSubtype & VIOMAJOR_SUBTYPE_MASK) >> VIOMAJOR_SUBTYPE_SHIFT;

	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
		remoteLp = event->xSourceLp;
		/* The isActive is checked because if the hosting partition
		 * went down and came back up it would not be active but it
		 * would have different source and target instances, in which
		 * case we'd want to reset them.  This case really protects
		 * against an unauthorized active partition sending interrupts
		 * or acks to this linux partition.
		 */
		if (viopathStatus[remoteLp].isActive
		    && (event->xSourceInstanceId !=
			viopathStatus[remoteLp].mTargetInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xSourceInstanceId);
			return;
		}

		if (viopathStatus[remoteLp].isActive
		    && (event->xTargetInstanceId !=
			viopathStatus[remoteLp].mSourceInst)) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "int msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xTargetInstanceId);
			return;
		}
	} else {
		remoteLp = event->xTargetLp;
		if (event->xSourceInstanceId !=
		    viopathStatus[remoteLp].mSourceInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mSourceInst,
			       event->xSourceInstanceId);
			return;
		}

		if (event->xTargetInstanceId !=
		    viopathStatus[remoteLp].mTargetInst) {
			printk(KERN_WARNING_VIO
			       "message from invalid partition. "
			       "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
			       viopathStatus[remoteLp].mTargetInst,
			       event->xTargetInstanceId);
			return;
		}
	}

	if (vio_handler[subtype] == NULL) {
		printk(KERN_WARNING_VIO
		       "unexpected virtual io event subtype %d from partition %d\n",
		       event->xSubtype, remoteLp);
		/* No handler.  Ack if necessary
		 */
		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	/* This innocuous little line is where all the real work happens
	 */
	(*vio_handler[subtype]) (event);
}

static void viopath_donealloc(void *parm, int number)
{
	struct doneAllocParms_t *doneAllocParmsp =
	    (struct doneAllocParms_t *) parm;
	doneAllocParmsp->number = number;
	up(doneAllocParmsp->sem);
}

static int allocateEvents(HvLpIndex remoteLp, int numEvents)
{
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	mf_allocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
			    numEvents,
			    &viopath_donealloc, &doneAllocParms);

	down(&Semaphore);

	return doneAllocParms.number;
}

int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
{
	int i;
	unsigned long flags;
	void *tempEventBuffer = NULL;
	int tempNumAllocated;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	/*
	 * NOTE: If VIO_MAX_SUBTYPES exceeds 16 then we need
	 * to allocate more than one page for the event_buffer.
	 */
	if (event_buffer[0] == NULL) {
		if (VIO_MAX_SUBTYPES <= 16) {
			tempEventBuffer =
			    (void *) get_free_page(GFP_KERNEL);
			if (tempEventBuffer == NULL)
				return -ENOMEM;
		} else {
			printk(KERN_INFO_VIO
			       "VIO_MAX_SUBTYPES > 16. Need more space.");
			return -ENOMEM;
		}
	}

	spin_lock_irqsave(&statuslock, flags);

	/*
	 * OK...we can fit 16 maximum-sized events (256 bytes) in
	 * each page (4096).
	 */
	if (event_buffer[0] == NULL) {
		event_buffer[0] = tempEventBuffer;
		atomic_set(&event_buffer_available[0], 1);
		/*
		 * Start at the second element because we've already
		 * set the pointer for the first element and set the
		 * pointers for every 256 bytes in the page we
		 * allocated earlier.
		 */
		for (i = 1; i < VIO_MAX_SUBTYPES; i++) {
			event_buffer[i] = event_buffer[i - 1] + 256;
			atomic_set(&event_buffer_available[i], 1);
		}
	} else {
		/*
		 * While we were fetching the pages, which shouldn't
		 * be done in a spin lock, another call to viopath_open
		 * decided to do the same thing and allocated storage
		 * and set the event_buffer before we could so we'll
		 * free the one that we allocated and continue with our
		 * viopath_open operation.
		 */
		free_page((unsigned long) tempEventBuffer);
	}

	viopathStatus[remoteLp].users[subtype]++;

	if (!viopathStatus[remoteLp].isOpen) {
		viopathStatus[remoteLp].isOpen = 1;
		HvCallEvent_openLpEventPath(remoteLp,
					    HvLpEvent_Type_VirtualIo);
		spin_unlock_irqrestore(&statuslock, flags);
		/*
		 * Don't hold the spinlock during an operation that
		 * can sleep.
		 */
		tempNumAllocated = allocateEvents(remoteLp, 1);
		spin_lock_irqsave(&statuslock, flags);

		viopathStatus[remoteLp].numberAllocated +=
		    tempNumAllocated;

		if (viopathStatus[remoteLp].numberAllocated == 0) {
			HvCallEvent_closeLpEventPath(remoteLp,
						     HvLpEvent_Type_VirtualIo);
			spin_unlock_irqrestore(&statuslock, flags);
			return -ENOMEM;
		}

		viopathStatus[remoteLp].mSourceInst =
		    HvCallEvent_getSourceLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].mTargetInst =
		    HvCallEvent_getTargetLpInstanceId(remoteLp,
						      HvLpEvent_Type_VirtualIo);

		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
					  &vio_handleEvent);

		sendMonMsg(remoteLp);

		printk(KERN_INFO_VIO
		       "Opening connection to partition %d, setting sinst %d, tinst %d\n",
		       remoteLp,
		       viopathStatus[remoteLp].mSourceInst,
		       viopathStatus[remoteLp].mTargetInst);
	}

	spin_unlock_irqrestore(&statuslock, flags);

	tempNumAllocated = allocateEvents(remoteLp, numReq);
	spin_lock_irqsave(&statuslock, flags);
	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
	spin_unlock_irqrestore(&statuslock, flags);

	return 0;
}

int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
{
	unsigned long flags;
	int i;
	int numOpen;
	struct doneAllocParms_t doneAllocParms;
	DECLARE_MUTEX_LOCKED(Semaphore);
	doneAllocParms.sem = &Semaphore;

	if ((remoteLp >= HvMaxArchitectedLps)
	    || (remoteLp == HvLpIndexInvalid))
		return -EINVAL;

	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return -EINVAL;

	spin_lock_irqsave(&statuslock, flags);

	/*
	 * If the viopath_close somehow gets called before a
	 * viopath_open it could decrement to -1 which is a non
	 * recoverable state so we'll prevent this from
	 * happening.
	 */
	if (viopathStatus[remoteLp].users[subtype] > 0) {
		viopathStatus[remoteLp].users[subtype]--;
	}
	spin_unlock_irqrestore(&statuslock, flags);

	mf_deallocateLpEvents(remoteLp, HvLpEvent_Type_VirtualIo,
			      numReq, &viopath_donealloc, &doneAllocParms);
	down(&Semaphore);

	spin_lock_irqsave(&statuslock, flags);
	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++) {
		numOpen += viopathStatus[remoteLp].users[i];
	}

	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		printk(KERN_INFO_VIO
		       "Closing connection to partition %d", remoteLp);

		HvCallEvent_closeLpEventPath(remoteLp,
					     HvLpEvent_Type_VirtualIo);
		viopathStatus[remoteLp].isOpen = 0;
		viopathStatus[remoteLp].isActive = 0;

		for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
			atomic_set(&event_buffer_available[i], 0);
		}

		/*
		 * Precautionary check to make sure we don't
		 * erroneously try to free a page that wasn't
		 * allocated.
		 */
		if (event_buffer[0] != NULL) {
			free_page((unsigned long) event_buffer[0]);
			for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
				event_buffer[i] = NULL;
			}
		}
	}
	spin_unlock_irqrestore(&statuslock, flags);
	return 0;
}

void *vio_get_event_buffer(int subtype)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
		return NULL;

	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
		return event_buffer[subtype];
	else
		return NULL;
}

void vio_free_event_buffer(int subtype, void *buffer)
{
	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
		printk(KERN_WARNING_VIO
		       "unexpected subtype %d freeing event buffer\n",
		       subtype);
		return;
	}

	if (atomic_read(&event_buffer_available[subtype]) != 0) {
		printk(KERN_WARNING_VIO
		       "freeing unallocated event buffer, subtype %d\n",
		       subtype);
		return;
	}

	if (buffer != event_buffer[subtype]) {
		printk(KERN_WARNING_VIO
		       "freeing invalid event buffer, subtype %d\n",
		       subtype);
	}

	atomic_set(&event_buffer_available[subtype], 1);
}

static const struct vio_error_entry vio_no_error =
    { 0, 0, "Non-VIO Error" };
static const struct vio_error_entry vio_unknown_error =
    { 0, EIO, "Unknown Error" };
static const struct vio_error_entry vio_default_errors[] = {
	{0x0001, EIO, "No Connection"},
	{0x0002, EIO, "No Receiver"},
	{0x0003, EIO, "No Buffer Available"},
	{0x0004, EBADRQC, "Invalid Message Type"},
	{0x0000, 0, NULL},
};

const struct vio_error_entry *vio_lookup_rc(const struct vio_error_entry
					    *local_table, u16 rc)
{
	const struct vio_error_entry *cur;
	if (!rc)
		return &vio_no_error;
	if (local_table)
		for (cur = local_table; cur->rc; ++cur)
			if (cur->rc == rc)
				return cur;
	for (cur = vio_default_errors; cur->rc; ++cur)
		if (cur->rc == rc)
			return cur;
	return &vio_unknown_error;
}
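For orientation, here is a hedged sketch (not part of viopath.c, and only page 1 of 2 of the file is shown above) of how an iSeries virtual I/O subtype driver might use this API. The names my_subtype_handler, my_driver_init, and the constant viomajorsubtype_mydriver are hypothetical placeholders; a real driver would use one of the viomajorsubtype_* values from the iSeries vio headers and include those headers.

/*
 * Hypothetical usage sketch -- not part of viopath.c.  Assumes the
 * iSeries vio/HvLpEvent headers are included; viomajorsubtype_mydriver
 * stands in for a real viomajorsubtype_* constant.
 */
static void my_subtype_handler(struct HvLpEvent *event)
{
	/* process one incoming LP event for this subtype; buffers for
	 * outgoing events can be borrowed with vio_get_event_buffer()
	 * and returned with vio_free_event_buffer() */
}

static int my_driver_init(void)
{
	int rc;

	/* make sure the hosting partition has been determined */
	vio_set_hostlp();
	if (viopath_hostLp == HvLpIndexInvalid)
		return -ENODEV;

	/* register the per-subtype handler, then open the path to the
	 * hosting partition with a small number of LP events */
	vio_setHandler(viomajorsubtype_mydriver, my_subtype_handler);
	rc = viopath_open(viopath_hostLp, viomajorsubtype_mydriver, 4);
	if (rc)
		return rc;

	return 0;
}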
