📄 chsc.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 *  drivers/s390/cio/chsc.c
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *			      IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>

#include <asm/cio.h>
#include <asm/chpid.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid : 1;
	u8 dev_valid : 1;
	u8 st	     : 3; /* subchannel type */
	u8 zeroes    : 3;
	u8  unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__ ((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	unsigned long page;
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!page)
		return -ENOMEM;
	ssd_area = (struct chsc_ssd_area *) page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;

	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out_free;
	}
	if (ssd_area->response.code != 0x0001) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		ret = -EIO;
		goto out_free;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out_free;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != 0) && (ssd_area->st != 2))
		goto out_free;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out_free:
	free_page(page);
	return ret;
}

static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (device_trigger_verify(sch) != 0)
			css_schedule_eval(sch->schid);
		return;
	}
	/* Request retry of internal operation. */
	device_set_intretry(sch);
	/* Call handler. */
	if (sch->driver && sch->driver->termination)
		sch->driver->termination(&sch->dev);
}

static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	struct chp_id *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	for (j = 0; j < 8; j++) {
		mask = 0x80 >> j;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[j] == chpid->id))
			break;
	}
	if (j >= 8)
		return 0;

	spin_lock_irq(sch->lock);

	stsch(sch->schid, &schib);
	if (!schib.pmcw.dnv)
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;

	if (check_for_io_on_path(sch, mask)) {
		if (device_is_online(sch))
			device_kill_io(sch);
		else {
			terminate_internal_io(sch);
			/* Re-start path verification. */
			if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
	} else {
		/* trigger path verification. */
		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);
		else if (sch->lpm == mask)
			goto out_unreg;
	}

	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	bus_for_each_dev(&css_bus_type, NULL, &chpid,
			 s390_subchannel_remove_chpid);
}

static int
s390_process_res_acc_new_sch(struct subchannel_id schid)
{
	struct schib schib;
	/*
	 * We don't know the device yet, but since a path
	 * may be available now to the device we'll have
	 * to do recognition again.
	 * Since we don't have any idea about which chpid
	 * that beast may be on we'll have to do a stsch
	 * on all devices, grr...
	 */
	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

struct res_acc_data {
	struct chp_id chpid;
	u32 fla_mask;
	u16 fla;
};

static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
			      struct res_acc_data *data)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(ssd->path_mask & mask))
			continue;
		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
			continue;
		if ((ssd->fla_valid_mask & mask) &&
		    ((ssd->fla[i] & data->fla_mask) != data->fla))
			continue;
		return mask;
	}
	return 0;
}

static int
__s390_process_res_acc(struct subchannel_id schid, void *data)
{
	int chp_mask, old_lpm;
	struct res_acc_data *res_data;
	struct subchannel *sch;

	res_data = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if a subchannel is newly available. */
		return s390_process_res_acc_new_sch(schid);

	spin_lock_irq(sch->lock);
	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
	if (chp_mask == 0)
		goto out;
	if (stsch(sch->schid, &sch->schib))
		goto out;
	old_lpm = sch->lpm;
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | chp_mask) & sch->opm;
	if (!old_lpm && sch->lpm)
		device_trigger_reprobe(sch);
	else if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out:
	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
}

static void s390_process_res_acc (struct res_acc_data *res_data)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
		res_data->chpid.id);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (res_data->fla != 0) {
		sprintf(dbf_txt, "fla%x", res_data->fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */
	for_each_subchannel(__s390_process_res_acc, res_data);
}

static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq&0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0]&0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0]&0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0]&0x000000ff);
}

struct chsc_sei_area {
	struct chsc_header request;
	u32 reserved1;
	u32 reserved2;
	u32 reserved3;
	struct chsc_header response;
	u32 reserved4;
	u8  flags;
	u8  vf;		/* validity flags */
	u8  rs;		/* reporting source */
	u8  cc;		/* content code */
	u16 fla;	/* full link address */
	u16 rsid;	/* reporting source id */
	u32 reserved5;
	u32 reserved6;
	u8 ccdf[4096 - 16 - 24];	/* content-code dependent field */
	/* ccdf has to be big enough for a link-incident record */
} __attribute__ ((packed));

static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
{
	struct res_acc_data res_data;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&res_data, 0, sizeof(struct res_acc_data));
	res_data.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		res_data.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			res_data.fla_mask = 0xffff;
		else
			/* link address */
			res_data.fla_mask = 0xff00;
	}
	s390_process_res_acc(&res_data);
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		printk(KERN_WARNING "cio: processing configure event %d for "
		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei(struct chsc_sei_area *sei_area)
{
	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident*/
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibiliy */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
			      sei_area->cc);
		break;
	}
}

void chsc_process_crw(void)
{
	struct chsc_sei_area *sei_area;

	if (!sei_page)
		return;
	/* Access to sei_page is serialized through machine check handler
	 * thread, so no need for locking. */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	do {
		memset(sei_area, 0, sizeof(*sei_area));
		sei_area->request.length = 0x0010;
		sei_area->request.code = 0x000e;
		if (chsc(sei_area))
			break;

		if (sei_area->response.code == 0x0001) {
			CIO_CRW_EVENT(4, "chsc: sei successful\n");
			chsc_process_sei(sei_area);
		} else {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
				      sei_area->response.code);
			break;
		}
	} while (sei_area->flags & 0x80);
}

static int
__chp_add_new_sch(struct subchannel_id schid)
{
	struct schib schib;

	if (stsch_err(schid, &schib))
		/* We're through */
		return -ENXIO;

	/* Put it on the slow path. */
	css_schedule_eval(schid);
	return 0;
}

static int
__chp_add(struct subchannel_id schid, void *data)
{
	int i, mask;
	struct chp_id *chpid;
	struct subchannel *sch;

	chpid = data;
	sch = get_subchannel_by_schid(schid);
	if (!sch)
		/* Check if the subchannel is now available. */
		return __chp_add_new_sch(schid);
	spin_lock_irq(sch->lock);
	for (i=0; i<8; i++) {
		mask = 0x80 >> i;
		if ((sch->schib.pmcw.pim & mask) &&
		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
			if (stsch(sch->schid, &sch->schib) != 0) {
				/* Endgame. */
				spin_unlock_irq(sch->lock);
				return -ENXIO;
			}
			break;
		}
	}
	if (i==8) {
		spin_unlock_irq(sch->lock);
		return 0;
	}
	sch->lpm = ((sch->schib.pmcw.pim &
		     sch->schib.pmcw.pam &
		     sch->schib.pmcw.pom)
		    | mask) & sch->opm;

	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);

	spin_unlock_irq(sch->lock);
	put_device(&sch->dev);
	return 0;
