
📄 pktcdvd.c

📁 Linux block device driver source code
💻 C
📖 Page 1 of 5
		reads_queued = (pd->iosched.read_queue != NULL);
		writes_queued = (pd->iosched.write_queue != NULL);
		spin_unlock(&pd->iosched.lock);

		if (!reads_queued && !writes_queued)
			break;

		if (pd->iosched.writing) {
			int need_write_seek = 1;
			spin_lock(&pd->iosched.lock);
			bio = pd->iosched.write_queue;
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_sector == pd->iosched.last_write))
				need_write_seek = 0;
			if (need_write_seek && reads_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK("pktcdvd: write, waiting\n");
					break;
				}
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
			}
		} else {
			if (!reads_queued && writes_queued) {
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					VPRINTK("pktcdvd: read, waiting\n");
					break;
				}
				pd->iosched.writing = 1;
			}
		}

		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing) {
			bio = pkt_get_list_first(&pd->iosched.write_queue,
						 &pd->iosched.write_queue_tail);
		} else {
			bio = pkt_get_list_first(&pd->iosched.read_queue,
						 &pd->iosched.read_queue_tail);
		}
		spin_unlock(&pd->iosched.lock);

		if (!bio)
			continue;

		if (bio_data_dir(bio) == READ)
			pd->iosched.successive_reads += bio->bi_size >> 10;
		else {
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio->bi_sector + bio_sectors(bio);
		}
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		} else {
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
			}
		}

		atomic_inc(&pd->cdrw.pending_bios);
		generic_make_request(bio);
	}
}

/*
 * Special care is needed if the underlying block device has a small
 * max_phys_segments value.
 */
static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
{
	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
		/*
		 * The cdrom device can handle one segment/frame
		 */
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
		/*
		 * We can handle this case at the expense of some extra memory
		 * copies during write operations
		 */
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
		return 0;
	} else {
		printk("pktcdvd: cdrom max_phys_segments too small\n");
		return -EIO;
	}
}

/*
 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 */
static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs,
			      struct page *dst_page, int dst_offs)
{
	unsigned int copy_size = CD_FRAMESIZE;

	while (copy_size > 0) {
		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
			src_bvl->bv_offset + offs;
		void *vto = page_address(dst_page) + dst_offs;
		int len = min_t(int, copy_size, src_bvl->bv_len - offs);

		BUG_ON(len < 0);
		memcpy(vto, vfrom, len);
		kunmap_atomic(vfrom, KM_USER0);

		seg++;
		offs = 0;
		dst_offs += len;
		copy_size -= len;
	}
}
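/*
 * Illustrative sketch, not part of the original driver: the frame/page
 * arithmetic used by pkt_gather_data() and pkt_start_write() below.
 * Assuming CD_FRAMESIZE == 2048 and a 4096-byte PAGE_SIZE, each page in
 * pkt->pages[] holds exactly two frames, so for example frame 5 lands in
 * page 2 at offset 2048.  This hypothetical helper just names that mapping.
 */
static inline void pkt_frame_to_page(int f, int *p, int *offs)
{
	*p = (f * CD_FRAMESIZE) / PAGE_SIZE;	/* index into pkt->pages[] */
	*offs = (f * CD_FRAMESIZE) % PAGE_SIZE;	/* byte offset within that page */
}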
/*
 * Copy all data for this packet to pkt->pages[], so that
 * a) The number of required segments for the write bio is minimized, which
 *    is necessary for some scsi controllers.
 * b) The data can be used as cache to avoid read requests if we receive a
 *    new write request for the same zone.
 */
static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages,
				int *offsets)
{
	int f, p, offs;

	/* Copy all data to pkt->pages[] */
	p = 0;
	offs = 0;
	for (f = 0; f < pkt->frames; f++) {
		if (pages[f] != pkt->pages[p]) {
			void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
			void *vto = page_address(pkt->pages[p]) + offs;
			memcpy(vto, vfrom, CD_FRAMESIZE);
			kunmap_atomic(vfrom, KM_USER0);
			pages[f] = pkt->pages[p];
			offsets[f] = offs;
		} else {
			BUG_ON(offsets[f] != offs);
		}
		offs += CD_FRAMESIZE;
		if (offs >= PAGE_SIZE) {
			offs = 0;
			p++;
		}
	}
}

static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);

	if (err)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	}
	pkt_bio_finished(pd);

	return 0;
}

static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
{
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	if (bio->bi_size)
		return 1;

	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
	return 0;
}

/*
 * Schedule reads for the holes in a packet
 */
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	int frames_read = 0;
	struct bio *bio;
	int f;
	char written[PACKET_MAX_SIZE];

	BUG_ON(!pkt->orig_bios);

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);

	/*
	 * Figure out which frames we need to read before we can write.
	 */
	memset(written, 0, sizeof(written));
	spin_lock(&pkt->lock);
	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++)
			written[f] = 1;
	}
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		VPRINTK("pkt_gather_data: zone %llx cached\n",
			(unsigned long long)pkt->sector);
		goto out_account;
	}

	/*
	 * Schedule reads for missing parts of the packet.
	 */
	for (f = 0; f < pkt->frames; f++) {
		int p, offset;
		if (written[f])
			continue;
		bio = pkt->r_bios[f];
		bio_init(bio);
		bio->bi_max_vecs = 1;
		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio->bi_bdev = pd->bdev;
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;

		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
			BUG();

		atomic_inc(&pkt->io_wait);
		bio->bi_rw = READ;
		pkt_queue_bio(pd, bio);
		frames_read++;
	}

out_account:
	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
}
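/*
 * Worked example for pkt_gather_data() above, under the assumption of a
 * 32-frame packet (pd->settings.size == 128 sectors, CD_FRAMESIZE == 2048):
 * if the queued write bios cover only sectors 64..127 of the zone, frames
 * 16..31 get marked in written[] and reads are scheduled for frames 0..15
 * alone, so the eventual packet write still carries a full 64 KB.  A
 * hypothetical helper, not part of the driver, for counting those holes:
 */
static int pkt_count_missing_frames(const char *written, int frames)
{
	int f, missing = 0;

	for (f = 0; f < frames; f++)
		if (!written[f])
			missing++;	/* frame must be read before the packet write */
	return missing;
}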
/*
 * Find a packet matching zone, or the least recently used packet if
 * there is no match.
 */
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
{
	struct packet_data *pkt;

	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;
			return pkt;
		}
	}
	BUG();
	return NULL;
}

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	} else {
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
	}
}

/*
 * recover a failed write, query for relocation if possible
 *
 * returns 1 if recovery is possible, or 0 if not
 *
 */
static int pkt_start_recovery(struct packet_data *pkt)
{
	/*
	 * FIXME. We need help from the file system to implement
	 * recovery handling.
	 */
	return 0;
#if 0
	struct request *rq = pkt->rq;
	struct pktcdvd_device *pd = rq->rq_disk->private_data;
	struct block_device *pkt_bdev;
	struct super_block *sb = NULL;
	unsigned long old_block, new_block;
	sector_t new_sector;

	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
	if (pkt_bdev) {
		sb = get_super(pkt_bdev);
		bdput(pkt_bdev);
	}

	if (!sb)
		return 0;

	if (!sb->s_op || !sb->s_op->relocate_blocks)
		goto out;

	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
		goto out;

	new_sector = new_block * (CD_FRAMESIZE >> 9);
	pkt->sector = new_sector;

	pkt->bio->bi_sector = new_sector;
	pkt->bio->bi_next = NULL;
	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
	pkt->bio->bi_idx = 0;

	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
	BUG_ON(pkt->bio->bi_private != pkt);

	drop_super(sb);
	return 1;

out:
	drop_super(sb);
	return 0;
#endif
}

static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
{
#if PACKET_DEBUG > 1
	static const char *state_name[] = {
		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
	};
	enum packet_data_state old_state = pkt->state;
	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
		state_name[old_state], state_name[state]);
#endif
	pkt->state = state;
}
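/*
 * Note on the free list handling above (a reading of the code, not driver
 * documentation): pkt_put_packet_data() puts packets with a valid cache at
 * the head of pkt_free_list and packets without one at the tail, while
 * pkt_get_packet_data() falls back to the tail entry when no zone matches.
 * Cacheless packets are therefore recycled first, and a packet's cached
 * zone data survives in the free list for as long as possible.
 */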
/*
 * Scan the work queue to see if we can start a new packet.
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
{
	struct packet_data *pkt, *p;
	struct bio *bio = NULL;
	sector_t zone = 0; /* Suppress gcc warning */
	struct pkt_rb_node *node, *first_node;
	struct rb_node *n;

	VPRINTK("handle_queue\n");

	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		VPRINTK("handle_queue: no pkt\n");
		return 0;
	}

	/*
	 * Try to find a zone we are not already working on.
	 */
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	if (!first_node) {
		n = rb_first(&pd->bio_queue);
		if (n)
			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
	}
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = ZONE(bio->bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
				bio = NULL;
				goto try_next_bio;
			}
		}
		break;
try_next_bio:
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			if (n)
				node = rb_entry(n, struct pkt_rb_node, rb_node);
		}
		if (node == first_node)
			node = NULL;
	}
	spin_unlock(&pd->lock);
	if (!bio) {
		VPRINTK("handle_queue: no bio\n");
		return 0;
	}

	pkt = pkt_get_packet_data(pd, zone);
	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	pkt->frames = pd->settings.size >> 2;
	pkt->write_size = 0;

	/*
	 * Scan work queue for bios in the same zone and link them
	 * to this packet.
	 */
	spin_lock(&pd->lock);
	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		VPRINTK("pkt_handle_queue: found zone=%llx\n",
			(unsigned long long)ZONE(bio->bi_sector, pd));
		if (ZONE(bio->bi_sector, pd) != zone)
			break;
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	}
	spin_unlock(&pd->lock);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	pkt_set_state(pkt, PACKET_WAITING_STATE);
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);

	return 1;
}

/*
 * Assemble a bio to write one packet and queue the bio for processing
 * by the underlying block device.
 */
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
{
	struct bio *bio;
	struct page *pages[PACKET_MAX_SIZE];
	int offsets[PACKET_MAX_SIZE];
	int f;
	int frames_write;

	for (f = 0; f < pkt->frames; f++) {
		pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
	}

	/*
	 * Fill-in pages[] and offsets[] with data from orig_bios.
	 */
	frames_write = 0;
	spin_lock(&pkt->lock);
	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
		int segment = bio->bi_idx;
		int src_offs = 0;
		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_size / CD_FRAMESIZE;
		BUG_ON(first_frame < 0);
		BUG_ON(first_frame + num_frames > pkt->frames);
		for (f = first_frame; f < first_frame + num_frames; f++) {
			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);

			while (src_offs >= src_bvl->bv_len) {
				src_offs -= src_bvl->bv_len;
				segment++;
				BUG_ON(segment >= bio->bi_vcnt);
				src_bvl = bio_iovec_idx(bio, segment);
			}

			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
				pages[f] = src_bvl->bv_page;
				offsets[f] = src_bvl->bv_offset + src_offs;
			} else {
				pkt_copy_bio_data(bio, segment, src_offs,
						  pages[f], offsets[f]);
			}
			src_offs += CD_FRAMESIZE;
			frames_write++;
		}
	}
	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
	spin_unlock(&pkt->lock);

	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
		frames_write, (unsigned long long)pkt->sector);
	BUG_ON(frames_write != pkt->write_size);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
		pkt_make_local_copy(pkt, pages, offsets);
		pkt->cache_valid = 1;
	} else {
		pkt->cache_valid = 0;
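/*
 * Note on the write path above (a reading of the code, not driver
 * documentation): when a frame's data sits contiguously inside a single
 * bio segment, pages[f]/offsets[f] are simply pointed at the source page
 * and no copy is made; only frames that straddle segments go through
 * pkt_copy_bio_data().  pkt_make_local_copy() then consolidates the data
 * into pkt->pages[] whenever PACKET_MERGE_SEGS is set or the packet is
 * only partially written, which both bounds the segment count of the
 * write bio and leaves the zone's data cached for later writes.
 */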
