/* block.c — Linux block-device driver glue for the Mach device
   interface.  (Recovered from a web code-viewer scrape; viewer
   chrome removed, code reformatted.)  */
/* --- Tail of device_open (the head of the function is outside this
   chunk).  The device was found already open: hand back the existing
   device structure and take another reference.  */
	  *devp = &bd->device;
	  bd->open_count++;
	}
      return err;
    }

  if (err)
    {
      /* The open failed: tear down the partially built device —
         detach and destroy its port, then free the block_data.  */
      if (bd)
	{
	  if (bd->port != IP_NULL)
	    {
	      ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
	      ipc_port_dealloc_kernel (bd->port);
	    }
	  kfree ((vm_offset_t) bd, sizeof (struct block_data));
	  bd = NULL;
	}
    }
  else
    {
      /* Success: this is the first reference; link the device onto
         the global list of open block devices.  */
      bd->open_count = 1;
      bd->next = open_list;
      open_list = bd;
    }

  if (IP_VALID (reply_port))
    ds_device_open_reply (reply_port, reply_port_type, err,
			  dev_to_port (bd));
  else if (! err)
    /* Nobody is listening for the reply, so the reference we just
       created would leak; drop it again.  */
    device_close (bd);
  return MIG_NO_REPLY;
}

/* Release one reference to the open block device D.  When the last
   reference is dropped: wait for in-flight I/O (bd->iocount) to
   drain, unlink the device from the open list, invoke the driver's
   release routine if it has one, and destroy the device port and the
   block_data itself.  Open/close are serialized per device_struct
   via the ds->busy / ds->want sleep-wakeup protocol.  Always returns
   D_SUCCESS.  */
static io_return_t
device_close (void *d)
{
  struct block_data *bd = d, *bdp, **prev;
  struct device_struct *ds = bd->ds;
  DECL_DATA;

  INIT_DATA ();

  /* Wait for any other open/close to complete.  */
  while (ds->busy)
    {
      ds->want = 1;
      assert_wait ((event_t) ds, FALSE);
      schedule ();
    }
  ds->busy = 1;

  if (--bd->open_count == 0)
    {
      /* Wait for pending I/O to complete.  */
      while (bd->iocount > 0)
	{
	  bd->want = 1;
	  assert_wait ((event_t) bd, FALSE);
	  schedule ();
	}

      /* Remove device from open list.  */
      prev = &open_list;
      bdp = open_list;
      while (bdp)
	{
	  if (bdp == bd)
	    {
	      *prev = bdp->next;
	      break;
	    }
	  prev = &bdp->next;
	  bdp = bdp->next;
	}
      /* The device must have been on the list.  */
      assert (bdp == bd);

      if (ds->fops->release)
	(*ds->fops->release) (&td.inode, &td.file);

      ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
      ipc_port_dealloc_kernel (bd->port);
      kfree ((vm_offset_t) bd, sizeof (struct block_data));
    }

  ds->busy = 0;
  if (ds->want)
    {
      ds->want = 0;
      thread_wakeup ((event_t) ds);
    }
  return D_SUCCESS;
}

/* Largest transfer per I/O pass: the number of bytes covered by one
   page-list VM copy object.  */
#define MAX_COPY (VM_MAP_COPY_PAGE_LIST_MAX << PAGE_SHIFT)

/* Check block BN and size COUNT for I/O validity to/from device BD.
   Set *OFF to the byte offset where I/O is to begin and return the
   size of transfer.
*/static intcheck_limit (struct block_data *bd, loff_t *off, long bn, int count){ int major, minor; long maxsz, sz; struct disklabel *lp = NULL; if (count <= 0) return count; major = MAJOR (bd->dev); minor = MINOR (bd->dev); if (bd->ds->gd) { if (bd->part >= 0) { assert (bd->ds->labels); assert (bd->ds->labels[minor]); lp = bd->ds->labels[minor]; maxsz = lp->d_partitions[bd->part].p_size; } else maxsz = bd->ds->gd->part[minor].nr_sects; } else { assert (blk_size[major]); maxsz = blk_size[major][minor] << (BLOCK_SIZE_BITS - 9); } assert (maxsz > 0); sz = maxsz - bn; if (sz <= 0) return sz; if (sz < ((count + 511) >> 9)) count = sz << 9; if (lp) bn += (lp->d_partitions[bd->part].p_offset - bd->ds->gd->part[minor].start_sect); *off = (loff_t) bn << 9; bd->iocount++; return count;}static io_return_tdevice_write (void *d, ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, dev_mode_t mode, recnum_t bn, io_buf_ptr_t data, unsigned int orig_count, int *bytes_written){ int resid, amt, i; int count = (int) orig_count; io_return_t err = 0; vm_map_copy_t copy; vm_offset_t addr, uaddr; vm_size_t len, size; struct block_data *bd = d; DECL_DATA; INIT_DATA (); *bytes_written = 0; if (bd->mode == O_RDONLY) return D_INVALID_OPERATION; if (! bd->ds->fops->write) return D_READ_ONLY; count = check_limit (bd, &td.file.f_pos, bn, count); if (count < 0) return D_INVALID_SIZE; if (count == 0) { vm_map_copy_discard (copy); return 0; } resid = count; copy = (vm_map_copy_t) data; uaddr = copy->offset; /* Allocate a kernel buffer. */ size = round_page (uaddr + count) - trunc_page (uaddr); if (size > MAX_COPY) size = MAX_COPY; addr = vm_map_min (device_io_map); err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, NULL, 0, FALSE, VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE); if (err) { vm_map_copy_discard (copy); goto out; } /* Determine size of I/O this time around. 
*/ len = size - (uaddr & PAGE_MASK); if (len > resid) len = resid; while (1) { /* Map user pages. */ for (i = 0; i < copy->cpy_npages; i++) pmap_enter (vm_map_pmap (device_io_map), addr + (i << PAGE_SHIFT), copy->cpy_page_list[i]->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE); /* Do the write. */ amt = (*bd->ds->fops->write) (&td.inode, &td.file, (char *) addr + (uaddr & PAGE_MASK), len); /* Unmap pages and deallocate copy. */ pmap_remove (vm_map_pmap (device_io_map), addr, addr + (copy->cpy_npages << PAGE_SHIFT)); vm_map_copy_discard (copy); /* Check result of write. */ if (amt > 0) { resid -= amt; if (resid == 0) break; uaddr += amt; } else { if (amt < 0) err = linux_to_mach_error (amt); break; } /* Determine size of I/O this time around and copy in pages. */ len = round_page (uaddr + resid) - trunc_page (uaddr); if (len > MAX_COPY) len = MAX_COPY; len -= uaddr & PAGE_MASK; if (len > resid) len = resid; err = vm_map_copyin_page_list (current_map (), uaddr, len, FALSE, FALSE, ©, FALSE); if (err) break; } /* Delete kernel buffer. */ vm_map_remove (device_io_map, addr, addr + size);out: if (--bd->iocount == 0 && bd->want) { bd->want = 0; thread_wakeup ((event_t) bd); } if (IP_VALID (reply_port)) ds_device_write_reply (reply_port, reply_port_type, err, count - resid); return MIG_NO_REPLY;}static io_return_tdevice_read (void *d, ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, dev_mode_t mode, recnum_t bn, int count, io_buf_ptr_t *data, unsigned *bytes_read){ boolean_t dirty; int resid, amt; io_return_t err = 0; queue_head_t pages; vm_map_copy_t copy; vm_offset_t addr, offset, alloc_offset, o; vm_object_t object; vm_page_t m; vm_size_t len, size; struct block_data *bd = d; DECL_DATA; INIT_DATA (); *data = 0; *bytes_read = 0; if (! bd->ds->fops->read) return D_INVALID_OPERATION; count = check_limit (bd, &td.file.f_pos, bn, count); if (count < 0) return D_INVALID_SIZE; if (count == 0) return 0; /* Allocate an object to hold the data. 
     (continuation of device_read)  */
  size = round_page (count);
  object = vm_object_allocate (size);
  if (! object)
    {
      err = D_NO_MEMORY;
      goto out;
    }
  alloc_offset = offset = 0;
  resid = count;

  /* Allocate a kernel buffer.  The buffer is at most MAX_COPY bytes;
     larger reads are done in multiple passes through the loop.  */
  addr = vm_map_min (device_io_map);
  if (size > MAX_COPY)
    size = MAX_COPY;
  err = vm_map_enter (device_io_map, &addr, size, 0, TRUE, NULL, 0, FALSE,
		      VM_PROT_READ|VM_PROT_WRITE,
		      VM_PROT_READ|VM_PROT_WRITE, VM_INHERIT_NONE);
  if (err)
    goto out;

  queue_init (&pages);

  while (resid)
    {
      /* Determine size of I/O this time around.  */
      len = round_page (offset + resid) - trunc_page (offset);
      if (len > MAX_COPY)
	len = MAX_COPY;

      /* Map any pages left from previous operation.  */
      o = trunc_page (offset);
      queue_iterate (&pages, m, vm_page_t, pageq)
	{
	  pmap_enter (vm_map_pmap (device_io_map),
		      addr + o - trunc_page (offset),
		      m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	  o += PAGE_SIZE;
	}
      assert (o == alloc_offset);

      /* Allocate and map pages.  Pages are kept busy on the local
	 queue until they are handed to the object below.  */
      while (alloc_offset < trunc_page (offset) + len)
	{
	  while ((m = vm_page_grab (FALSE)) == 0)
	    VM_PAGE_WAIT (0);
	  assert (! m->active && ! m->inactive);
	  m->busy = TRUE;
	  queue_enter (&pages, m, vm_page_t, pageq);
	  pmap_enter (vm_map_pmap (device_io_map),
		      addr + alloc_offset - trunc_page (offset),
		      m->phys_addr, VM_PROT_READ|VM_PROT_WRITE, TRUE);
	  alloc_offset += PAGE_SIZE;
	}

      /* Do the read.  */
      amt = len - (offset & PAGE_MASK);
      if (amt > resid)
	amt = resid;
      amt = (*bd->ds->fops->read) (&td.inode, &td.file,
				   (char *) addr + (offset & PAGE_MASK),
				   amt);

      /* Compute number of pages to insert in object.  */
      o = trunc_page (offset);
      if (amt > 0)
	{
	  dirty = TRUE;
	  resid -= amt;
	  if (resid == 0)
	    {
	      /* Zero any unused space.  */
	      if (offset + amt < o + len)
		memset ((void *) (addr + offset - o + amt),
			0, o + len - offset - amt);
	      offset = o + len;
	    }
	  else
	    offset += amt;
	}
      else
	{
	  /* Short/failed read: the pages covering this span carry no
	     data and will be freed rather than inserted.  */
	  dirty = FALSE;
	  offset = o + len;
	}

      /* Unmap pages and add them to the object.
*/ pmap_remove (vm_map_pmap (device_io_map), addr, addr + len); vm_object_lock (object); while (o < trunc_page (offset)) { m = (vm_page_t) queue_first (&pages); assert (! queue_end (&pages, (queue_entry_t) m)); queue_remove (&pages, m, vm_page_t, pageq); assert (m->busy); vm_page_lock_queues (); if (dirty) { PAGE_WAKEUP_DONE (m); m->dirty = TRUE; vm_page_insert (m, object, o); } else vm_page_free (m); vm_page_unlock_queues (); o += PAGE_SIZE; } vm_object_unlock (object); if (amt <= 0) { if (amt < 0) err = linux_to_mach_error (amt); break; } } /* Delete kernel buffer. */ vm_map_remove (device_io_map, addr, addr + size); assert (queue_empty (&pages));out: if (! err) err = vm_map_copyin_object (object, 0, round_page (count), ©); if (! err) { *data = (io_buf_ptr_t) copy; *bytes_read = count - resid; } else vm_object_deallocate (object); if (--bd->iocount == 0 && bd->want) { bd->want = 0; thread_wakeup ((event_t) bd); } return err;}static io_return_tdevice_get_status (void *d, dev_flavor_t flavor, dev_status_t status, mach_msg_type_number_t *status_count){ struct block_data *bd = d; switch (flavor) { case DEV_GET_SIZE: if (disk_major (MAJOR (bd->dev))) { assert (bd->ds->gd); if (bd->part >= 0) { struct disklabel *lp; assert (bd->ds->labels); lp = bd->ds->labels[MINOR (bd->dev)]; assert (lp); (status[DEV_GET_SIZE_DEVICE_SIZE] = lp->d_partitions[bd->part].p_size << 9); } else (status[DEV_GET_SIZE_DEVICE_SIZE] = bd->ds->gd->part[MINOR (bd->dev)].nr_sects << 9); } else { assert (blk_size[MAJOR (bd->dev)]); (status[DEV_GET_SIZE_DEVICE_SIZE] = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)] << BLOCK_SIZE_BITS)); } /* It would be nice to return the block size as reported by the driver, but a lot of user level code assumes the sector size to be 512. */ status[DEV_GET_SIZE_RECORD_SIZE] = 512; /* Always return DEV_GET_SIZE_COUNT. This is what all native Mach drivers do, and makes it possible to detect the absence of the call by setting it to a different value on input. 
MiG makes sure that we will never return more integers than the user asked for. */ *status_count = DEV_GET_SIZE_COUNT; break; case DEV_GET_RECORDS: if (disk_major (MAJOR (bd->dev))) { assert (bd->ds->gd); if (bd->part >= 0) { struct disklabel *lp; assert (bd->ds->labels); lp = bd->ds->labels[MINOR (bd->dev)]; assert (lp); (status[DEV_GET_RECORDS_DEVICE_RECORDS] = lp->d_partitions[bd->part].p_size); } else (status[DEV_GET_RECORDS_DEVICE_RECORDS] = bd->ds->gd->part[MINOR (bd->dev)].nr_sects); } else { assert (blk_size[MAJOR (bd->dev)]); status[DEV_GET_RECORDS_DEVICE_RECORDS] = (blk_size[MAJOR (bd->dev)][MINOR (bd->dev)] << (BLOCK_SIZE_BITS - 9)); } /* It would be nice to return the block size as reported by the driver, but a lot of user level code assumes the sector size to be 512. */ status[DEV_GET_SIZE_RECORD_SIZE] = 512; /* Always return DEV_GET_RECORDS_COUNT. This is what all native Mach drivers do, and makes it possible to detect the absence of the call by setting it to a different value on input. MiG makes sure that we will never return more integers than the user asked for. */ *status_count = DEV_GET_RECORDS_COUNT; break; case V_GETPARMS: if (*status_count < (sizeof (struct disk_parms) / sizeof (int))) return D_INVALID_OPERATION; else { struct disk_parms *dp = status; struct hd_geometry hg; DECL_DATA; INIT_DATA(); if ((*bd->ds->fops->ioctl) (&td.inode, &td.file, HDIO_GETGEO, &hg)) return D_INVALID_OPERATION; dp->dp_type = DPT_WINI; /* XXX: It may be a floppy... 
	   (continuation of the V_GETPARMS case)  */
	  dp->dp_heads = hg.heads;
	  dp->dp_cyls = hg.cylinders;
	  dp->dp_sectors = hg.sectors;
	  dp->dp_dosheads = hg.heads;
	  dp->dp_doscyls = hg.cylinders;
	  dp->dp_dossectors = hg.sectors;
	  dp->dp_secsiz = 512;		/* XXX */
	  dp->dp_ptag = 0;
	  dp->dp_pflag = 0;		/* XXX */
	  /* Partition start/extent are not known here.  */
	  dp->dp_pstartsec = -1;
	  dp->dp_pnumsec = -1;

	  *status_count = sizeof (struct disk_parms) / sizeof (int);
	}
      break;

    default:
      return D_INVALID_OPERATION;
    }

  return D_SUCCESS;
}

/* Device-emulation operations vector for Linux block devices.
   Unimplemented entries are NULL; the slot order is fixed by
   struct device_emulation_ops.  */
struct device_emulation_ops linux_block_emulation_ops =
{
  NULL,
  NULL,
  dev_to_port,
  device_open,
  device_close,
  device_write,
  NULL,				/* write_inband */
  device_read,
  NULL,				/* read_inband */
  NULL,				/* set_status */
  device_get_status,
  NULL,				/* set_filter */
  NULL,				/* map */
  NULL,				/* no_senders */
  NULL,				/* write_trap */
  NULL				/* writev_trap */
};