scsi_dma.c
/*
 * Function:    scsi_resize_dma_pool
 *
 * Purpose:     Ensure that the DMA pool is sufficiently large to be
 *              able to guarantee that we can always process I/O requests
 *              without calling the system allocator.
 *
 * Arguments:   None.
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     Nothing
 *
 * Notes:       Prior to the new queue code, this function was not SMP-safe.
 *              Go through the device list and recompute the most appropriate
 *              size for the dma pool.  Then grab more memory (as required).
 */
void scsi_resize_dma_pool(void)
{
        int i, k;
        unsigned long size;
        unsigned long flags;
        struct Scsi_Host *shpnt;
        struct Scsi_Host *host = NULL;
        Scsi_Device *SDpnt;
        FreeSectorBitmap *new_dma_malloc_freelist = NULL;
        unsigned int new_dma_sectors = 0;
        unsigned int new_need_isa_buffer = 0;
        unsigned char **new_dma_malloc_pages = NULL;
        int out_of_space = 0;

        spin_lock_irqsave(&allocator_request_lock, flags);

        if (!scsi_hostlist) {
                /*
                 * Free up the DMA pool.
                 */
                if (scsi_dma_free_sectors != dma_sectors)
                        panic("SCSI DMA pool memory leak %d %d\n",
                              scsi_dma_free_sectors, dma_sectors);

                for (i = 0; i < dma_sectors / SECTORS_PER_PAGE; i++)
                        free_pages((unsigned long) dma_malloc_pages[i], 0);
                if (dma_malloc_pages)
                        kfree((char *) dma_malloc_pages);
                dma_malloc_pages = NULL;
                if (dma_malloc_freelist)
                        kfree((char *) dma_malloc_freelist);
                dma_malloc_freelist = NULL;
                dma_sectors = 0;
                scsi_dma_free_sectors = 0;
                spin_unlock_irqrestore(&allocator_request_lock, flags);
                return;
        }
        /* Next, check to see if we need to extend the DMA buffer pool */

        new_dma_sectors = 2 * SECTORS_PER_PAGE;         /* Base value we use */

        if (__pa(high_memory) - 1 > ISA_DMA_THRESHOLD)
                need_isa_bounce_buffers = 1;
        else
                need_isa_bounce_buffers = 0;

        if (scsi_devicelist)
                for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next)
                        new_dma_sectors += SECTORS_PER_PAGE;    /* Increment for each host */

        for (host = scsi_hostlist; host; host = host->next) {
                for (SDpnt = host->host_queue; SDpnt; SDpnt = SDpnt->next) {
                        /*
                         * sd and sr drivers allocate scatterlists.
                         * sr drivers may allocate for each command 1x2048 or
                         * 2x1024 extra buffers for 2k sector size and 1k fs.
                         * sg driver allocates buffers < 4k.
                         * st driver does not need buffers from the dma pool.
                         * estimate 4k buffer/command for devices of unknown
                         * type (should panic).
                         */
                        if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM ||
                            SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) {
                                int nents = host->sg_tablesize;
#ifdef DMA_CHUNK_SIZE
                                /* If the architecture does DMA sg merging, make
                                   sure we count with at least 64 entries even
                                   for HBAs which handle very few sg entries. */
                                if (nents < 64)
                                        nents = 64;
#endif
                                new_dma_sectors += ((nents *
                                    sizeof(struct scatterlist) + 511) >> 9) *
                                    SDpnt->queue_depth;
                                if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM)
                                        new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth;
                        } else if (SDpnt->type == TYPE_SCANNER ||
                                   SDpnt->type == TYPE_PRINTER ||
                                   SDpnt->type == TYPE_PROCESSOR ||
                                   SDpnt->type == TYPE_COMM ||
                                   SDpnt->type == TYPE_MEDIUM_CHANGER ||
                                   SDpnt->type == TYPE_ENCLOSURE) {
                                new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
                        } else {
                                if (SDpnt->type != TYPE_TAPE) {
                                        printk("resize_dma_pool: unknown device type %d\n",
                                               SDpnt->type);
                                        new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth;
                                }
                        }

                        if (host->unchecked_isa_dma &&
                            need_isa_bounce_buffers &&
                            SDpnt->type != TYPE_TAPE) {
                                new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize *
                                    SDpnt->queue_depth;
                                new_need_isa_buffer++;
                        }
                }
        }

#ifdef DEBUG_INIT
        printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors);
#endif

        /* limit DMA memory to 32MB: */
        new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;

        /*
         * We never shrink the buffers - this leads to
         * race conditions that I would rather not even think
         * about right now.
         */
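
        /*
         * Illustrative arithmetic (not from the original source): with
         * sg_tablesize = 128 and sizeof(struct scatterlist) taken to be
         * 16 bytes (it is architecture-dependent), a disk with
         * queue_depth = 2 contributes ((128 * 16 + 511) >> 9) * 2 = 8
         * sectors (4 KB) of scatterlist space, on top of the two-page
         * base and the per-host page counted above.  The clamp above
         * rounds the total up to a multiple of 16 sectors and masks it
         * to 16 bits, so it can never exceed 65520 sectors (~32 MB).
         */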
#if 0                           /* Why do this? No gain and risks out_of_space */
        if (new_dma_sectors < dma_sectors)
                new_dma_sectors = dma_sectors;
#endif
        if (new_dma_sectors <= dma_sectors) {
                spin_unlock_irqrestore(&allocator_request_lock, flags);
                return;         /* best to quit while we are in front */
        }
        for (k = 0; k < 20; ++k) {      /* just in case */
                out_of_space = 0;
                size = (new_dma_sectors / SECTORS_PER_PAGE) *
                    sizeof(FreeSectorBitmap);
                new_dma_malloc_freelist = (FreeSectorBitmap *)
                    kmalloc(size, GFP_ATOMIC);
                if (new_dma_malloc_freelist) {
                        memset(new_dma_malloc_freelist, 0, size);
                        size = (new_dma_sectors / SECTORS_PER_PAGE) *
                            sizeof(*new_dma_malloc_pages);
                        new_dma_malloc_pages = (unsigned char **)
                            kmalloc(size, GFP_ATOMIC);
                        if (!new_dma_malloc_pages) {
                                size = (new_dma_sectors / SECTORS_PER_PAGE) *
                                    sizeof(FreeSectorBitmap);
                                kfree((char *) new_dma_malloc_freelist);
                                out_of_space = 1;
                        } else {
                                memset(new_dma_malloc_pages, 0, size);
                        }
                } else
                        out_of_space = 1;

                if ((!out_of_space) && (new_dma_sectors > dma_sectors)) {
                        for (i = dma_sectors / SECTORS_PER_PAGE;
                             i < new_dma_sectors / SECTORS_PER_PAGE; i++) {
                                new_dma_malloc_pages[i] = (unsigned char *)
                                    __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
                                if (!new_dma_malloc_pages[i])
                                        break;
                        }
                        if (i != new_dma_sectors / SECTORS_PER_PAGE) {  /* clean up */
                                int k = i;

                                out_of_space = 1;
                                for (i = 0; i < k; ++i)
                                        free_pages((unsigned long) new_dma_malloc_pages[i], 0);
                        }
                }
                if (out_of_space) {     /* try scaling down new_dma_sectors request */
                        printk("scsi::resize_dma_pool: WARNING, dma_sectors=%u, "
                               "wanted=%u, scaling\n", dma_sectors, new_dma_sectors);
                        if (new_dma_sectors < (8 * SECTORS_PER_PAGE))
                                break;  /* pretty well hopeless ... */
                        new_dma_sectors = (new_dma_sectors * 3) / 4;
                        new_dma_sectors = (new_dma_sectors + 15) & 0xfff0;
                        if (new_dma_sectors <= dma_sectors)
                                break;  /* stick with what we have got */
                } else
                        break;  /* found space ... */
        }                       /* end of for loop */
        if (out_of_space) {
                spin_unlock_irqrestore(&allocator_request_lock, flags);
                scsi_need_isa_buffer = new_need_isa_buffer;     /* some useful info */
                printk(" WARNING, not enough memory, pool not expanded\n");
                return;
        }
        /* When we dick with the actual DMA list, we need to
         * protect things
         */
        if (dma_malloc_freelist) {
                size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
                memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size);
                kfree((char *) dma_malloc_freelist);
        }
        dma_malloc_freelist = new_dma_malloc_freelist;

        if (dma_malloc_pages) {
                size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(*dma_malloc_pages);
                memcpy(new_dma_malloc_pages, dma_malloc_pages, size);
                kfree((char *) dma_malloc_pages);
        }
        scsi_dma_free_sectors += new_dma_sectors - dma_sectors;
        dma_malloc_pages = new_dma_malloc_pages;
        dma_sectors = new_dma_sectors;
        scsi_need_isa_buffer = new_need_isa_buffer;

        spin_unlock_irqrestore(&allocator_request_lock, flags);

#ifdef DEBUG_INIT
        printk("resize_dma_pool: dma free sectors   = %d\n", scsi_dma_free_sectors);
        printk("resize_dma_pool: dma sectors        = %d\n", dma_sectors);
        printk("resize_dma_pool: need isa buffers   = %d\n", scsi_need_isa_buffer);
#endif
}
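
/*
 * Illustrative trace (not from the original source): if a request for
 * 1024 sectors keeps failing, successive passes of the scaling loop
 * above retry with (n * 3) / 4 rounded up to a multiple of 16:
 * 1024, 768, 576, 432, 336, 256, 192, 144, 112, ... giving up once the
 * request drops below 8 * SECTORS_PER_PAGE (64 sectors with 4 KB
 * pages) or after 20 attempts.
 */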

/*
 * Function:    scsi_init_minimal_dma_pool
 *
 * Purpose:     Allocate a minimal (1-page) DMA pool.
 *
 * Arguments:   None.
 *
 * Lock status: No locks assumed to be held.  This function is SMP-safe.
 *
 * Returns:     0 on success, 1 if the allocation failed.
 *
 * Notes:
 */
int scsi_init_minimal_dma_pool(void)
{
        unsigned long size;
        unsigned long flags;
        int has_space = 0;

        spin_lock_irqsave(&allocator_request_lock, flags);

        dma_sectors = PAGE_SIZE / SECTOR_SIZE;
        scsi_dma_free_sectors = dma_sectors;
        /*
         * Set up a minimal DMA buffer list - this will be used during
         * scan_scsis in some cases.
         */

        /* One bit per sector to indicate free/busy */
        size = (dma_sectors / SECTORS_PER_PAGE) * sizeof(FreeSectorBitmap);
        dma_malloc_freelist = (FreeSectorBitmap *)
            kmalloc(size, GFP_ATOMIC);
        if (dma_malloc_freelist) {
                memset(dma_malloc_freelist, 0, size);

                /* One pointer per page for the page list */
                dma_malloc_pages = (unsigned char **)
                    kmalloc((dma_sectors / SECTORS_PER_PAGE) *
                            sizeof(*dma_malloc_pages), GFP_ATOMIC);
                if (dma_malloc_pages) {
                        /* clear the pointer array, sized for the pointers
                         * rather than reusing the bitmap size above */
                        memset(dma_malloc_pages, 0,
                               (dma_sectors / SECTORS_PER_PAGE) *
                               sizeof(*dma_malloc_pages));
                        dma_malloc_pages[0] = (unsigned char *)
                            __get_free_pages(GFP_ATOMIC | GFP_DMA, 0);
                        if (dma_malloc_pages[0])
                                has_space = 1;
                }
        }
        if (!has_space) {
                if (dma_malloc_freelist) {
                        kfree((char *) dma_malloc_freelist);
                        if (dma_malloc_pages)
                                kfree((char *) dma_malloc_pages);
                }
                spin_unlock_irqrestore(&allocator_request_lock, flags);
                printk("scsi::init_module: failed, out of memory\n");
                return 1;
        }
        spin_unlock_irqrestore(&allocator_request_lock, flags);
        return 0;
}
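
For context: the freelist/pages pair that both functions build is consumed by the pool allocator elsewhere in this file (scsi_malloc()/scsi_free() in this version of the SCSI layer), which hands out 512-byte sectors by first-fit search over the per-page bitmaps. Below is a minimal, self-contained userspace sketch of that scheme, not the kernel code itself: pool_alloc, pool_free, NPAGES and PAGE_SIZE_DEMO are names invented for the example, and plain malloc() stands in for __get_free_pages(GFP_ATOMIC | GFP_DMA, 0).

#include <stdio.h>
#include <stdlib.h>

#define SECTOR_SIZE      512
#define PAGE_SIZE_DEMO   4096
#define SECTORS_PER_PAGE (PAGE_SIZE_DEMO / SECTOR_SIZE)  /* 8 */
#define NPAGES           2

typedef unsigned short FreeSectorBitmap;  /* one bit per sector in a page */

static FreeSectorBitmap freelist[NPAGES]; /* bit set = sector busy */
static unsigned char *pages[NPAGES];      /* one DMA-able page each */

/* First-fit allocation: find nbits adjacent clear bits in one page's
 * bitmap, set them, and return the matching offset into that page. */
static void *pool_alloc(unsigned int len)
{
        unsigned int nbits, mask;
        int i, j;

        if (len == 0 || len % SECTOR_SIZE != 0 || len > PAGE_SIZE_DEMO)
                return NULL;
        nbits = len / SECTOR_SIZE;
        mask = (1u << nbits) - 1;
        for (i = 0; i < NPAGES; i++)
                for (j = 0; j <= SECTORS_PER_PAGE - (int) nbits; j++)
                        if ((freelist[i] & (mask << j)) == 0) {
                                freelist[i] |= mask << j;       /* mark busy */
                                return pages[i] + j * SECTOR_SIZE;
                        }
        return NULL;                                            /* pool exhausted */
}

/* Clear the bits that pool_alloc() set for this address/length pair. */
static void pool_free(void *addr, unsigned int len)
{
        unsigned int nbits = len / SECTOR_SIZE;
        int i;

        for (i = 0; i < NPAGES; i++)
                if ((unsigned char *) addr >= pages[i] &&
                    (unsigned char *) addr < pages[i] + PAGE_SIZE_DEMO) {
                        int j = (int) (((unsigned char *) addr - pages[i]) / SECTOR_SIZE);
                        freelist[i] &= ~(((1u << nbits) - 1) << j);
                        return;
                }
}

int main(void)
{
        void *a, *b;
        int i;

        for (i = 0; i < NPAGES; i++)
                pages[i] = malloc(PAGE_SIZE_DEMO);  /* stand-in for __get_free_pages() */

        a = pool_alloc(1024);   /* 2 sectors: bits 0-1 of page 0 */
        b = pool_alloc(2048);   /* 4 sectors: first fit lands on bits 2-5 */
        printf("a=%p b=%p freelist[0]=%#x\n", a, b, freelist[0]);

        pool_free(a, 1024);     /* bits 0-1 clear again */
        printf("after free: freelist[0]=%#x\n", freelist[0]);

        for (i = 0; i < NPAGES; i++)
                free(pages[i]);
        return 0;
}

Keeping one bitmap word per page means an allocation never spans pages, which is consistent with the PAGE_SIZE-granular pool built by the two functions above: requests larger than a page are simply refused.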