📄 subr_extent.c
字号:
 * finally insert ourselves, we go at the head of the
 * list.  See extent_insert_and_optimize() for details.
 */
	last = NULL;

	/*
	 * Keep track of size and location of the smallest
	 * chunk we fit in.
	 *
	 * Since the extent can be as large as the numeric range
	 * of the CPU (0 - 0xffffffff for 32-bit systems), the
	 * best overhead value can be the maximum unsigned integer.
	 * Thus, we initialize "bestovh" to 0, since we insert ourselves
	 * into the region list immediately on an exact match (which
	 * is the only case where "bestovh" would be set to 0).
	 */
	bestovh = 0;
	beststart = 0;
	bestlast = NULL;

	/*
	 * For N allocated regions, we must make (N + 1)
	 * checks for unallocated space.  The first chunk we
	 * check is the area from the beginning of the subregion
	 * to the first allocated region after that point.
	 */
	newstart = EXTENT_ALIGN(substart, alignment, skew);
	if (newstart < ex->ex_start) {
#ifdef DIAGNOSTIC
		printf(
      "extent_alloc_subregion: extent `%s' (0x%lx - 0x%lx), alignment 0x%lx\n",
		    ex->ex_name, ex->ex_start, ex->ex_end, alignment);
		simple_unlock(&ex->ex_slock);
		panic("extent_alloc_subregion: overflow after alignment");
#else
		extent_free_region_descriptor(ex, myrp);
		simple_unlock(&ex->ex_slock);
		return (EINVAL);
#endif
	}

	/*
	 * Find the first allocated region that begins on or after
	 * the subregion start, advancing the "last" pointer along
	 * the way.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		if (rp->er_start >= newstart)
			break;
		last = rp;
	}

	/*
	 * Relocate the start of our candidate region to the end of
	 * the last allocated region (if there was one overlapping
	 * our subrange).
	 */
	if (last != NULL && last->er_end >= newstart)
		newstart = EXTENT_ALIGN((last->er_end + 1), alignment, skew);

	for (; rp != NULL; rp = rp->er_link.le_next) {
		/*
		 * Check the chunk before "rp".  Note that our
		 * comparison is safe from overflow conditions.
		 */
		if (LE_OV(newstart, size, rp->er_start)) {
			/*
			 * Do a boundary check, if necessary.  Note
			 * that a region may *begin* on the boundary,
			 * but it must end before the boundary.
			 */
			if (boundary) {
				newend = newstart + (size - 1);

				/*
				 * Calculate the next boundary after the start
				 * of this region.
				 */
				dontcross = EXTENT_ALIGN(newstart+1,
				    boundary,
				    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
				    - 1;
#if 0
				printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
				    newstart, newend, ex->ex_start, ex->ex_end,
				    boundary, dontcross);
#endif

				/* Check for overflow */
				if (dontcross < ex->ex_start)
					dontcross = ex->ex_end;
				else if (newend > dontcross) {
					/*
					 * Candidate region crosses boundary.
					 * Throw away the leading part and see
					 * if we still fit.
					 */
					newstart = dontcross + 1;
					newend = newstart + (size - 1);
					dontcross += boundary;
					if (!LE_OV(newstart, size,
					    rp->er_start))
						continue;
				}

				/*
				 * If we run past the end of
				 * the extent or the boundary
				 * overflows, then the request
				 * can't fit.
				 */
				if (newstart + size - 1 > ex->ex_end ||
				    dontcross < newstart)
					goto fail;
			}

			/*
			 * We would fit into this space.  Calculate
			 * the overhead (wasted space).  If we exactly
			 * fit, or we're taking the first fit, insert
			 * ourselves into the region list.
			 */
			ovh = rp->er_start - newstart - size;
			if ((flags & EX_FAST) || (ovh == 0))
				goto found;

			/*
			 * Don't exactly fit, but check to see
			 * if we're better than any current choice.
			 */
			if ((bestovh == 0) || (ovh < bestovh)) {
				bestovh = ovh;
				beststart = newstart;
				bestlast = last;
			}
		}

		/*
		 * Skip past the current region and check again.
		 */
		newstart = EXTENT_ALIGN((rp->er_end + 1), alignment, skew);
		if (newstart < rp->er_end) {
			/*
			 * Overflow condition.  Don't error out, since
			 * we might have a chunk of space that we can
			 * use.
			 */
			goto fail;
		}

		last = rp;
	}

	/*
	 * The final check is from the current starting point to the
	 * end of the subregion.  If there were no allocated regions,
	 * "newstart" is set to the beginning of the subregion, or
	 * just past the end of the last allocated region, adjusted
	 * for alignment in either case.
	 */
	if (LE_OV(newstart, (size - 1), subend)) {
		/*
		 * Do a boundary check, if necessary.  Note
		 * that a region may *begin* on the boundary,
		 * but it must end before the boundary.
		 */
		if (boundary) {
			newend = newstart + (size - 1);

			/*
			 * Calculate the next boundary after the start
			 * of this region.
			 */
			dontcross = EXTENT_ALIGN(newstart+1,
			    boundary,
			    (flags & EX_BOUNDZERO) ? 0 : ex->ex_start)
			    - 1;
#if 0
			printf("newstart=%lx newend=%lx ex_start=%lx ex_end=%lx boundary=%lx dontcross=%lx\n",
			    newstart, newend, ex->ex_start, ex->ex_end,
			    boundary, dontcross);
#endif

			/* Check for overflow */
			if (dontcross < ex->ex_start)
				dontcross = ex->ex_end;
			else if (newend > dontcross) {
				/*
				 * Candidate region crosses boundary.
				 * Throw away the leading part and see
				 * if we still fit.
				 */
				newstart = dontcross + 1;
				newend = newstart + (size - 1);
				dontcross += boundary;
				if (!LE_OV(newstart, (size - 1), subend))
					goto fail;
			}

			/*
			 * If we run past the end of
			 * the extent or the boundary
			 * overflows, then the request
			 * can't fit.
			 */
			if (newstart + size - 1 > ex->ex_end ||
			    dontcross < newstart)
				goto fail;
		}

		/*
		 * We would fit into this space.  Calculate
		 * the overhead (wasted space).  If we exactly
		 * fit, or we're taking the first fit, insert
		 * ourselves into the region list.
		 */
		ovh = ex->ex_end - newstart - (size - 1);
		if ((flags & EX_FAST) || (ovh == 0))
			goto found;

		/*
		 * Don't exactly fit, but check to see
		 * if we're better than any current choice.
		 */
		if ((bestovh == 0) || (ovh < bestovh)) {
			bestovh = ovh;
			beststart = newstart;
			bestlast = last;
		}
	}

 fail:
	/*
	 * One of the following two conditions have
	 * occurred:
	 *
	 *	There is no chunk large enough to hold the request.
	 *
	 *	If EX_FAST was not specified, there is not an
	 *	exact match for the request.
	 *
	 * Note that if we reach this point and EX_FAST is
	 * set, then we know there is no space in the extent for
	 * the request.
	 */
	if (((flags & EX_FAST) == 0) && (bestovh != 0)) {
		/*
		 * We have a match that's "good enough".
		 */
		newstart = beststart;
		last = bestlast;
		goto found;
	}

	/*
	 * No space currently available.  Wait for it to free up,
	 * if possible.
	 */
	if (flags & EX_WAITSPACE) {
		ex->ex_flags |= EXF_WANTED;
		error = ltsleep(ex,
		    PNORELOCK | PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
		    "extnt", 0, &ex->ex_slock);
		if (error)
			return (error);
		goto alloc_start;
	}

	extent_free_region_descriptor(ex, myrp);
	simple_unlock(&ex->ex_slock);
	return (EAGAIN);

 found:
	/*
	 * Insert ourselves into the region list.
	 */
	extent_insert_and_optimize(ex, newstart, size, flags, last, myrp);
	simple_unlock(&ex->ex_slock);
	*result = newstart;
	return (0);
}

/*
 * extent_free:
 *
 *	Return the region [start, start + size - 1] to the extent `ex'.
 *	Takes and releases ex->ex_slock; may block allocating a region
 *	descriptor when coalescing is enabled (EXF_NOCOALESCE clear).
 *	Returns 0 on success, ENOMEM if a descriptor could not be
 *	allocated; panics if the region cannot be found.
 */
int
extent_free(ex, start, size, flags)
	struct extent *ex;
	u_long start, size;
	int flags;
{
	struct extent_region *rp, *nrp = NULL;
	u_long end = start + (size - 1);
	int exflags;

#ifdef DIAGNOSTIC
	/*
	 * Check arguments.
	 *
	 * We don't lock to check these, because these values
	 * are never modified, and if another thread deletes the
	 * extent, we're screwed anyway.
	 */
	if (ex == NULL)
		panic("extent_free: NULL extent");
	if ((start < ex->ex_start) || (start > ex->ex_end)) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: extent `%s', region not within extent",
		    ex->ex_name);
	}

	/* Check for an overflow. */
	if (end < start) {
		extent_print(ex);
		printf("extent_free: extent `%s', start 0x%lx, size 0x%lx\n",
		    ex->ex_name, start, size);
		panic("extent_free: overflow");
	}
#endif

	/*
	 * If we're allowing coalescing, we must allocate a region
	 * descriptor now, since it might block.
	 *
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if ((exflags & EXF_NOCOALESCE) == 0) {
		/* Allocate a region descriptor. */
		nrp = extent_alloc_region_descriptor(ex, flags);
		if (nrp == NULL)
			return (ENOMEM);
	}

	simple_lock(&ex->ex_slock);

	/*
	 * Find region and deallocate.  Several possibilities:
	 *
	 *	1. (start == er_start) && (end == er_end):
	 *	   Free descriptor.
	 *
	 *	2. (start == er_start) && (end < er_end):
	 *	   Adjust er_start.
	 *
	 *	3. (start > er_start) && (end == er_end):
	 *	   Adjust er_end.
	 *
	 *	4. (start > er_start) && (end < er_end):
	 *	   Fragment region.  Requires descriptor alloc.
	 *
	 * Cases 2, 3, and 4 require that the EXF_NOCOALESCE flag
	 * is not set.
	 */
	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next) {
		/*
		 * Save ourselves some comparisons; does the current
		 * region end before chunk to be freed begins?  If so,
		 * then we haven't found the appropriate region descriptor.
		 */
		if (rp->er_end < start)
			continue;

		/*
		 * Save ourselves some traversal; does the current
		 * region begin after the chunk to be freed ends?  If so,
		 * then we've already passed any possible region descriptors
		 * that might have contained the chunk to be freed.
		 */
		if (rp->er_start > end)
			break;

		/* Case 1. */
		if ((start == rp->er_start) && (end == rp->er_end)) {
			LIST_REMOVE(rp, er_link);
			extent_free_region_descriptor(ex, rp);
			goto done;
		}

		/*
		 * The following cases all require that EXF_NOCOALESCE
		 * is not set.
		 */
		if (ex->ex_flags & EXF_NOCOALESCE)
			continue;

		/* Case 2. */
		if ((start == rp->er_start) && (end < rp->er_end)) {
			rp->er_start = (end + 1);
			goto done;
		}

		/* Case 3. */
		if ((start > rp->er_start) && (end == rp->er_end)) {
			rp->er_end = (start - 1);
			goto done;
		}

		/* Case 4. */
		if ((start > rp->er_start) && (end < rp->er_end)) {
			/* Fill in new descriptor. */
			nrp->er_start = end + 1;
			nrp->er_end = rp->er_end;

			/* Adjust current descriptor. */
			rp->er_end = start - 1;

			/* Insert new descriptor after current. */
			LIST_INSERT_AFTER(rp, nrp, er_link);

			/* We used the new descriptor, so don't free it below */
			nrp = NULL;
			goto done;
		}
	}

	/* Region not found, or request otherwise invalid. */
	simple_unlock(&ex->ex_slock);
	extent_print(ex);
	printf("extent_free: start 0x%lx, end 0x%lx\n", start, end);
	panic("extent_free: region not found");

 done:
	if (nrp != NULL)
		extent_free_region_descriptor(ex, nrp);
	if (ex->ex_flags & EXF_WANTED) {
		ex->ex_flags &= ~EXF_WANTED;
		wakeup(ex);
	}
	simple_unlock(&ex->ex_slock);
	return (0);
}

/*
 * Allocate an extent region descriptor.  EXTENT MUST NOT BE LOCKED,
 * AS THIS FUNCTION MAY BLOCK!  We will handle any locking we may need.
 */
static struct extent_region *
extent_alloc_region_descriptor(ex, flags)
	struct extent *ex;
	int flags;
{
	struct extent_region *rp;
	int exflags;
	int s;

	/*
	 * If the kernel memory allocator is not yet running, we can't
	 * use it (obviously).
	 */
	if (KMEM_IS_RUNNING == 0)
		flags &= ~EX_MALLOCOK;

	/*
	 * XXX Make a static, create-time flags word, so we don't
	 * XXX have to lock to read it!
	 */
	simple_lock(&ex->ex_slock);
	exflags = ex->ex_flags;
	simple_unlock(&ex->ex_slock);

	if (exflags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		for (;;) {
			simple_lock(&ex->ex_slock);
			if ((rp = fex->fex_freelist.lh_first) != NULL) {
				/*
				 * Don't muck with flags after pulling it off
				 * the freelist; it may have been dynamically
				 * allocated, and kindly given to us.  We
				 * need to remember that information.
				 */
				LIST_REMOVE(rp, er_link);
				simple_unlock(&ex->ex_slock);
				return (rp);
			}
			if (flags & EX_MALLOCOK) {
				simple_unlock(&ex->ex_slock);
				goto alloc;
			}
			if ((flags & EX_WAITOK) == 0) {
				simple_unlock(&ex->ex_slock);
				return (NULL);
			}
			/* Freelist empty; sleep until a descriptor returns. */
			ex->ex_flags |= EXF_FLWANTED;
			if (ltsleep(&fex->fex_freelist,
			    PNORELOCK| PRIBIO | ((flags & EX_CATCH) ? PCATCH : 0),
			    "extnt", 0, &ex->ex_slock))
				return (NULL);
		}
	}

 alloc:
	s = splhigh();
	if (expool == NULL && !expool_create()) {
		splx(s);
		return (NULL);
	}

	rp = pool_get(expool, (flags & EX_WAITOK) ? PR_WAITOK : 0);
	splx(s);

	if (rp != NULL)
		rp->er_flags = ER_ALLOC;

	return (rp);
}

/*
 * Free an extent region descriptor.  EXTENT _MUST_ BE LOCKED!  This
 * is safe as we do not block here.
 */
static void
extent_free_region_descriptor(ex, rp)
	struct extent *ex;
	struct extent_region *rp;
{
	int s;

	if (ex->ex_flags & EXF_FIXED) {
		struct extent_fixed *fex = (struct extent_fixed *)ex;

		/*
		 * If someone's waiting for a region descriptor,
		 * be nice and give them this one, rather than
		 * just free'ing it back to the system.
		 */
		if (rp->er_flags & ER_ALLOC) {
			if (ex->ex_flags & EXF_FLWANTED) {
				/* Clear all but ER_ALLOC flag. */
				rp->er_flags = ER_ALLOC;
				LIST_INSERT_HEAD(&fex->fex_freelist, rp,
				    er_link);
				goto wake_em_up;
			} else {
				s = splhigh();
				pool_put(expool, rp);
				splx(s);
			}
		} else {
			/* Clear all flags. */
			rp->er_flags = 0;
			LIST_INSERT_HEAD(&fex->fex_freelist, rp,
			    er_link);
		}

		if (ex->ex_flags & EXF_FLWANTED) {
 wake_em_up:
			ex->ex_flags &= ~EXF_FLWANTED;
			wakeup(&fex->fex_freelist);
		}
		return;
	}

	/*
	 * We know it's dynamically allocated if we get here.
	 */
	s = splhigh();
	pool_put(expool, rp);
	splx(s);
}

/*
 * extent_print:
 *
 *	Print (via printf) the extent's name, bounds, and flags,
 *	followed by each allocated region.  Takes and releases
 *	ex->ex_slock; panics on a NULL extent.
 */
void
extent_print(ex)
	struct extent *ex;
{
	struct extent_region *rp;

	if (ex == NULL)
		panic("extent_print: NULL extent");

	simple_lock(&ex->ex_slock);

	printf("extent `%s' (0x%lx - 0x%lx), flags = 0x%x\n", ex->ex_name,
	    ex->ex_start, ex->ex_end, ex->ex_flags);

	for (rp = ex->ex_regions.lh_first; rp != NULL;
	    rp = rp->er_link.le_next)
		printf(" 0x%lx - 0x%lx\n", rp->er_start, rp->er_end);

	simple_unlock(&ex->ex_slock);
}
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -