⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 alloc.c

📁 debug source code under unix platform.
💻 C
📖 第 1 页 / 共 2 页
字号:
    /* NOTE(review): this chunk begins mid-function — the lines down to the
     * first closing "return 1;}" are the tail of the allocation-resize
     * routine whose head (signature and declarations of h, n, p, d, l, m, s)
     * lies outside this view.  Code below is byte-identical to the original;
     * only comments and line formatting have been restored.
     */
    {
        /* There is space in the existing allocated pages to perform the
         * resize without requiring the modification or creation of a
         * neighbouring free node so we remove the watch point area if it
         * exists.
         */
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          m - n->size, MA_READWRITE);
    }
    else if (d > 0)
    {
        /* If the request was to increase the size of the node and we have no
         * suitable node to merge with or the total size of both nodes is still
         * too small then we just fail.  The relocation to a larger memory
         * allocation is done by the calling function.
         */
        if ((p == NULL) || (m + p->size < l))
            return 0;
        __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = __mp_roundup(l, h->heap.memory.page) - m;
            /* Remove any memory protection and the watch point area if it
             * exists.
             */
            __mp_memprotect(&h->heap.memory, (char *) p->block - h->oflow, s,
                            MA_READWRITE);
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        /* Shrink the bordering free node by the s bytes we are absorbing.
         */
        p->block = (char *) p->block + s;
        p->size -= s;
        /* If the resulting size of the free block we merged with is zero then
         * we can just delete it, otherwise we must insert it back into the
         * free tree.
         */
        if (p->size == 0)
        {
            __mp_remove(&h->list, &p->lnode);
            __mp_freeslot(&h->table, p);
        }
        else
            __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize -= s;
    }
    else if (d < 0)
    {
        /* If the request was to decrease the size of the node then we
         * must either increase the size of the bordering node, or create
         * a new free node.
         */
        if (p == NULL)
        {
            /* getnode() is defined elsewhere in this file; presumably it
             * allocates a fresh allocnode slot — confirm against the full
             * source.
             */
            if ((p = getnode(h)) == NULL)
                return 0;
            __mp_insert(&h->list, &n->lnode, &p->lnode);
            p->block = (char *) n->block + m + h->oflow;
            p->size = 0;
            p->info = NULL;
        }
        else
            __mp_treeremove(&h->ftree, &p->tnode);
        if (h->flags & FLG_PAGEALLOC)
        {
            s = m - __mp_roundup(l, h->heap.memory.page);
            /* Remove the watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                              m - n->size, MA_READWRITE);
        }
        else
        {
            s = -d;
            /* Remove the right-most watch point area if it exists.
             */
            if (h->flags & FLG_OFLOWWATCH)
                __mp_memwatch(&h->heap.memory, (char *) n->block + m, h->oflow,
                              MA_READWRITE);
        }
        /* Grow the (possibly new) free node downwards by the s bytes the
         * allocation gave up, and fill or protect that region so stray reads
         * and writes of freed space can be detected.
         */
        p->block = (char *) p->block - s;
        p->size += s;
        if (h->flags & FLG_PAGEALLOC)
            __mp_memprotect(&h->heap.memory, p->block, s, MA_NOACCESS);
        else
            __mp_memset(p->block, h->fbyte, s);
        __mp_treeinsert(&h->ftree, &p->tnode, p->size);
        h->fsize += s;
    }
    /* Re-establish the overflow area beyond the new end of the allocation,
     * either as a watch point or filled with the overflow byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        s = __mp_roundup(l, h->heap.memory.page) - l;
        if (h->flags & FLG_OFLOWWATCH)
            __mp_memwatch(&h->heap.memory, (char *) n->block + l, s,
                          MA_NOACCESS);
        else
            __mp_memset((char *) n->block + l, h->obyte, s);
    }
    else if (h->flags & FLG_OFLOWWATCH)
        __mp_memwatch(&h->heap.memory, (char *) n->block + l, h->oflow,
                      MA_NOACCESS);
    else
        __mp_memset((char *) n->block + l, h->obyte, h->oflow);
    n->size = l;
    h->asize += d;
    return 1;
}


/* Free an existing allocation node.
 *
 * h - the allocation manager's head structure.
 * n - the allocated node to free.
 * i - caller-supplied information pointer: if non-NULL the node is retained
 *     on the freed list/tree (with i stored in n->info) so its details can
 *     still be reported; if NULL the node is returned to the free tree for
 *     immediate reuse.
 */

MP_GLOBAL
void
__mp_freealloc(allochead *h, allocnode *n, void *i)
{
    void *p;
    size_t l, s;

    /* If we are keeping the details (and possibly the contents) of a specified
     * number of recently freed memory allocations then we may have to recycle
     * the oldest freed allocation if the length of the queue would extend past
     * the user-specified limit.
     */
    if ((i != NULL) && (h->flist.size != 0) && (h->flist.size == h->fmax))
        __mp_recyclefreed(h);
    /* Remove the allocated node from the allocation tree.
     */
    __mp_treeremove(&h->atree, &n->tnode);
    h->asize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        /* p and s become the page-aligned base and page-rounded extent of the
         * allocation; they are only set (and later read) on this PAGEALLOC
         * path.
         */
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    if (i != NULL)
    {
        /* We are keeping this node and so place it on the freed tree.
         * If all allocations are pages then we either prevent the original
         * contents from being both read or written to, or prevent the
         * allocation from being written to.  If not then we may optionally
         * preserve its contents, otherwise it will be filled with the free
         * byte.
         */
        n->info = i;
        if (h->flags & FLG_PAGEALLOC)
            if (h->flags & FLG_PRESERVE)
            {
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_READONLY);
                if (h->flags & FLG_OFLOWWATCH)
                {
                    /* Replace any watch points within the allocated pages.
                     * We have to do this here because when we change the
                     * memory protection we may trigger a watch point on some
                     * systems.
                     */
                    if ((l = (char *) n->block - (char *) p) > 0)
                        __mp_memwatch(&h->heap.memory, p, l, MA_NOACCESS);
                    if ((l = s - n->size - l) > 0)
                        __mp_memwatch(&h->heap.memory, (char *) n->block +
                                      n->size, l, MA_NOACCESS);
                }
            }
            else
                __mp_memprotect(&h->heap.memory, n->block, n->size,
                                MA_NOACCESS);
        else if (!(h->flags & FLG_PRESERVE))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_addtail(&h->flist, &n->fnode);
        __mp_treeinsert(&h->gtree, &n->tnode, (unsigned long) n->block);
        h->gsize += n->size;
    }
    else
    {
        /* We are placing this node on the free tree and so it will become
         * available for reuse.  If all allocations are pages then we prevent
         * the contents from being read or written to, otherwise the contents
         * will be filled with the free byte.
         */
        if (h->flags & FLG_PAGEALLOC)
        {
            /* Any watch points will have already been removed, and the
             * surrounding overflow buffers will already be protected with
             * the MA_NOACCESS flag.
             */
            __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
            n->block = p;
            n->size = s;
        }
        else if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points that were made to monitor the overflow
             * buffers.
             */
            __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow,
                          h->oflow, MA_READWRITE);
            __mp_memwatch(&h->heap.memory, (char *) n->block + n->size,
                          h->oflow, MA_READWRITE);
        }
        /* Fold both overflow buffers (h->oflow bytes on each side, hence the
         * << 1) back into the node before it joins the free tree.
         */
        n->block = (char *) n->block - h->oflow;
        n->size += h->oflow << 1;
        n->info = NULL;
        if (!(h->flags & FLG_PAGEALLOC))
            __mp_memset(n->block, h->fbyte, n->size);
        __mp_treeinsert(&h->ftree, &n->tnode, n->size);
        h->fsize += n->size;
        /* mergenode() is defined elsewhere in this file; presumably it
         * coalesces the node with adjacent free nodes — confirm against the
         * full source.
         */
        mergenode(h, n);
    }
}


/* Recycle a freed allocation node.
 *
 * Takes the oldest node on the freed list (its head) and moves it to the
 * free tree so the memory becomes available for reuse, mirroring the
 * release path of __mp_freealloc() above.
 */

MP_GLOBAL
void
__mp_recyclefreed(allochead *h)
{
    allocnode *n;
    void *p;
    size_t l, s;

    /* Recover the allocnode from its embedded freed-list link.
     */
    n = (allocnode *) ((char *) h->flist.head - offsetof(allocnode, fnode));
    /* Remove the freed node from the freed list and the freed tree.
     */
    __mp_remove(&h->flist, &n->fnode);
    __mp_treeremove(&h->gtree, &n->tnode);
    h->gsize -= n->size;
    if (h->flags & FLG_PAGEALLOC)
    {
        p = (void *) __mp_rounddown((unsigned long) n->block,
                                    h->heap.memory.page);
        s = __mp_roundup(n->size + ((char *) n->block - (char *) p),
                         h->heap.memory.page);
        if (h->flags & FLG_OFLOWWATCH)
        {
            /* Remove any watch points within the allocated pages.
             */
            if ((l = (char *) n->block - (char *) p) > 0)
                __mp_memwatch(&h->heap.memory, p, l, MA_READWRITE);
            if ((l = s - n->size - l) > 0)
                __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, l,
                              MA_READWRITE);
        }
    }
    /* We are placing this node on the free tree and so it will become
     * available for reuse.  If all allocations are pages then we prevent
     * the contents from being read or written to, otherwise the contents
     * will be filled with the free byte.
     */
    if (h->flags & FLG_PAGEALLOC)
    {
        /* Any watch points will have already been removed, and the
         * surrounding overflow buffers will already be protected with
         * the MA_NOACCESS flag.
         */
        __mp_memprotect(&h->heap.memory, n->block, n->size, MA_NOACCESS);
        n->block = p;
        n->size = s;
    }
    else if (h->flags & FLG_OFLOWWATCH)
    {
        /* Remove any watch points that were made to monitor the overflow
         * buffers.
         */
        __mp_memwatch(&h->heap.memory, (char *) n->block - h->oflow, h->oflow,
                      MA_READWRITE);
        __mp_memwatch(&h->heap.memory, (char *) n->block + n->size, h->oflow,
                      MA_READWRITE);
    }
    /* Reclaim both overflow buffers into the node before inserting it into
     * the free tree.
     */
    n->block = (char *) n->block - h->oflow;
    n->size += h->oflow << 1;
    n->info = NULL;
    if (!(h->flags & FLG_PAGEALLOC))
        __mp_memset(n->block, h->fbyte, n->size);
    __mp_treeinsert(&h->ftree, &n->tnode, n->size);
    h->fsize += n->size;
    mergenode(h, n);
}


/* Protect the internal memory blocks used by the allocation manager with the
 * supplied access permission.
 *
 * Returns 1 on success and 0 if either the heap or any internal block could
 * not be reprotected.  Nested calls with the current protection are counted
 * via h->protrecur so that matched protect/unprotect pairs balance out.
 */

MP_GLOBAL
int
__mp_protectalloc(allochead *h, memaccess a)
{
    allocnode *n;
    treenode *t;

    if (!__mp_heapprotect(&h->heap, a))
        return 0;
    /* The library already knows what its protection status is so we don't
     * need to do anything if the request has already been done.
     */
    if (h->prot == a)
    {
        h->protrecur++;
        return 1;
    }
    else if (h->protrecur > 0)
    {
        h->protrecur--;
        return 1;
    }
    h->prot = a;
    /* Walk every internal block (the itree) in address order and apply the
     * requested protection to each.
     */
    for (t = __mp_minimum(h->itree.root); t != NULL; t = __mp_successor(t))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if (!__mp_memprotect(&h->heap.memory, n->block, n->size, a))
            return 0;
    }
    return 1;
}


/* Search for an allocated node which contains a given address.
 *
 * Returns the containing allocnode, or NULL if p lies inside no current
 * allocation.
 */

MP_GLOBAL
allocnode *
__mp_findalloc(allochead *h, void *p)
{
    allocnode *n;
    treenode *t;

    /* Assignment inside the condition is intentional: find the node with the
     * greatest base address not above p, then check p falls within it.
     */
    if (t = __mp_searchlower(h->atree.root, (unsigned long) p))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if ((char *) n->block + n->size > (char *) p)
            return n;
    }
    return NULL;
}


/* Search for a freed node which contains a given address.
 *
 * Identical to __mp_findalloc() but consults the freed tree (gtree) instead
 * of the allocation tree.
 */

MP_GLOBAL
allocnode *
__mp_findfreed(allochead *h, void *p)
{
    allocnode *n;
    treenode *t;

    /* Assignment inside the condition is intentional.
     */
    if (t = __mp_searchlower(h->gtree.root, (unsigned long) p))
    {
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
        if ((char *) n->block + n->size > (char *) p)
            return n;
    }
    return NULL;
}


/* Search for a node which contains a given address, either within
 * its allocation or as part of an overflow buffer.
 *
 * p is the start of the range being checked and s its size in bytes; the
 * search succeeds if the range [p, p + s) overlaps a node's allocation or
 * its surrounding overflow buffers.  Returns NULL if no node overlaps.
 */

MP_GLOBAL
allocnode *
__mp_findnode(allochead *h, void *p, size_t s)
{
    allocnode *n;
    treenode *t;
    void *b;
    size_t l;

    /* Search for the lowest node that is closest to the given address.
     */
    if ((t = __mp_searchlower(h->atree.root, (unsigned long) p)) ||
        (t = __mp_searchlower(h->gtree.root, (unsigned long) p)))
        n = (allocnode *) ((char *) t - offsetof(allocnode, tnode));
    else
        n = (allocnode *) h->list.head;
    /* Loop through the list of suitable nodes looking for a likely
     * candidate.
     */
    while (n->lnode.next != NULL)
    {
        /* b and l become the base and length of the node's full footprint:
         * page-rounded when page allocation is in force, and widened by the
         * overflow buffers for nodes that carry allocation info.
         */
        if ((h->flags & FLG_PAGEALLOC) && (n->info != NULL))
        {
            b = (void *) __mp_rounddown((unsigned long) n->block,
                                        h->heap.memory.page);
            l = __mp_roundup(n->size + ((char *) n->block - (char *) b),
                             h->heap.memory.page);
        }
        else
        {
            b = n->block;
            l = n->size;
        }
        if (n->info != NULL)
        {
            b = (char *) b - h->oflow;
            l += h->oflow << 1;
        }
        if (p < b)
            if ((char *) p + s > (char *) b)
                return n;
            else
                break;
        else if ((char *) b + l > (char *) p)
            return n;
        n = (allocnode *) n->lnode.next;
    }
    return NULL;
}


#ifdef __cplusplus
}
#endif /* __cplusplus */

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -