
📄 mthca_memfree.c

📁 Linux kernel source code
💻 C
📖 Page 1 of 2
/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	}                page[0];
};

static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}

			/* DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to. */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE - 1) / MTHCA_TABLE_CHUNK_SIZE;

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
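
Editor's note: mthca_alloc_icm() above fills each ICM chunk by repeatedly trying the largest power-of-two allocation that still fits the remaining page count and dropping to a smaller order whenever an allocation fails. The following is a minimal, standalone userspace sketch of that strategy only; it is not part of the driver. malloc() stands in for alloc_pages()/dma_alloc_coherent(), and SKETCH_PAGE_SIZE, SKETCH_MAX_ORDER, and alloc_in_chunks() are hypothetical names introduced for illustration.

/*
 * Sketch of the descending-order chunk allocation used by mthca_alloc_icm().
 * Assumes a 4 KB page and a 256 KB (order-6) maximum chunk, as in the driver.
 */
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096
#define SKETCH_MAX_ORDER 6	/* 2^6 pages = 256 KB per chunk */

static int alloc_in_chunks(int npages)
{
	int cur_order = SKETCH_MAX_ORDER;

	while (npages > 0) {
		/* Never allocate more pages than are still needed. */
		while ((1 << cur_order) > npages)
			--cur_order;

		void *buf = malloc((size_t) SKETCH_PAGE_SIZE << cur_order);
		if (buf) {
			printf("allocated chunk of %d pages\n", 1 << cur_order);
			npages -= 1 << cur_order;
			free(buf);	/* a real caller would keep the chunk */
		} else if (--cur_order < 0) {
			return -1;	/* even a single page could not be allocated */
		}
	}
	return 0;
}

int main(void)
{
	/* 100 pages are satisfied as 64 + 32 + 4 pages with the loop above. */
	return alloc_in_chunks(100) ? EXIT_FAILURE : EXIT_SUCCESS;
}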
