mdev->fw.arbel.fw_icm =
mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!mdev->fw.arbel.fw_icm) {
mthca_err(mdev, "Couldn't allocate FW area, aborting.\n");
return -ENOMEM;
mthca_UNMAP_FA(mdev, &status);
err_free:
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
return err;
}
(unsigned long long) aux_pages << 2);
mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, aux_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!mdev->fw.arbel.aux_icm) {
mthca_err(mdev, "Couldn't allocate aux memory, aborting.\n");
return -ENOMEM;
mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
MTHCA_MTT_SEG_SIZE,
mdev->limits.num_mtt_segs,
- mdev->limits.reserved_mtts, 1);
+ mdev->limits.reserved_mtts,
+ 1, 0);
if (!mdev->mr_table.mtt_table) {
mthca_err(mdev, "Failed to map MTT context memory, aborting.\n");
err = -ENOMEM;
mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base,
dev_lim->mpt_entry_sz,
mdev->limits.num_mpts,
- mdev->limits.reserved_mrws, 1);
+ mdev->limits.reserved_mrws,
+ 1, 1);
if (!mdev->mr_table.mpt_table) {
mthca_err(mdev, "Failed to map MPT context memory, aborting.\n");
err = -ENOMEM;
mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base,
dev_lim->qpc_entry_sz,
mdev->limits.num_qps,
- mdev->limits.reserved_qps, 0);
+ mdev->limits.reserved_qps,
+ 0, 0);
if (!mdev->qp_table.qp_table) {
mthca_err(mdev, "Failed to map QP context memory, aborting.\n");
err = -ENOMEM;
mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base,
dev_lim->eqpc_entry_sz,
mdev->limits.num_qps,
- mdev->limits.reserved_qps, 0);
+ mdev->limits.reserved_qps,
+ 0, 0);
if (!mdev->qp_table.eqp_table) {
mthca_err(mdev, "Failed to map EQP context memory, aborting.\n");
err = -ENOMEM;
mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base,
MTHCA_RDB_ENTRY_SIZE,
mdev->limits.num_qps <<
- mdev->qp_table.rdb_shift,
+ mdev->qp_table.rdb_shift, 0,
0, 0);
if (!mdev->qp_table.rdb_table) {
mthca_err(mdev, "Failed to map RDB context memory, aborting\n");
mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base,
dev_lim->cqc_entry_sz,
mdev->limits.num_cqs,
- mdev->limits.reserved_cqs, 0);
+ mdev->limits.reserved_cqs,
+ 0, 0);
if (!mdev->cq_table.table) {
mthca_err(mdev, "Failed to map CQ context memory, aborting.\n");
err = -ENOMEM;
mthca_alloc_icm_table(mdev, init_hca->srqc_base,
dev_lim->srq_entry_sz,
mdev->limits.num_srqs,
- mdev->limits.reserved_srqs, 0);
+ mdev->limits.reserved_srqs,
+ 0, 0);
if (!mdev->srq_table.table) {
mthca_err(mdev, "Failed to map SRQ context memory, "
"aborting.\n");
mdev->limits.num_amgms,
mdev->limits.num_mgms +
mdev->limits.num_amgms,
- 0);
+ 0, 0);
if (!mdev->mcg_table.table) {
mthca_err(mdev, "Failed to map MCG context memory, aborting.\n");
err = -ENOMEM;
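
Taken together, the hunks above extend mthca_alloc_icm_table()'s trailing use_lowmem flag into a (use_lowmem, use_coherent) pair. Within these hunks only the MPT table is switched to coherent memory; the MTT table keeps streaming DMA, presumably because its write path can instead sync through the dma_handle that mthca_table_find() now returns (see the mthca_memfree.c hunks below). A minimal user-space C sketch (table name and layout invented for illustration) that just tabulates the flag pairs passed in these hunks:

#include <stdio.h>

/* (use_lowmem, use_coherent) per ICM table, as passed in the hunks above. */
static const struct {
	const char *table;
	int use_lowmem;
	int use_coherent;
} icm_table_flags[] = {
	{ "MTT", 1, 0 }, { "MPT", 1, 1 },
	{ "QP",  0, 0 }, { "EQP", 0, 0 }, { "RDB", 0, 0 },
	{ "CQ",  0, 0 }, { "SRQ", 0, 0 }, { "MCG", 0, 0 },
};

int main(void)
{
	for (size_t i = 0; i < sizeof(icm_table_flags) / sizeof(icm_table_flags[0]); ++i)
		printf("%-4s lowmem=%d coherent=%d\n", icm_table_flags[i].table,
		       icm_table_flags[i].use_lowmem, icm_table_flags[i].use_coherent);
	return 0;
}
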
mthca_UNMAP_ICM_AUX(mdev, &status);
err_free_aux:
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
return err;
}
mthca_unmap_eq_icm(mdev);
mthca_UNMAP_ICM_AUX(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.aux_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
}
static int mthca_init_arbel(struct mthca_dev *mdev)
err_stop_fw:
mthca_UNMAP_FA(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
err_disable:
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
mthca_free_icms(mdev);
mthca_UNMAP_FA(mdev, &status);
- mthca_free_icm(mdev, mdev->fw.arbel.fw_icm);
+ mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
mthca_DISABLE_LAM(mdev, &status);
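
These error-path hunks follow the driver's usual goto-unwind convention: each label releases what was acquired after the one below it, so teardown runs in reverse order of setup (the FW area is unmapped before its ICM is freed; LAM is disabled last). A self-contained sketch of the pattern, with invented resource names standing in for MAP_FA/ENABLE_LAM/ICM setup:

#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

int main(void)
{
	int err = -1;

	if (acquire("LAM"))
		goto out;
	if (acquire("FW area"))
		goto err_disable_lam;
	if (acquire("ICM tables"))
		goto err_free_fw;

	return 0;

	/* Unwind in reverse order of acquisition. */
err_free_fw:
	release("FW area");
err_disable_lam:
	release("LAM");
out:
	return err;
}
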
*/
#include <linux/mm.h>
+#include <linux/scatterlist.h>
+
+#include <asm/page.h>
#include "mthca_memfree.h"
#include "mthca_dev.h"
} page[0];
};
-void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
+static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
+{
+ int i;
+
+ if (chunk->nsg > 0)
+ pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < chunk->npages; ++i)
+ __free_pages(chunk->mem[i].page,
+ get_order(chunk->mem[i].length));
+}
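
mthca_free_icm_pages() recovers each allocation's order from the stored length: mthca_alloc_icm_pages() records PAGE_SIZE << order in mem->length, and get_order() inverts that at free time. A user-space sketch of the round trip, with get_order() reimplemented here for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* User-space stand-in for the kernel's get_order(): smallest n such
 * that PAGE_SIZE << n >= size. */
static int get_order(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		++order;
	}
	return order;
}

int main(void)
{
	/* The driver stores length = PAGE_SIZE << order at alloc time, so
	 * get_order(length) at free time yields the original order. */
	for (int order = 0; order <= 8; ++order) {
		unsigned long length = PAGE_SIZE << order;
		printf("order %d -> length %lu -> get_order %d\n",
		       order, length, get_order(length));
	}
	return 0;
}
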
+
+static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
- struct mthca_icm_chunk *chunk, *tmp;
int i;

+ for (i = 0; i < chunk->npages; ++i) {
+ dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+ lowmem_page_address(chunk->mem[i].page),
+ sg_dma_address(&chunk->mem[i]));
+ }
+}
+
+void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
+{
+ struct mthca_icm_chunk *chunk, *tmp;
+
if (!icm)
return;
list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
- if (chunk->nsg > 0)
- pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
-
- for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
- get_order(chunk->mem[i].length));
+ if (coherent)
+ mthca_free_icm_coherent(dev, chunk);
+ else
+ mthca_free_icm_pages(dev, chunk);
kfree(chunk);
}
kfree(icm);
}
+static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+ mem->page = alloc_pages(gfp_mask, order);
+ if (!mem->page)
+ return -ENOMEM;
+
+ mem->length = PAGE_SIZE << order;
+ mem->offset = 0;
+ return 0;
+}
+
+static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+ int order, gfp_t gfp_mask)
+{
+ void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
+ gfp_mask);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
+ BUG_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+ return 0;
+}
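
mthca_alloc_icm_coherent() fills the scatterlist entry's CPU side with sg_set_buf(), which locates the page via virt_to_page() and therefore only works for lowmem addresses; that is exactly what the BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)) in mthca_alloc_icm() below guards. Since dma_alloc_coherent() already returned a bus address, the DMA side is filled in directly rather than going through pci_map_sg(). A user-space model of what one entry carries after this helper runs (the struct and names below are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Illustrative stand-in for the 2.6-era struct scatterlist fields that
 * mthca_alloc_icm_coherent() fills: the CPU side (page/offset/length)
 * via sg_set_buf(), and the DMA side directly. */
struct sg_model {
	void     *cpu_buf;     /* stands in for page + offset */
	unsigned  length;      /* CPU length */
	uint64_t  dma_address; /* filled by dma_alloc_coherent() */
	unsigned  dma_length;  /* set equal to the CPU length */
};

static int alloc_coherent_model(struct sg_model *mem, int order)
{
	unsigned len = 4096u << order;
	void *buf = malloc(len);           /* models dma_alloc_coherent() */

	if (!buf)
		return -1;

	mem->cpu_buf     = buf;            /* sg_set_buf() */
	mem->length      = len;
	mem->dma_address = (uintptr_t)buf; /* pretend bus == virtual */
	mem->dma_length  = len;            /* sg_dma_len(mem) = ... */
	return 0;
}

int main(void)
{
	struct sg_model mem;

	if (alloc_coherent_model(&mem, 2))
		return 1;

	printf("len=%u dma_len=%u (always equal for a coherent entry)\n",
	       mem.length, mem.dma_length);
	free(mem.cpu_buf);
	return 0;
}
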
+
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int coherent)
{
struct mthca_icm *icm;
struct mthca_icm_chunk *chunk = NULL;
int cur_order;
+ int ret;
+
+ /* We use sg_set_buf for coherent allocs, which assumes low memory */
+ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!icm)
while (1 << cur_order > npages)
--cur_order;
- chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
- if (chunk->mem[chunk->npages].page) {
- chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
- chunk->mem[chunk->npages].offset = 0;
+ if (coherent)
+ ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
+ &chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
+ else
+ ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
- if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
+ if (!ret) {
+ ++chunk->npages;
+
+			if (coherent)
+				/* already mapped; count the entry so the
+				 * ICM iterator (which walks nsg) sees it */
+				++chunk->nsg;
+			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
if (chunk->nsg <= 0)
goto fail;
+ }
+ if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
chunk = NULL;
- }
npages -= 1 << cur_order;
} else {
}
}
- if (chunk) {
+ if (!coherent && chunk) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
return icm;
fail:
- mthca_free_icm(dev, icm);
+ mthca_free_icm(dev, icm, coherent);
return NULL;
}
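
The allocation loop in mthca_alloc_icm() carves npages into power-of-two chunks: cur_order starts at the largest order the driver attempts, is clamped so 1 << cur_order never exceeds the remaining page count, and is decremented whenever an allocation at the current order fails. A self-contained sketch of just that splitting strategy, with failure above a fixed order standing in for memory fragmentation:

#include <stdio.h>

#define MAX_ORDER_CAP 2 /* pretend allocations above order 2 fail */

/* Models one allocation attempt, the way high-order page allocations
 * can fail under fragmentation. */
static int try_alloc(int order)
{
	return order <= MAX_ORDER_CAP ? 0 : -1;
}

int main(void)
{
	int npages = 21;   /* pages still to allocate */
	int cur_order = 4; /* like get_order(MTHCA_ICM_ALLOC_SIZE) */

	while (npages > 0) {
		/* Never ask for more than what is left. */
		while (1 << cur_order > npages)
			--cur_order;

		if (!try_alloc(cur_order)) {
			printf("allocated order %d (%d pages), %d left\n",
			       cur_order, 1 << cur_order,
			       npages - (1 << cur_order));
			npages -= 1 << cur_order;
		} else {
			/* Back off to smaller chunks on failure. */
			if (--cur_order < 0)
				return 1;
		}
	}
	return 0;
}
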
table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
- __GFP_NOWARN);
+ __GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
goto out;
if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
&status) || status) {
- mthca_free_icm(dev, table->icm[i]);
+ mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
goto out;
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
&status);
- mthca_free_icm(dev, table->icm[i]);
+ mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
mutex_unlock(&table->mutex);
}
-void *mthca_table_find(struct mthca_icm_table *table, int obj)
+void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
- int idx, offset, i;
+ int idx, offset, dma_offset, i;
struct mthca_icm_chunk *chunk;
struct mthca_icm *icm;
struct page *page = NULL;
idx = (obj & (table->num_obj - 1)) * table->obj_size;
icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
- offset = idx % MTHCA_TABLE_CHUNK_SIZE;
+ dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;
if (!icm)
goto out;
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
+ if (dma_handle && dma_offset >= 0) {
+ if (sg_dma_len(&chunk->mem[i]) > dma_offset)
+ *dma_handle = sg_dma_address(&chunk->mem[i]) +
+ dma_offset;
+ dma_offset -= sg_dma_len(&chunk->mem[i]);
+ }
+ /* DMA mapping can merge pages but not split them,
+ * so if we found the page, dma_handle has already
+ * been assigned to. */
if (chunk->mem[i].length > offset) {
page = chunk->mem[i].page;
goto out;
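
The new dma_offset bookkeeping in mthca_table_find() runs two cursors over the chunk in one pass: offset is decremented by each entry's CPU length, while dma_offset is decremented by each entry's DMA length, which can be larger when pci_map_sg() merged adjacent entries. Because the IOMMU can merge but never split, the DMA cursor lands no later than the CPU cursor, so *dma_handle is set by the time the page is found. A self-contained sketch of the two-cursor walk, with an invented layout in which the first two pages were merged into one mapping:

#include <stdio.h>
#include <stdint.h>

/* One scatterlist entry: CPU length, plus the (possibly merged) DMA
 * mapping it belongs to. Entry 1 was merged into entry 0's mapping,
 * so its dma_len is 0 -- mirroring what pci_map_sg() can produce. */
struct entry {
	unsigned cpu_len;
	uint64_t dma_addr;
	unsigned dma_len;
};

static const struct entry chunk[] = {
	{ 4096, 0x80000000, 8192 }, /* two pages merged into one mapping */
	{ 4096, 0,          0    },
	{ 4096, 0x90000000, 4096 },
};

int main(void)
{
	int offset = 9000;       /* byte offset of the object in the chunk */
	int dma_offset = offset; /* same start, tracked against DMA lengths */
	uint64_t dma_handle = 0;

	for (unsigned i = 0; i < sizeof(chunk) / sizeof(chunk[0]); ++i) {
		if (dma_offset >= 0) {
			if ((int) chunk[i].dma_len > dma_offset)
				dma_handle = chunk[i].dma_addr + dma_offset;
			dma_offset -= chunk[i].dma_len;
		}
		/* CPU cursor: find the page that contains the offset. */
		if ((int) chunk[i].cpu_len > offset) {
			printf("page %u, page offset %d, dma_handle 0x%llx\n",
			       i, offset, (unsigned long long) dma_handle);
			break;
		}
		offset -= chunk[i].cpu_len;
	}
	return 0;
}
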
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
u64 virt, int obj_size,
int nobj, int reserved,
- int use_lowmem)
+ int use_lowmem, int use_coherent)
{
struct mthca_icm_table *table;
int num_icm;
table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
+ table->coherent = use_coherent;
mutex_init(&table->mutex);
for (i = 0; i < num_icm; ++i)
table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
- __GFP_NOWARN);
+ __GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
&status) || status) {
- mthca_free_icm(dev, table->icm[i]);
+ mthca_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
goto err;
}
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
&status);
- mthca_free_icm(dev, table->icm[i]);
+ mthca_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table);
mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
&status);
- mthca_free_icm(dev, table->icm[i]);
+ mthca_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table);