!(rcvctrl & RCV_CTXT_CTRL_ENABLE_SMASK)) {
/* reset the tail and hdr addresses, and sequence count */
write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
- rcd->rcvhdrq_phys);
+ rcd->rcvhdrq_dma);
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrqtailaddr_dma);
rcd->seq_cnt = 1;
/* reset the cached receive header queue head value */
* update with a dummy tail address and then disable
* receive context.
*/
- if (dd->rcvhdrtail_dummy_physaddr) {
+ if (dd->rcvhdrtail_dummy_dma) {
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
/* Enabling RcvCtxtCtrl.TailUpd is intentional. */
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
}
rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_phys)
+ if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
/* See comment on RcvCtxtCtrl.TailUpd above */
* so it doesn't contain an address that is invalid.
*/
write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
}
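The hunks above replace every CPU physical address with the dma_addr_t handed back by the DMA API. A minimal sketch of that pattern, with hypothetical names (rcvq_sketch and rcvq_sketch_alloc are illustrative only, not part of the driver): keep both the kernel virtual address and the dma_addr_t returned by dma_alloc_coherent(), and program device CSRs only with the dma_addr_t, since with an IOMMU active the bus address the device must use can differ from virt_to_phys() of the CPU mapping.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical container mirroring the rcvhdrq fields touched above. */
struct rcvq_sketch {
	void *hdrq;		/* CPU view of the ring */
	dma_addr_t hdrq_dma;	/* bus address the device DMAs to */
};

static int rcvq_sketch_alloc(struct device *dev, struct rcvq_sketch *q,
			     size_t size)
{
	q->hdrq = dma_alloc_coherent(dev, size, &q->hdrq_dma, GFP_KERNEL);
	if (!q->hdrq)
		return -ENOMEM;

	/*
	 * A CSR such as RCV_HDR_ADDR would be written with q->hdrq_dma,
	 * never with virt_to_phys(q->hdrq).
	 */
	return 0;
}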
u32 hfi1_read_cntrs(struct hfi1_devdata *dd, char **namep, u64 **cntrp)
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_devdata *dd;
- unsigned long flags, pfn;
+ unsigned long flags;
u64 token = vma->vm_pgoff << PAGE_SHIFT,
memaddr = 0;
+ void *memvirt = NULL;
u8 subctxt, mapio = 0, vmf = 0, type;
ssize_t memlen = 0;
int ret = 0;
* second or third page allocated for credit returns (if number
* of enabled contexts > 64 and 128 respectively).
*/
- memaddr = dd->cr_base[uctxt->numa_id].pa +
+ memvirt = dd->cr_base[uctxt->numa_id].va;
+ memaddr = virt_to_phys(memvirt) +
(((u64)uctxt->sc->hw_free -
(u64)dd->cr_base[uctxt->numa_id].va) & PAGE_MASK);
memlen = PAGE_SIZE;
mapio = 1;
break;
case RCV_HDRQ:
- memaddr = uctxt->rcvhdrq_phys;
memlen = uctxt->rcvhdrq_size;
+ memvirt = uctxt->rcvhdrq;
break;
case RCV_EGRBUF: {
unsigned long addr;
vma->vm_flags &= ~VM_MAYWRITE;
addr = vma->vm_start;
for (i = 0 ; i < uctxt->egrbufs.numbufs; i++) {
+ memlen = uctxt->egrbufs.buffers[i].len;
+ memvirt = uctxt->egrbufs.buffers[i].addr;
ret = remap_pfn_range(
vma, addr,
- uctxt->egrbufs.buffers[i].phys >> PAGE_SHIFT,
- uctxt->egrbufs.buffers[i].len,
+ /*
+ * virt_to_pfn() does the same, but
+ * it's not available on x86_64
+ * when CONFIG_MMU is enabled.
+ */
+ PFN_DOWN(__pa(memvirt)),
+ memlen,
vma->vm_page_prot);
if (ret < 0)
goto done;
- addr += uctxt->egrbufs.buffers[i].len;
+ addr += memlen;
}
ret = 0;
goto done;
ret = -EPERM;
goto done;
}
- memaddr = uctxt->rcvhdrqtailaddr_phys;
memlen = PAGE_SIZE;
+ memvirt = (void *)uctxt->rcvhdrtail_kvaddr;
flags &= ~VM_MAYWRITE;
break;
case SUBCTXT_UREGS:
"%u:%u type:%u io/vf:%d/%d, addr:0x%llx, len:%lu(%lu), flags:0x%lx\n",
ctxt, subctxt, type, mapio, vmf, memaddr, memlen,
vma->vm_end - vma->vm_start, vma->vm_flags);
- pfn = (unsigned long)(memaddr >> PAGE_SHIFT);
if (vmf) {
- vma->vm_pgoff = pfn;
+ vma->vm_pgoff = PFN_DOWN(memaddr);
vma->vm_ops = &vm_ops;
ret = 0;
} else if (mapio) {
- ret = io_remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
vma->vm_page_prot);
+ } else if (memvirt) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(__pa(memvirt)),
+ memlen,
+ vma->vm_page_prot);
} else {
- ret = remap_pfn_range(vma, vma->vm_start, pfn, memlen,
+ ret = remap_pfn_range(vma, vma->vm_start,
+ PFN_DOWN(memaddr),
+ memlen,
vma->vm_page_prot);
}
done:
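The mmap changes above are the core of the fix: buffers obtained from dma_alloc_coherent() are handed to user space by their CPU physical frame, derived from the kernel virtual address, while the dma_addr_t stays reserved for the hardware. A minimal sketch, assuming a direct-mapped kernel address as dma_alloc_coherent() provides here (map_coherent_buf is a hypothetical helper):

#include <linux/mm.h>
#include <linux/pfn.h>

/*
 * Remap a kernel buffer into a user VMA by its CPU physical frame.
 * __pa() is valid here because the buffer lives in the kernel's direct
 * mapping; the buffer's dma_addr_t may be a different (IOMMU) address
 * and must not be used for the CPU-side mapping.
 */
static int map_coherent_buf(struct vm_area_struct *vma, void *kvaddr,
			    size_t len)
{
	return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(__pa(kvaddr)),
			       len, vma->vm_page_prot);
}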
uctxt->rcvhdrq);
binfo.rcvegr_bufbase = HFI1_MMAP_TOKEN(RCV_EGRBUF, uctxt->ctxt,
fd->subctxt,
- uctxt->egrbufs.rcvtids[0].phys);
+ uctxt->egrbufs.rcvtids[0].dma);
binfo.sdma_comp_bufbase = HFI1_MMAP_TOKEN(SDMA_COMP, uctxt->ctxt,
fd->subctxt, 0);
/*
u32 threshold; /* head update threshold */
struct eager_buffer {
void *addr;
- dma_addr_t phys;
+ dma_addr_t dma;
ssize_t len;
} *buffers;
struct {
void *addr;
- dma_addr_t phys;
+ dma_addr_t dma;
} *rcvtids;
};
/* size of each of the rcvhdrq entries */
u16 rcvhdrqentsize;
/* mmap of hdrq, must fit in 44 bits */
- dma_addr_t rcvhdrq_phys;
- dma_addr_t rcvhdrqtailaddr_phys;
+ dma_addr_t rcvhdrq_dma;
+ dma_addr_t rcvhdrqtailaddr_dma;
struct ctxt_eager_bufs egrbufs;
/* this receive context's assigned PIO ACK send context */
struct send_context *sc;
/* receive context tail dummy address */
__le64 *rcvhdrtail_dummy_kvaddr;
- dma_addr_t rcvhdrtail_dummy_physaddr;
+ dma_addr_t rcvhdrtail_dummy_dma;
bool eprom_available; /* true if EPROM is available for this device */
bool aspm_supported; /* Does HW support ASPM */
/* allocate dummy tail memory for all receive contexts */
dd->rcvhdrtail_dummy_kvaddr = dma_zalloc_coherent(
&dd->pcidev->dev, sizeof(u64),
- &dd->rcvhdrtail_dummy_physaddr,
+ &dd->rcvhdrtail_dummy_dma,
GFP_KERNEL);
if (!dd->rcvhdrtail_dummy_kvaddr) {
if (rcd->rcvhdrq) {
dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size,
- rcd->rcvhdrq, rcd->rcvhdrq_phys);
+ rcd->rcvhdrq, rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
if (rcd->rcvhdrtail_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
(void *)rcd->rcvhdrtail_kvaddr,
- rcd->rcvhdrqtailaddr_phys);
+ rcd->rcvhdrqtailaddr_dma);
rcd->rcvhdrtail_kvaddr = NULL;
}
}
kfree(rcd->egrbufs.rcvtids);
for (e = 0; e < rcd->egrbufs.alloced; e++) {
- if (rcd->egrbufs.buffers[e].phys)
+ if (rcd->egrbufs.buffers[e].dma)
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[e].len,
rcd->egrbufs.buffers[e].addr,
- rcd->egrbufs.buffers[e].phys);
+ rcd->egrbufs.buffers[e].dma);
}
kfree(rcd->egrbufs.buffers);
if (dd->rcvhdrtail_dummy_kvaddr) {
dma_free_coherent(&dd->pcidev->dev, sizeof(u64),
(void *)dd->rcvhdrtail_dummy_kvaddr,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
dd->rcvhdrtail_dummy_kvaddr = NULL;
}
u64 reg;
if (!rcd->rcvhdrq) {
- dma_addr_t phys_hdrqtail;
+ dma_addr_t dma_hdrqtail;
gfp_t gfp_flags;
/*
gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ?
GFP_USER : GFP_KERNEL;
rcd->rcvhdrq = dma_zalloc_coherent(
- &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys,
+ &dd->pcidev->dev, amt, &rcd->rcvhdrq_dma,
gfp_flags | __GFP_COMP);
if (!rcd->rcvhdrq) {
if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
- &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
+ &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
gfp_flags);
if (!rcd->rcvhdrtail_kvaddr)
goto bail_free;
- rcd->rcvhdrqtailaddr_phys = phys_hdrqtail;
+ rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
}
rcd->rcvhdrq_size = amt;
* before enabling any receive context
*/
write_kctxt_csr(dd, rcd->ctxt, RCV_HDR_TAIL_ADDR,
- dd->rcvhdrtail_dummy_physaddr);
+ dd->rcvhdrtail_dummy_dma);
return 0;
vfree(rcd->user_event_mask);
rcd->user_event_mask = NULL;
dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq,
- rcd->rcvhdrq_phys);
+ rcd->rcvhdrq_dma);
rcd->rcvhdrq = NULL;
bail:
return -ENOMEM;
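As in the unwind path above, each coherent allocation is released with dma_free_coherent() using the same size, kernel virtual address, and stored dma_addr_t that the allocation returned. A short sketch of that pairing, reusing the hypothetical rcvq_sketch names from the earlier sketch:

static void rcvq_sketch_free(struct device *dev, struct rcvq_sketch *q,
			     size_t size)
{
	if (q->hdrq) {
		/* size, CPU address and dma handle must match the allocation */
		dma_free_coherent(dev, size, q->hdrq, q->hdrq_dma);
		q->hdrq = NULL;
		q->hdrq_dma = 0;
	}
}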
rcd->egrbufs.buffers[idx].addr =
dma_zalloc_coherent(&dd->pcidev->dev,
rcd->egrbufs.rcvtid_size,
- &rcd->egrbufs.buffers[idx].phys,
+ &rcd->egrbufs.buffers[idx].dma,
gfp_flags);
if (rcd->egrbufs.buffers[idx].addr) {
rcd->egrbufs.buffers[idx].len =
rcd->egrbufs.rcvtid_size;
rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].addr =
rcd->egrbufs.buffers[idx].addr;
- rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].phys =
- rcd->egrbufs.buffers[idx].phys;
+ rcd->egrbufs.rcvtids[rcd->egrbufs.alloced].dma =
+ rcd->egrbufs.buffers[idx].dma;
rcd->egrbufs.alloced++;
alloced_bytes += rcd->egrbufs.rcvtid_size;
idx++;
for (i = 0, j = 0, offset = 0; j < idx; i++) {
if (i >= rcd->egrbufs.count)
break;
- rcd->egrbufs.rcvtids[i].phys =
- rcd->egrbufs.buffers[j].phys + offset;
+ rcd->egrbufs.rcvtids[i].dma =
+ rcd->egrbufs.buffers[j].dma + offset;
rcd->egrbufs.rcvtids[i].addr =
rcd->egrbufs.buffers[j].addr + offset;
rcd->egrbufs.alloced++;
- if ((rcd->egrbufs.buffers[j].phys + offset +
+ if ((rcd->egrbufs.buffers[j].dma + offset +
new_size) ==
- (rcd->egrbufs.buffers[j].phys +
+ (rcd->egrbufs.buffers[j].dma +
rcd->egrbufs.buffers[j].len)) {
j++;
offset = 0;
for (idx = 0; idx < rcd->egrbufs.alloced; idx++) {
hfi1_put_tid(dd, rcd->eager_base + idx, PT_EAGER,
- rcd->egrbufs.rcvtids[idx].phys, order);
+ rcd->egrbufs.rcvtids[idx].dma, order);
cond_resched();
}
goto bail;
dma_free_coherent(&dd->pcidev->dev,
rcd->egrbufs.buffers[idx].len,
rcd->egrbufs.buffers[idx].addr,
- rcd->egrbufs.buffers[idx].phys);
+ rcd->egrbufs.buffers[idx].dma);
rcd->egrbufs.buffers[idx].addr = NULL;
- rcd->egrbufs.buffers[idx].phys = 0;
+ rcd->egrbufs.buffers[idx].dma = 0;
rcd->egrbufs.buffers[idx].len = 0;
}
bail:
}
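The loop above carves one coherent allocation into equal-sized eager entries by advancing the kernel virtual address and the dma_addr_t by the same offset, so rcvtids[i].addr and rcvtids[i].dma always describe the same bytes. A worked sketch of that split (egr_chunk and split_coherent_buf are hypothetical names):

#include <linux/types.h>

struct egr_chunk {
	void *addr;	/* CPU address of this entry */
	dma_addr_t dma;	/* bus address of the same bytes */
};

static void split_coherent_buf(void *base_va, dma_addr_t base_dma,
			       size_t chunk_size, unsigned int count,
			       struct egr_chunk *out)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		out[i].addr = base_va + i * chunk_size;
		out[i].dma = base_dma + (dma_addr_t)(i * chunk_size);
	}
}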
/*
- * Obtain the credit return addresses, kernel virtual and physical, for the
+ * Obtain the credit return addresses, kernel virtual and bus, for the
* given sc.
*
* To understand this routine:
- * o va and pa are arrays of struct credit_return. One for each physical
+ * o va and dma are arrays of struct credit_return. One for each physical
* send context, per NUMA.
* o Each send context always looks in its relative location in a struct
* credit_return for its credit return.
* with the same value. Use the address of the first send context in the
* group.
*/
-static void cr_group_addresses(struct send_context *sc, dma_addr_t *pa)
+static void cr_group_addresses(struct send_context *sc, dma_addr_t *dma)
{
u32 gc = group_context(sc->hw_context, sc->group);
u32 index = sc->hw_context & 0x7;
sc->hw_free = &sc->dd->cr_base[sc->node].va[gc].cr[index];
- *pa = (unsigned long)
- &((struct credit_return *)sc->dd->cr_base[sc->node].pa)[gc];
+ *dma = (unsigned long)
+ &((struct credit_return *)sc->dd->cr_base[sc->node].dma)[gc];
}
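The cast-and-index expression in cr_group_addresses() is pointer arithmetic on the bus address: it yields cr_base[node].dma plus gc times sizeof(struct credit_return), the bus address of group gc's credit_return slot, mirroring the kernel-virtual lookup used for sc->hw_free. An equivalent form, spelled out as a sketch (group_slot_dma is a hypothetical helper; slot_size stands in for sizeof(struct credit_return)):

#include <linux/types.h>

static dma_addr_t group_slot_dma(dma_addr_t base_dma, u32 gc,
				 size_t slot_size)
{
	/*
	 * Same value as &((struct credit_return *)base_dma)[gc] when
	 * slot_size == sizeof(struct credit_return).
	 */
	return base_dma + (dma_addr_t)gc * slot_size;
}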
/*
{
struct send_context_info *sci;
struct send_context *sc = NULL;
- dma_addr_t pa;
+ dma_addr_t dma;
unsigned long flags;
u64 reg;
u32 thresh;
sc->sw_index = sw_index;
sc->hw_context = hw_context;
- cr_group_addresses(sc, &pa);
+ cr_group_addresses(sc, &dma);
sc->credits = sci->credits;
/* PIO Send Memory Address details */
((u64)opval << SC(CHECK_OPCODE_VALUE_SHIFT)));
/* set up credit return */
- reg = pa & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
+ reg = dma & SC(CREDIT_RETURN_ADDR_ADDRESS_SMASK);
write_kctxt_csr(dd, hw_context, SC(CREDIT_RETURN_ADDR), reg);
/*
dd->cr_base[i].va = dma_zalloc_coherent(
&dd->pcidev->dev,
bytes,
- &dd->cr_base[i].pa,
+ &dd->cr_base[i].dma,
GFP_KERNEL);
if (!dd->cr_base[i].va) {
set_dev_node(&dd->pcidev->dev, dd->node);
TXE_NUM_CONTEXTS *
sizeof(struct credit_return),
dd->cr_base[i].va,
- dd->cr_base[i].pa);
+ dd->cr_base[i].dma);
}
}
kfree(dd->cr_base);
/* NUMA indexed credit return array */
struct credit_return_base {
struct credit_return *va;
- dma_addr_t pa;
+ dma_addr_t dma;
};
/* send context configuration sizes (one per type) */
__field(u64, hw_free)
__field(void __iomem *, piobase)
__field(u16, rcvhdrq_cnt)
- __field(u64, rcvhdrq_phys)
+ __field(u64, rcvhdrq_dma)
__field(u32, eager_cnt)
- __field(u64, rcvegr_phys)
+ __field(u64, rcvegr_dma)
),
TP_fast_assign(DD_DEV_ASSIGN(dd);
__entry->ctxt = uctxt->ctxt;
__entry->hw_free = le64_to_cpu(*uctxt->sc->hw_free);
__entry->piobase = uctxt->sc->base_addr;
__entry->rcvhdrq_cnt = uctxt->rcvhdrq_cnt;
- __entry->rcvhdrq_phys = uctxt->rcvhdrq_phys;
+ __entry->rcvhdrq_dma = uctxt->rcvhdrq_dma;
__entry->eager_cnt = uctxt->egrbufs.alloced;
- __entry->rcvegr_phys =
- uctxt->egrbufs.rcvtids[0].phys;
+ __entry->rcvegr_dma = uctxt->egrbufs.rcvtids[0].dma;
),
TP_printk("[%s] ctxt %u " UCTXT_FMT,
__get_str(dev),
__entry->hw_free,
__entry->piobase,
__entry->rcvhdrq_cnt,
- __entry->rcvhdrq_phys,
+ __entry->rcvhdrq_dma,
__entry->eager_cnt,
- __entry->rcvegr_phys
+ __entry->rcvegr_dma
)
);