NTB doorbell usage depends on the NTB hardware.
For example, Intel NTB gen1 has a single peer doorbell register which is
controlled by the bitmap written to it, while Intel NTB gen3 has one
register per doorbell and the data triggering each doorbell is always 1.
Therefore, exposing only the peer doorbell address forces the user to be
aware of such low-level details.
Signed-off-by: Leonid Ravich <Leonid.Ravich@emc.com>
Acked-by: Logan Gunthorpe <logang@deltatee.com>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Allen Hubbe <allenbh@gmail.com>
Signed-off-by: Jon Mason <jdmason@kudzu.us>
return ndev->reg->mw_bar[idx];
}
-static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
+void ndev_db_addr(struct intel_ntb_dev *ndev,
phys_addr_t *db_addr, resource_size_t *db_size,
phys_addr_t reg_addr, unsigned long reg)
{
*db_size = ndev->reg->db_size;
dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
}
-
- return 0;
}
u64 ndev_db_read(struct intel_ntb_dev *ndev,
ndev->self_reg->db_mask);
}
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
- resource_size_t *db_size)
+static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size, u64 *db_data, int db_bit)
{
+ u64 db_bits;
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
- return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
+ db_bits = BIT_ULL(db_bit);
+
+ if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
+ return -EINVAL;
+
+ ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
ndev->peer_reg->db_bell);
+
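+ /* Gen1 doorbells are rung by a bitmap write; db_data is the bit itself. */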
+ if (db_data)
+ *db_data = db_bits;
+
+ return 0;
}
static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max,
int msix_shift, int total_shift);
enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
+void ndev_db_addr(struct intel_ntb_dev *ndev,
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ phys_addr_t reg_addr, unsigned long reg);
u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);
int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
void __iomem *mmio);
u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector);
int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits);
int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits);
-int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
- resource_size_t *db_size);
int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb);
int intel_ntb_spad_count(struct ntb_dev *ntb);
u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx);
return 0;
}
+static int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
+{
+ phys_addr_t db_addr_base;
+ struct intel_ntb_dev *ndev = ntb_ndev(ntb);
+
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
+ if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
+ return -EINVAL;
+
+ ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
+ ndev->peer_reg->db_bell);
+
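+ /* Gen3 has a separate 32-bit register per doorbell; writing 1 rings it. */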
+ if (db_addr) {
+ *db_addr = db_addr_base + (db_bit * 4);
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
+ *db_addr, db_bit);
+ }
+
+ if (db_data) {
+ *db_data = 1;
+ dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
+ *db_data, db_bit);
+ }
+
+ return 0;
+}
+
static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
{
struct intel_ntb_dev *ndev = ntb_ndev(ntb);
.db_clear = intel_ntb3_db_clear,
.db_set_mask = intel_ntb_db_set_mask,
.db_clear_mask = intel_ntb_db_clear_mask,
- .peer_db_addr = intel_ntb_peer_db_addr,
+ .peer_db_addr = intel_ntb3_peer_db_addr,
.peer_db_set = intel_ntb3_peer_db_set,
.spad_is_unsafe = intel_ntb_spad_is_unsafe,
.spad_count = intel_ntb_spad_count,
static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
- resource_size_t *db_size)
+ resource_size_t *db_size,
+ u64 *db_data,
+ int db_bit)
{
struct switchtec_ntb *sndev = ntb_sndev(ntb);
unsigned long offset;
+ if (unlikely(db_bit >= BITS_PER_LONG_LONG))
+ return -EINVAL;
+
offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
(unsigned long)sndev->stdev->mmio;
*db_addr = pci_resource_start(ntb->pdev, 0) + offset;
if (db_size)
*db_size = sizeof(u32);
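+ /* Shift the bit into the doorbell range assigned to the peer. */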
+ if (db_data)
+ *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
return 0;
}
int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_addr)(struct ntb_dev *ntb,
- phys_addr_t *db_addr, resource_size_t *db_size);
+ phys_addr_t *db_addr, resource_size_t *db_size,
+ u64 *db_data, int db_bit);
u64 (*peer_db_read)(struct ntb_dev *ntb);
int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits);
int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits);
* @ntb: NTB device context.
* @db_addr: OUT - The address of the peer doorbell register.
* @db_size: OUT - The number of bytes to write the peer doorbell register.
+ * @db_data: OUT - The data to write to the peer doorbell register.
+ * @db_bit: The doorbell bit number.
*
* Return the address of the peer doorbell register. This may be used, for
* example, by drivers that offload memory copy operations to a dma engine.
*/
static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
phys_addr_t *db_addr,
- resource_size_t *db_size)
+ resource_size_t *db_size,
+ u64 *db_data, int db_bit)
{
if (!ntb->ops->peer_db_addr)
return -EINVAL;
- return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
+ return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit);
}
/**