/* net/smc/smc_ib.h */
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Definitions for IB environment
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
10
11#ifndef _SMC_IB_H
12#define _SMC_IB_H
13
143c0171 14#include <linux/if_ether.h>
a4cf0443
UB
15#include <rdma/ib_verbs.h>
16
17#define SMC_MAX_PORTS 2 /* Max # of ports */
18#define SMC_GID_SIZE sizeof(union ib_gid)
19
f38ba179
UB
20#define SMC_IB_MAX_SEND_SGE 2
21
a4cf0443
UB
22struct smc_ib_devices { /* list of smc ib devices definition */
23 struct list_head list;
24 spinlock_t lock; /* protects list of smc ib devices */
25};
26
27extern struct smc_ib_devices smc_ib_devices; /* list of smc ib devices */
28
29struct smc_ib_device { /* ib-device infos for smc */
30 struct list_head list;
31 struct ib_device *ibdev;
32 struct ib_port_attr pattr[SMC_MAX_PORTS]; /* ib dev. port attrs */
bd4ad577 33 struct ib_event_handler event_handler; /* global ib_event handler */
f38ba179
UB
34 struct ib_cq *roce_cq_send; /* send completion queue */
35 struct ib_cq *roce_cq_recv; /* recv completion queue */
36 struct tasklet_struct send_tasklet; /* called by send cq handler */
37 struct tasklet_struct recv_tasklet; /* called by recv cq handler */
143c0171
UB
38 char mac[SMC_MAX_PORTS][ETH_ALEN];
39 /* mac address per port*/
a4cf0443
UB
40 union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */
41 u8 initialized : 1; /* ib dev CQ, evthdl done */
bd4ad577
UB
42 struct work_struct port_event_work;
43 unsigned long port_event_mask;
a4cf0443
UB
44};
45
cd6851f3 46struct smc_buf_desc;
f38ba179 47struct smc_link;
cd6851f3 48
a4cf0443
UB
49int smc_ib_register_client(void) __init;
50void smc_ib_unregister_client(void);
51bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport);
52int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport);
cd6851f3
UB
53int smc_ib_buf_map(struct smc_ib_device *smcibdev, int buf_size,
54 struct smc_buf_desc *buf_slot,
55 enum dma_data_direction data_direction);
bd4ad577
UB
56void smc_ib_buf_unmap(struct smc_ib_device *smcibdev, int bufsize,
57 struct smc_buf_desc *buf_slot,
58 enum dma_data_direction data_direction);
f38ba179
UB
59void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
60int smc_ib_create_protection_domain(struct smc_link *lnk);
61void smc_ib_destroy_queue_pair(struct smc_link *lnk);
62int smc_ib_create_queue_pair(struct smc_link *lnk);
bd4ad577
UB
63int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
64 struct ib_mr **mr);
65int smc_ib_ready_link(struct smc_link *lnk);
66int smc_ib_modify_qp_rts(struct smc_link *lnk);
67int smc_ib_modify_qp_reset(struct smc_link *lnk);
68long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev);
69
a4cf0443
UB
70
71#endif