OpenBMC Firmware / talos-obmc-linux — Commit 7f1d25b4

Merge branches 'misc' and 'rxe' into k.o/for-4.8-1

Authored 8 years ago by Doug Ledford
Parents: 6a89d89d, ab15c95a, 8700e3e7

Changes: 59 files in the full commit; this page shows 20 changed files with 849 additions and 65 deletions (+849 −65).
MAINTAINERS                                  +9    −0
drivers/infiniband/Kconfig                   +1    −0
drivers/infiniband/core/cma.c                +93   −7
drivers/infiniband/core/iwpm_util.c          +2    −1
drivers/infiniband/core/multicast.c          +0    −12
drivers/infiniband/core/sa_query.c           +41   −0
drivers/infiniband/core/ucma.c               +14   −4
drivers/infiniband/core/uverbs.h             +1    −0
drivers/infiniband/core/uverbs_main.c        +24   −13
drivers/infiniband/hw/hfi1/Kconfig           +0    −1
drivers/infiniband/hw/hfi1/file_ops.c        +1    −1
drivers/infiniband/hw/mlx4/cq.c              +2    −2
drivers/infiniband/hw/mlx4/main.c            +197  −1
drivers/infiniband/hw/mlx4/mlx4_ib.h         +9    −0
drivers/infiniband/hw/mthca/mthca_reset.c    +20   −22
drivers/infiniband/sw/Makefile               +1    −0
drivers/infiniband/sw/rdmavt/Kconfig         +0    −1
drivers/infiniband/sw/rxe/Kconfig            +24   −0
drivers/infiniband/sw/rxe/Makefile           +24   −0
drivers/infiniband/sw/rxe/rxe.c              +386  −0
MAINTAINERS

@@ -7444,6 +7444,15 @@ W: http://www.mellanox.com
 Q:  http://patchwork.ozlabs.org/project/netdev/list/
 F:  drivers/net/ethernet/mellanox/mlxsw/

+SOFT-ROCE DRIVER (rxe)
+M:  Moni Shoua <monis@mellanox.com>
+L:  linux-rdma@vger.kernel.org
+S:  Supported
+W:  https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
+Q:  http://patchwork.kernel.org/project/linux-rdma/list/
+F:  drivers/infiniband/hw/rxe/
+F:  include/uapi/rdma/rdma_user_rxe.h
+
 MEMBARRIER SUPPORT
 M:  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 M:  "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
drivers/infiniband/Kconfig

@@ -84,6 +84,7 @@ source "drivers/infiniband/ulp/iser/Kconfig"
 source "drivers/infiniband/ulp/isert/Kconfig"

 source "drivers/infiniband/sw/rdmavt/Kconfig"
+source "drivers/infiniband/sw/rxe/Kconfig"

 source "drivers/infiniband/hw/hfi1/Kconfig"
drivers/infiniband/core/cma.c

@@ -68,6 +68,7 @@ MODULE_DESCRIPTION("Generic RDMA CM Agent");
 MODULE_LICENSE("Dual BSD/GPL");

 #define CMA_CM_RESPONSE_TIMEOUT 20
+#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18

@@ -162,6 +163,14 @@ struct rdma_bind_list {
     unsigned short      port;
 };

+struct class_port_info_context {
+    struct ib_class_port_info   *class_port_info;
+    struct ib_device        *device;
+    struct completion       done;
+    struct ib_sa_query      *sa_query;
+    u8              port_num;
+};
+
 static int cma_ps_alloc(struct net *net, enum rdma_port_space ps,
             struct rdma_bind_list *bind_list, int snum)
 {

@@ -306,6 +315,7 @@ struct cma_multicast {
     struct sockaddr_storage addr;
     struct kref     mcref;
     bool            igmp_joined;
+    u8          join_state;
 };

 struct cma_work {

@@ -3754,10 +3764,63 @@ static void cma_set_mgid(struct rdma_id_private *id_priv,
     }
 }

+static void cma_query_sa_classport_info_cb(int status,
+                       struct ib_class_port_info *rec,
+                       void *context)
+{
+    struct class_port_info_context *cb_ctx = context;
+
+    WARN_ON(!context);
+
+    if (status || !rec) {
+        pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
+             cb_ctx->device->name, cb_ctx->port_num, status);
+        goto out;
+    }
+
+    memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
+
+out:
+    complete(&cb_ctx->done);
+}
+
+static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
+                       struct ib_class_port_info *class_port_info)
+{
+    struct class_port_info_context *cb_ctx;
+    int ret;
+
+    cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
+    if (!cb_ctx)
+        return -ENOMEM;
+
+    cb_ctx->device = device;
+    cb_ctx->class_port_info = class_port_info;
+    cb_ctx->port_num = port_num;
+    init_completion(&cb_ctx->done);
+
+    ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
+                         CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
+                         GFP_KERNEL, cma_query_sa_classport_info_cb,
+                         cb_ctx, &cb_ctx->sa_query);
+    if (ret < 0) {
+        pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
+               device->name, port_num, ret);
+        goto out;
+    }
+
+    wait_for_completion(&cb_ctx->done);
+
+out:
+    kfree(cb_ctx);
+    return ret;
+}
+
 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
                  struct cma_multicast *mc)
 {
     struct ib_sa_mcmember_rec rec;
+    struct ib_class_port_info class_port_info;
     struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
     ib_sa_comp_mask comp_mask;
     int ret;

@@ -3776,7 +3839,24 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
     rec.qkey = cpu_to_be32(id_priv->qkey);
     rdma_addr_get_sgid(dev_addr, &rec.port_gid);
     rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
-    rec.join_state = 1;
+    rec.join_state = mc->join_state;
+
+    if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
+        ret = cma_query_sa_classport_info(id_priv->id.device,
+                          id_priv->id.port_num,
+                          &class_port_info);
+
+        if (ret)
+            return ret;
+
+        if (!(ib_get_cpi_capmask2(&class_port_info) &
+              IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
+            pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
+                "RDMA CM: SM doesn't support Send Only Full Member option\n",
+                id_priv->id.device->name, id_priv->id.port_num);
+            return -EOPNOTSUPP;
+        }
+    }

     comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
             IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |

@@ -3845,6 +3925,9 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
     struct sockaddr *addr = (struct sockaddr *)&mc->addr;
     struct net_device *ndev = NULL;
     enum ib_gid_type gid_type;
+    bool send_only;
+
+    send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);

     if (cma_zero_addr((struct sockaddr *)&mc->addr))
         return -EINVAL;

@@ -3878,12 +3961,14 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
     gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
            rdma_start_port(id_priv->cma_dev->device)];
     if (addr->sa_family == AF_INET) {
-        if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
-            err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
-                        true);
-        if (!err) {
-            mc->igmp_joined = true;
+        if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
             mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+            if (!send_only) {
+                err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
+                            true);
+                if (!err)
+                    mc->igmp_joined = true;
+            }
         }
     } else {
         if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)

@@ -3913,7 +3998,7 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
 }

 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
-            void *context)
+            u8 join_state,
+            void *context)
 {
     struct rdma_id_private *id_priv;
     struct cma_multicast *mc;

@@ -3932,6 +4017,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
     mc->context = context;
     mc->id_priv = id_priv;
     mc->igmp_joined = false;
+    mc->join_state = join_state;
     spin_lock(&id_priv->lock);
     list_add(&mc->list, &id_priv->mc_list);
     spin_unlock(&id_priv->lock);
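For callers, the visible change in this file is that rdma_join_multicast() now takes the requested join state explicitly. A minimal kernel-side sketch (illustrative only: example_sendonly_join is not part of the patch, the rdma_cm_id setup is elided, and the header hosting the join-state enum is assumed to be the one this series moves it into):

    #include <linux/bitops.h>   /* BIT() */
    #include <rdma/rdma_cm.h>   /* rdma_join_multicast() */

    /* Request a send-only full-member join on an already bound rdma_cm_id.
     * Before this series the call was rdma_join_multicast(id, addr, context)
     * and always joined as a full member (join_state == 1).
     */
    static int example_sendonly_join(struct rdma_cm_id *id,
                                     struct sockaddr *mcast_addr, void *context)
    {
        return rdma_join_multicast(id, mcast_addr,
                                   BIT(SENDONLY_FULLMEMBER_JOIN), context);
    }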
drivers/infiniband/core/iwpm_util.c

@@ -37,6 +37,7 @@
 #define IWPM_MAPINFO_HASH_MASK  (IWPM_MAPINFO_HASH_SIZE - 1)
 #define IWPM_REMINFO_HASH_SIZE  64
 #define IWPM_REMINFO_HASH_MASK  (IWPM_REMINFO_HASH_SIZE - 1)
+#define IWPM_MSG_SIZE       512

 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);

@@ -452,7 +453,7 @@ struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
 {
     struct sk_buff *skb = NULL;

-    skb = dev_alloc_skb(NLMSG_GOODSIZE);
+    skb = dev_alloc_skb(IWPM_MSG_SIZE);
     if (!skb) {
         pr_err("%s Unable to allocate skb\n", __func__);
         goto create_nlmsg_exit;
drivers/infiniband/core/multicast.c

@@ -93,18 +93,6 @@ enum {

 struct mcast_member;

-/*
- * There are 4 types of join states:
- * FullMember, NonMember, SendOnlyNonMember, SendOnlyFullMember.
- */
-enum {
-    FULLMEMBER_JOIN,
-    NONMEMBER_JOIN,
-    SENDONLY_NONMEBER_JOIN,
-    SENDONLY_FULLMEMBER_JOIN,
-    NUM_JOIN_MEMBERSHIP_TYPES,
-};
-
 struct mcast_group {
     struct ib_sa_mcmember_rec rec;
     struct rb_node  node;
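The enum above is removed from multicast.c rather than deleted outright: the new join_state plumbing in cma.c and ucma.c uses these constants, so they move to a shared header (include/rdma/rdma_cm.h in the upstream series; treated here as an assumption). Since the enumerators start at zero, the bit masks work out as below — note that BIT(FULLMEMBER_JOIN) == 1, exactly the value cma.c used to hard-code:

    /* Bit positions map to MCMemberRecord.JoinState mask values:
     *   BIT(FULLMEMBER_JOIN)          == 0x1  (the old rec.join_state = 1)
     *   BIT(NONMEMBER_JOIN)           == 0x2
     *   BIT(SENDONLY_NONMEBER_JOIN)   == 0x4  (identifier spelling as in source)
     *   BIT(SENDONLY_FULLMEMBER_JOIN) == 0x8
     */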
drivers/infiniband/core/sa_query.c

@@ -65,10 +65,17 @@ struct ib_sa_sm_ah {
     u8           src_path_mask;
 };

+struct ib_sa_classport_cache {
+    bool valid;
+    struct ib_class_port_info data;
+};
+
 struct ib_sa_port {
     struct ib_mad_agent *agent;
     struct ib_sa_sm_ah  *sm_ah;
     struct work_struct   update_task;
+    struct ib_sa_classport_cache classport_info;
+    spinlock_t           classport_lock; /* protects class port info set */
     spinlock_t           ah_lock;
     u8                   port_num;
 };

@@ -998,6 +1005,13 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event)
         port->sm_ah = NULL;
         spin_unlock_irqrestore(&port->ah_lock, flags);

+        if (event->event == IB_EVENT_SM_CHANGE ||
+            event->event == IB_EVENT_CLIENT_REREGISTER ||
+            event->event == IB_EVENT_LID_CHANGE) {
+            spin_lock_irqsave(&port->classport_lock, flags);
+            port->classport_info.valid = false;
+            spin_unlock_irqrestore(&port->classport_lock, flags);
+        }
         queue_work(ib_wq, &sa_dev->port[event->element.port_num -
                         sa_dev->start_port].update_task);
     }

@@ -1719,6 +1733,7 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
                           int status,
                           struct ib_sa_mad *mad)
 {
+    unsigned long flags;
     struct ib_sa_classport_info_query *query =
         container_of(sa_query, struct ib_sa_classport_info_query, sa_query);

@@ -1728,6 +1743,16 @@ static void ib_sa_classport_info_rec_callback(struct ib_sa_query *sa_query,
         ib_unpack(classport_info_rec_table,
               ARRAY_SIZE(classport_info_rec_table),
               mad->data, &rec);
+
+        spin_lock_irqsave(&sa_query->port->classport_lock, flags);
+        if (!status && !sa_query->port->classport_info.valid) {
+            memcpy(&sa_query->port->classport_info.data, &rec,
+                   sizeof(sa_query->port->classport_info.data));
+
+            sa_query->port->classport_info.valid = true;
+        }
+        spin_unlock_irqrestore(&sa_query->port->classport_lock, flags);
+
         query->callback(status, &rec, query->context);
     } else {
         query->callback(status, NULL, query->context);

@@ -1754,7 +1779,9 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
     struct ib_sa_port *port;
     struct ib_mad_agent *agent;
     struct ib_sa_mad *mad;
+    struct ib_class_port_info cached_class_port_info;
     int ret;
+    unsigned long flags;

     if (!sa_dev)
         return -ENODEV;

@@ -1762,6 +1789,17 @@ int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
     port  = &sa_dev->port[port_num - sa_dev->start_port];
     agent = port->agent;

+    /* Use cached ClassPortInfo attribute if valid instead of sending mad */
+    spin_lock_irqsave(&port->classport_lock, flags);
+    if (port->classport_info.valid && callback) {
+        memcpy(&cached_class_port_info, &port->classport_info.data,
+               sizeof(cached_class_port_info));
+        spin_unlock_irqrestore(&port->classport_lock, flags);
+        callback(0, &cached_class_port_info, context);
+        return 0;
+    }
+    spin_unlock_irqrestore(&port->classport_lock, flags);
+
     query = kzalloc(sizeof(*query), gfp_mask);
     if (!query)
         return -ENOMEM;

@@ -1885,6 +1923,9 @@ static void ib_sa_add_one(struct ib_device *device)
         sa_dev->port[i].sm_ah    = NULL;
         sa_dev->port[i].port_num = i + s;

+        spin_lock_init(&sa_dev->port[i].classport_lock);
+        sa_dev->port[i].classport_info.valid = false;
+
         sa_dev->port[i].agent =
             ib_register_mad_agent(device, i + s, IB_QPT_GSI,
                           NULL, 0, send_handler,
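The caching logic above follows a familiar pattern: consult a lock-protected cached copy first, fall back to a real SA query, let the query callback populate the cache, and invalidate it on SM_CHANGE, CLIENT_REREGISTER, and LID_CHANGE events. A distilled sketch of the idiom (all names hypothetical, not sa_query.c symbols):

    struct attr { u32 capmask2; };          /* stands in for ib_class_port_info */
    int attr_query_hw(struct attr *out);    /* stands in for the SA MAD query */

    struct attr_cache {
        spinlock_t  lock;
        bool        valid;
        struct attr data;
    };

    static int attr_get(struct attr_cache *c, struct attr *out)
    {
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);
        if (c->valid) {                     /* fast path: serve cached copy */
            *out = c->data;
            spin_unlock_irqrestore(&c->lock, flags);
            return 0;
        }
        spin_unlock_irqrestore(&c->lock, flags);

        return attr_query_hw(out);          /* slow path: send the query; its
                                             * completion fills the cache */
    }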
drivers/infiniband/core/ucma.c

@@ -106,6 +106,7 @@ struct ucma_multicast {
     int         events_reported;

     u64         uid;
+    u8          join_state;
     struct list_head    list;
     struct sockaddr_storage addr;
 };

@@ -1317,12 +1318,20 @@ static ssize_t ucma_process_join(struct ucma_file *file,
     struct ucma_multicast *mc;
     struct sockaddr *addr;
     int ret;
+    u8 join_state;

     if (out_len < sizeof(resp))
         return -ENOSPC;

     addr = (struct sockaddr *) &cmd->addr;
-    if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+    if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
+        return -EINVAL;
+
+    if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
+        join_state = BIT(FULLMEMBER_JOIN);
+    else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
+        join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
+    else
         return -EINVAL;

     ctx = ucma_get_ctx(file, cmd->id);

@@ -1335,10 +1344,11 @@ static ssize_t ucma_process_join(struct ucma_file *file,
         ret = -ENOMEM;
         goto err1;
     }
+    mc->join_state = join_state;
     mc->uid = cmd->uid;
     memcpy(&mc->addr, addr, cmd->addr_size);
-    ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, mc);
+    ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+                  join_state, mc);
     if (ret)
         goto err2;

@@ -1382,7 +1392,7 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
     join_cmd.uid = cmd.uid;
     join_cmd.id = cmd.id;
     join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
-    join_cmd.reserved = 0;
+    join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
     memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

     return ucma_process_join(file, &join_cmd, out_len);
drivers/infiniband/core/uverbs.h

@@ -116,6 +116,7 @@ struct ib_uverbs_event_file {
 struct ib_uverbs_file {
     struct kref             ref;
     struct mutex                mutex;
+    struct mutex            cleanup_mutex; /* protect cleanup */
     struct ib_uverbs_device        *device;
     struct ib_ucontext         *ucontext;
     struct ib_event_handler         event_handler;
drivers/infiniband/core/uverbs_main.c

@@ -969,6 +969,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
     file->async_file = NULL;
     kref_init(&file->ref);
     mutex_init(&file->mutex);
+    mutex_init(&file->cleanup_mutex);

     filp->private_data = file;
     kobject_get(&dev->kobj);

@@ -994,18 +995,20 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
 {
     struct ib_uverbs_file *file = filp->private_data;
     struct ib_uverbs_device *dev = file->device;
-    struct ib_ucontext *ucontext = NULL;
+
+    mutex_lock(&file->cleanup_mutex);
+    if (file->ucontext) {
+        ib_uverbs_cleanup_ucontext(file, file->ucontext);
+        file->ucontext = NULL;
+    }
+    mutex_unlock(&file->cleanup_mutex);

     mutex_lock(&file->device->lists_mutex);
-    ucontext = file->ucontext;
-    file->ucontext = NULL;
     if (!file->is_closed) {
         list_del(&file->list);
         file->is_closed = 1;
     }
     mutex_unlock(&file->device->lists_mutex);
-    if (ucontext)
-        ib_uverbs_cleanup_ucontext(file, ucontext);

     if (file->async_file)
         kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

@@ -1219,22 +1222,30 @@ static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
     mutex_lock(&uverbs_dev->lists_mutex);
     while (!list_empty(&uverbs_dev->uverbs_file_list)) {
         struct ib_ucontext *ucontext;
         file = list_first_entry(&uverbs_dev->uverbs_file_list,
                     struct ib_uverbs_file, list);
         file->is_closed = 1;
-        ucontext = file->ucontext;
         list_del(&file->list);
-        file->ucontext = NULL;
         kref_get(&file->ref);
         mutex_unlock(&uverbs_dev->lists_mutex);
-        /* We must release the mutex before going ahead and calling
-         * disassociate_ucontext. disassociate_ucontext might end up
-         * indirectly calling uverbs_close, for example due to freeing
-         * the resources (e.g mmput).
-         */

         ib_uverbs_event_handler(&file->event_handler, &event);
+
+        mutex_lock(&file->cleanup_mutex);
+        ucontext = file->ucontext;
+        file->ucontext = NULL;
+        mutex_unlock(&file->cleanup_mutex);
+
+        /* At this point ib_uverbs_close cannot be running
+         * ib_uverbs_cleanup_ucontext
+         */
         if (ucontext) {
+            /* We must release the mutex before going ahead and
+             * calling disassociate_ucontext. disassociate_ucontext
+             * might end up indirectly calling uverbs_close,
+             * for example due to freeing the resources
+             * (e.g mmput).
+             */
             ib_dev->disassociate_ucontext(ucontext);
             ib_uverbs_cleanup_ucontext(file, ucontext);
         }
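The structure of the fix: file->ucontext is now only read-and-cleared under cleanup_mutex, so the close path and the hot-removal path may race freely, yet exactly one of them observes a non-NULL pointer and performs ib_uverbs_cleanup_ucontext(). The idiom in miniature (a sketch with hypothetical types, not uverbs code):

    struct resource;
    struct owner {
        struct mutex    cleanup_mutex;
        struct resource *res;
    };

    /* Several paths may race to tear the resource down; reading and
     * clearing the pointer under one mutex guarantees a single winner.
     */
    static struct resource *claim_for_cleanup(struct owner *o)
    {
        struct resource *res;

        mutex_lock(&o->cleanup_mutex);
        res = o->res;       /* observe ...              */
        o->res = NULL;      /* ... and take ownership   */
        mutex_unlock(&o->cleanup_mutex);

        return res;         /* non-NULL for exactly one caller */
    }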
drivers/infiniband/hw/hfi1/Kconfig

@@ -3,7 +3,6 @@ config INFINIBAND_HFI1
     depends on X86_64 && INFINIBAND_RDMAVT
     select MMU_NOTIFIER
     select CRC32
-    default m
     ---help---
     This is a low-level driver for Intel OPA Gen1 adapter.

 config HFI1_DEBUG_SDMA_ORDER
drivers/infiniband/hw/hfi1/file_ops.c

@@ -225,7 +225,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
                    sizeof(struct hfi1_base_info));
         break;
     case HFI1_IOCTL_CREDIT_UPD:
-        if (uctxt && uctxt->sc)
+        if (uctxt)
             sc_return_credits(uctxt->sc);
         break;
drivers/infiniband/hw/mlx4/cq.c

@@ -288,7 +288,7 @@ static int mlx4_alloc_resize_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq,
     if (cq->resize_buf)
         return -EBUSY;

-    cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+    cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
     if (!cq->resize_buf)
         return -ENOMEM;

@@ -316,7 +316,7 @@ static int mlx4_alloc_resize_umem(struct mlx4_ib_dev *dev, struct mlx4_ib_cq *cq
     if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
         return -EFAULT;

-    cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
+    cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_KERNEL);
     if (!cq->resize_buf)
         return -ENOMEM;
drivers/infiniband/hw/mlx4/main.c

@@ -2049,6 +2049,195 @@ static struct device_attribute *mlx4_class_attributes[] = {
     &dev_attr_board_id
 };

+struct diag_counter {
+    const char *name;
+    u32 offset;
+};
+
+#define DIAG_COUNTER(_name, _offset)            \
+    { .name = #_name, .offset = _offset }
+
+static const struct diag_counter diag_basic[] = {
+    DIAG_COUNTER(rq_num_lle, 0x00),
+    DIAG_COUNTER(sq_num_lle, 0x04),
+    DIAG_COUNTER(rq_num_lqpoe, 0x08),
+    DIAG_COUNTER(sq_num_lqpoe, 0x0C),
+    DIAG_COUNTER(rq_num_lpe, 0x18),
+    DIAG_COUNTER(sq_num_lpe, 0x1C),
+    DIAG_COUNTER(rq_num_wrfe, 0x20),
+    DIAG_COUNTER(sq_num_wrfe, 0x24),
+    DIAG_COUNTER(sq_num_mwbe, 0x2C),
+    DIAG_COUNTER(sq_num_bre, 0x34),
+    DIAG_COUNTER(sq_num_rire, 0x44),
+    DIAG_COUNTER(rq_num_rire, 0x48),
+    DIAG_COUNTER(sq_num_rae, 0x4C),
+    DIAG_COUNTER(rq_num_rae, 0x50),
+    DIAG_COUNTER(sq_num_roe, 0x54),
+    DIAG_COUNTER(sq_num_tree, 0x5C),
+    DIAG_COUNTER(sq_num_rree, 0x64),
+    DIAG_COUNTER(rq_num_rnr, 0x68),
+    DIAG_COUNTER(sq_num_rnr, 0x6C),
+    DIAG_COUNTER(rq_num_oos, 0x100),
+    DIAG_COUNTER(sq_num_oos, 0x104),
+};
+
+static const struct diag_counter diag_ext[] = {
+    DIAG_COUNTER(rq_num_dup, 0x130),
+    DIAG_COUNTER(sq_num_to, 0x134),
+};
+
+static const struct diag_counter diag_device_only[] = {
+    DIAG_COUNTER(num_cqovf, 0x1A0),
+    DIAG_COUNTER(rq_num_udsdprd, 0x118),
+};
+
+static struct rdma_hw_stats *mlx4_ib_alloc_hw_stats(struct ib_device *ibdev,
+                            u8 port_num)
+{
+    struct mlx4_ib_dev *dev = to_mdev(ibdev);
+    struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+
+    if (!diag[!!port_num].name)
+        return NULL;
+
+    return rdma_alloc_hw_stats_struct(diag[!!port_num].name,
+                      diag[!!port_num].num_counters,
+                      RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
+
+static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
+                struct rdma_hw_stats *stats,
+                u8 port, int index)
+{
+    struct mlx4_ib_dev *dev = to_mdev(ibdev);
+    struct mlx4_ib_diag_counters *diag = dev->diag_counters;
+    u32 hw_value[ARRAY_SIZE(diag_device_only) +
+             ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {};
+    int ret;
+    int i;
+
+    ret = mlx4_query_diag_counters(dev->dev,
+                       MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
+                       diag[!!port].offset, hw_value,
+                       diag[!!port].num_counters, port);
+
+    if (ret)
+        return ret;
+
+    for (i = 0; i < diag[!!port].num_counters; i++)
+        stats->value[i] = hw_value[i];
+
+    return diag[!!port].num_counters;
+}
+
+static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
+                     const char ***name,
+                     u32 **offset,
+                     u32 *num,
+                     bool port)
+{
+    u32 num_counters;
+
+    num_counters = ARRAY_SIZE(diag_basic);
+
+    if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT)
+        num_counters += ARRAY_SIZE(diag_ext);
+
+    if (!port)
+        num_counters += ARRAY_SIZE(diag_device_only);
+
+    *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
+    if (!*name)
+        return -ENOMEM;
+
+    *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
+    if (!*offset)
+        goto err_name;
+
+    *num = num_counters;
+
+    return 0;
+
+err_name:
+    kfree(*name);
+    return -ENOMEM;
+}
+
+static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
+                       const char **name,
+                       u32 *offset,
+                       bool port)
+{
+    int i;
+    int j;
+
+    for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
+        name[i] = diag_basic[i].name;
+        offset[i] = diag_basic[i].offset;
+    }
+
+    if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
+        for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
+            name[j] = diag_ext[i].name;
+            offset[j] = diag_ext[i].offset;
+        }
+    }
+
+    if (!port) {
+        for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
+            name[j] = diag_device_only[i].name;
+            offset[j] = diag_device_only[i].offset;
+        }
+    }
+}
+
+static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
+{
+    struct mlx4_ib_diag_counters *diag = ibdev->diag_counters;
+    int i;
+    int ret;
+    bool per_port = !!(ibdev->dev->caps.flags2 &
+               MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT);
+
+    for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+        /* i == 1 means we are building port counters */
+        if (i && !per_port)
+            continue;
+
+        ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+                            &diag[i].offset,
+                            &diag[i].num_counters, i);
+        if (ret)
+            goto err_alloc;
+
+        mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+                       diag[i].offset, i);
+    }
+
+    ibdev->ib_dev.get_hw_stats  = mlx4_ib_get_hw_stats;
+    ibdev->ib_dev.alloc_hw_stats    = mlx4_ib_alloc_hw_stats;
+
+    return 0;
+
+err_alloc:
+    if (i) {
+        kfree(diag[i - 1].name);
+        kfree(diag[i - 1].offset);
+    }
+
+    return ret;
+}
+
+static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
+{
+    int i;
+
+    for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
+        kfree(ibdev->diag_counters[i].offset);
+        kfree(ibdev->diag_counters[i].name);
+    }
+}
+
 #define MLX4_IB_INVALID_MAC ((u64)-1)
 static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
                    struct net_device *dev,

@@ -2552,9 +2741,12 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
     for (j = 1; j <= ibdev->dev->caps.num_ports; j++)
         atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);

-    if (ib_register_device(&ibdev->ib_dev, NULL))
+    if (mlx4_ib_alloc_diag_counters(ibdev))
         goto err_steer_free_bitmap;

+    if (ib_register_device(&ibdev->ib_dev, NULL))
+        goto err_diag_counters;
+
     if (mlx4_ib_mad_init(ibdev))
         goto err_reg;

@@ -2620,6 +2812,9 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 err_reg:
     ib_unregister_device(&ibdev->ib_dev);

+err_diag_counters:
+    mlx4_ib_diag_cleanup(ibdev);
+
 err_steer_free_bitmap:
     kfree(ibdev->ib_uc_qpns_bitmap);

@@ -2723,6 +2918,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
     mlx4_ib_close_sriov(ibdev);
     mlx4_ib_mad_cleanup(ibdev);
     ib_unregister_device(&ibdev->ib_dev);
+    mlx4_ib_diag_cleanup(ibdev);
     if (ibdev->iboe.nb.notifier_call) {
         if (unregister_netdevice_notifier(&ibdev->iboe.nb))
             pr_warn("failure unregistering notifier\n");
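The two callbacks wired up here implement the core's rdma_hw_stats interface: alloc_hw_stats is invoked once for the device (port_num == 0) and once per port to size and name the counter set, and get_hw_stats re-reads the values on demand; the core then exposes them via the sysfs hw_counters directories. A minimal provider, sketched for a hypothetical driver (the demo_* names and read_hw_counter() are invented for illustration):

    static const char * const demo_names[] = { "demo_rx_errs", "demo_tx_errs" };

    u64 read_hw_counter(int idx);   /* hypothetical hardware read */

    static struct rdma_hw_stats *demo_alloc_hw_stats(struct ib_device *ibdev,
                                                     u8 port_num)
    {
        /* port_num == 0 requests device-wide stats, > 0 per-port stats */
        return rdma_alloc_hw_stats_struct(demo_names, ARRAY_SIZE(demo_names),
                                          RDMA_HW_STATS_DEFAULT_LIFESPAN);
    }

    static int demo_get_hw_stats(struct ib_device *ibdev,
                                 struct rdma_hw_stats *stats, u8 port, int index)
    {
        int i;

        for (i = 0; i < ARRAY_SIZE(demo_names); i++)
            stats->value[i] = read_hw_counter(i);

        return ARRAY_SIZE(demo_names);  /* number of counters filled in */
    }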
drivers/infiniband/hw/mlx4/mlx4_ib.h

@@ -549,6 +549,14 @@ struct mlx4_ib_counters {
     u32         default_counter;
 };

+#define MLX4_DIAG_COUNTERS_TYPES 2
+
+struct mlx4_ib_diag_counters {
+    const char **name;
+    u32 *offset;
+    u32 num_counters;
+};
+
 struct mlx4_ib_dev {
     struct ib_device    ib_dev;
     struct mlx4_dev        *dev;

@@ -585,6 +593,7 @@ struct mlx4_ib_dev {
     /* protect resources needed as part of reset flow */
     spinlock_t      reset_flow_resource_lock;
     struct list_head        qp_list;
+    struct mlx4_ib_diag_counters diag_counters[MLX4_DIAG_COUNTERS_TYPES];
 };

 struct ib_event_work {
drivers/infiniband/hw/mthca/mthca_reset.c

@@ -98,7 +98,7 @@ int mthca_reset(struct mthca_dev *mdev)
         err = -ENOMEM;
         mthca_err(mdev, "Couldn't allocate memory to save HCA "
               "PCI header, aborting.\n");
-        goto out;
+        goto put_dev;
     }

     for (i = 0; i < 64; ++i) {

@@ -108,7 +108,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't save HCA "
                   "PCI header, aborting.\n");
-            goto out;
+            goto free_hca;
         }
     }

@@ -121,7 +121,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENOMEM;
             mthca_err(mdev, "Couldn't allocate memory to save HCA "
                   "bridge PCI header, aborting.\n");
-            goto out;
+            goto free_hca;
         }

         for (i = 0; i < 64; ++i) {

@@ -131,7 +131,7 @@ int mthca_reset(struct mthca_dev *mdev)
                 err = -ENODEV;
                 mthca_err(mdev, "Couldn't save HCA bridge "
                       "PCI header, aborting.\n");
-                goto out;
+                goto free_bh;
             }
         }
         bridge_pcix_cap = pci_find_capability(bridge, PCI_CAP_ID_PCIX);

@@ -139,7 +139,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't locate HCA bridge "
                   "PCI-X capability, aborting.\n");
-            goto out;
+            goto free_bh;
         }
     }

@@ -152,7 +152,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENOMEM;
             mthca_err(mdev, "Couldn't map HCA reset register, "
                   "aborting.\n");
-            goto out;
+            goto free_bh;
         }

         writel(MTHCA_RESET_VALUE, reset);

@@ -172,7 +172,7 @@ int mthca_reset(struct mthca_dev *mdev)
                 err = -ENODEV;
                 mthca_err(mdev, "Couldn't access HCA after reset, "
                       "aborting.\n");
-                goto out;
+                goto free_bh;
             }

             if (v != 0xffffffff)

@@ -184,7 +184,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "PCI device did not come back after reset, "
                   "aborting.\n");
-            goto out;
+            goto free_bh;
         }
     }

 good:

@@ -195,14 +195,14 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA bridge Upstream "
                   "split transaction control, aborting.\n");
-            goto out;
+            goto free_bh;
         }
         if (pci_write_config_dword(bridge, bridge_pcix_cap + 0xc,
                  bridge_header[(bridge_pcix_cap + 0xc) / 4])) {
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA bridge Downstream "
                   "split transaction control, aborting.\n");
-            goto out;
+            goto free_bh;
         }
         /*
          * Bridge control register is at 0x3e, so we'll

@@ -216,7 +216,7 @@ int mthca_reset(struct mthca_dev *mdev)
                 err = -ENODEV;
                 mthca_err(mdev, "Couldn't restore HCA bridge reg %x, "
                       "aborting.\n", i);
-                goto out;
+                goto free_bh;
             }
         }

@@ -225,7 +225,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA bridge COMMAND, "
                   "aborting.\n");
-            goto out;
+            goto free_bh;
         }
     }

@@ -235,7 +235,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA PCI-X "
                   "command register, aborting.\n");
-            goto out;
+            goto free_bh;
         }
     }

@@ -246,7 +246,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA PCI Express "
                   "Device Control register, aborting.\n");
-            goto out;
+            goto free_bh;
         }
         linkctl = hca_header[(hca_pcie_cap + PCI_EXP_LNKCTL) / 4];
         if (pcie_capability_write_word(mdev->pdev, PCI_EXP_LNKCTL,

@@ -254,7 +254,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA PCI Express "
                   "Link control register, aborting.\n");
-            goto out;
+            goto free_bh;
         }
     }

@@ -266,7 +266,7 @@ int mthca_reset(struct mthca_dev *mdev)
             err = -ENODEV;
             mthca_err(mdev, "Couldn't restore HCA reg %x, "
                   "aborting.\n", i);
-            goto out;
+            goto free_bh;
         }
     }

@@ -275,14 +275,12 @@ int mthca_reset(struct mthca_dev *mdev)
         err = -ENODEV;
         mthca_err(mdev, "Couldn't restore HCA COMMAND, "
               "aborting.\n");
-        goto out;
     }

-out:
-    if (bridge)
-        pci_dev_put(bridge);
+free_bh:
     kfree(bridge_header);
+free_hca:
     kfree(hca_header);
+put_dev:
+    pci_dev_put(bridge);

     return err;
 }
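Taken together, these hunks replace one catch-all out: label, which ran the full cleanup even for resources that were never acquired, with the conventional ordered-label ladder: each failure site jumps to the label that unwinds exactly what has been acquired so far (pci_dev_put(NULL) is a safe no-op, so the old "if (bridge)" guard can also go). The shape of the idiom, reduced to essentials (illustrative names only):

    int grab_first(void), grab_second(void), grab_third(void);
    void release_first(void), release_second(void);

    static int example_init(void)
    {
        int err;

        err = grab_first();         /* e.g. the pci_dev_get()       */
        if (err)
            return err;

        err = grab_second();        /* e.g. allocate hca_header     */
        if (err)
            goto undo_first;

        err = grab_third();         /* e.g. allocate bridge_header  */
        if (err)
            goto undo_second;

        return 0;

    undo_second:
        release_second();
    undo_first:
        release_first();
        return err;
    }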
drivers/infiniband/sw/Makefile

 obj-$(CONFIG_INFINIBAND_RDMAVT)     += rdmavt/
+obj-$(CONFIG_RDMA_RXE)          += rxe/
drivers/infiniband/sw/rdmavt/Kconfig

 config INFINIBAND_RDMAVT
     tristate "RDMA verbs transport library"
     depends on 64BIT
-    default m
     ---help---
     This is a common software verbs provider for RDMA networks.
drivers/infiniband/sw/rxe/Kconfig (new file, mode 100644)

config RDMA_RXE
	tristate "Software RDMA over Ethernet (RoCE) driver"
	depends on INET && PCI && INFINIBAND
	depends on NET_UDP_TUNNEL
	---help---
	This driver implements the InfiniBand RDMA transport over
	the Linux network stack. It enables a system with a
	standard Ethernet adapter to interoperate with a RoCE
	adapter or with another system running the RXE driver.
	Documentation on InfiniBand and RoCE can be downloaded at
	www.infinibandta.org and www.openfabrics.org. (See also
	siw which is a similar software driver for iWARP.)

	The driver is split into two layers, one interfaces with the
	Linux RDMA stack and implements a kernel or user space
	verbs API. The user space verbs API requires a support
	library named librxe which is loaded by the generic user
	space verbs API, libibverbs. The other layer interfaces
	with the Linux network stack at layer 3.

	To configure and work with soft-RoCE driver please use the
	following wiki page under "configure Soft-RoCE (RXE)" section:

	https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
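Enabling the new symbol is enough to build the driver; for example, as a module in a kernel .config fragment (assuming the stated dependencies — INET, PCI, INFINIBAND and NET_UDP_TUNNEL — are already satisfied elsewhere in the configuration):

    # .config fragment (illustrative)
    CONFIG_RDMA_RXE=m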
drivers/infiniband/sw/rxe/Makefile (new file, mode 100644)

obj-$(CONFIG_RDMA_RXE) += rdma_rxe.o

rdma_rxe-y := \
	rxe.o \
	rxe_comp.o \
	rxe_req.o \
	rxe_resp.o \
	rxe_recv.o \
	rxe_pool.o \
	rxe_queue.o \
	rxe_verbs.o \
	rxe_av.o \
	rxe_srq.o \
	rxe_qp.o \
	rxe_cq.o \
	rxe_mr.o \
	rxe_dma.o \
	rxe_opcode.o \
	rxe_mmap.o \
	rxe_icrc.o \
	rxe_mcast.o \
	rxe_task.o \
	rxe_net.o \
	rxe_sysfs.o
drivers/infiniband/sw/rxe/rxe.c (new file, mode 100644)
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "rxe.h"
#include "rxe_loc.h"

MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION("0.2");

/* free resources for all ports on a device */
static void rxe_cleanup_ports(struct rxe_dev *rxe)
{
    kfree(rxe->port.pkey_tbl);
    rxe->port.pkey_tbl = NULL;
}

/* free resources for a rxe device all objects created for this device must
 * have been destroyed
 */
static void rxe_cleanup(struct rxe_dev *rxe)
{
    rxe_pool_cleanup(&rxe->uc_pool);
    rxe_pool_cleanup(&rxe->pd_pool);
    rxe_pool_cleanup(&rxe->ah_pool);
    rxe_pool_cleanup(&rxe->srq_pool);
    rxe_pool_cleanup(&rxe->qp_pool);
    rxe_pool_cleanup(&rxe->cq_pool);
    rxe_pool_cleanup(&rxe->mr_pool);
    rxe_pool_cleanup(&rxe->mw_pool);
    rxe_pool_cleanup(&rxe->mc_grp_pool);
    rxe_pool_cleanup(&rxe->mc_elem_pool);

    rxe_cleanup_ports(rxe);
}

/* called when all references have been dropped */
void rxe_release(struct kref *kref)
{
    struct rxe_dev *rxe = container_of(kref, struct rxe_dev, ref_cnt);

    rxe_cleanup(rxe);
    ib_dealloc_device(&rxe->ib_dev);
}

void rxe_dev_put(struct rxe_dev *rxe)
{
    kref_put(&rxe->ref_cnt, rxe_release);
}
EXPORT_SYMBOL_GPL(rxe_dev_put);

/* initialize rxe device parameters */
static int rxe_init_device_param(struct rxe_dev *rxe)
{
    rxe->max_inline_data            = RXE_MAX_INLINE_DATA;

    rxe->attr.fw_ver            = RXE_FW_VER;
    rxe->attr.max_mr_size           = RXE_MAX_MR_SIZE;
    rxe->attr.page_size_cap         = RXE_PAGE_SIZE_CAP;
    rxe->attr.vendor_id         = RXE_VENDOR_ID;
    rxe->attr.vendor_part_id        = RXE_VENDOR_PART_ID;
    rxe->attr.hw_ver            = RXE_HW_VER;
    rxe->attr.max_qp            = RXE_MAX_QP;
    rxe->attr.max_qp_wr         = RXE_MAX_QP_WR;
    rxe->attr.device_cap_flags      = RXE_DEVICE_CAP_FLAGS;
    rxe->attr.max_sge           = RXE_MAX_SGE;
    rxe->attr.max_sge_rd            = RXE_MAX_SGE_RD;
    rxe->attr.max_cq            = RXE_MAX_CQ;
    rxe->attr.max_cqe           = (1 << RXE_MAX_LOG_CQE) - 1;
    rxe->attr.max_mr            = RXE_MAX_MR;
    rxe->attr.max_pd            = RXE_MAX_PD;
    rxe->attr.max_qp_rd_atom        = RXE_MAX_QP_RD_ATOM;
    rxe->attr.max_ee_rd_atom        = RXE_MAX_EE_RD_ATOM;
    rxe->attr.max_res_rd_atom       = RXE_MAX_RES_RD_ATOM;
    rxe->attr.max_qp_init_rd_atom       = RXE_MAX_QP_INIT_RD_ATOM;
    rxe->attr.max_ee_init_rd_atom       = RXE_MAX_EE_INIT_RD_ATOM;
    rxe->attr.atomic_cap            = RXE_ATOMIC_CAP;
    rxe->attr.max_ee            = RXE_MAX_EE;
    rxe->attr.max_rdd           = RXE_MAX_RDD;
    rxe->attr.max_mw            = RXE_MAX_MW;
    rxe->attr.max_raw_ipv6_qp       = RXE_MAX_RAW_IPV6_QP;
    rxe->attr.max_raw_ethy_qp       = RXE_MAX_RAW_ETHY_QP;
    rxe->attr.max_mcast_grp         = RXE_MAX_MCAST_GRP;
    rxe->attr.max_mcast_qp_attach       = RXE_MAX_MCAST_QP_ATTACH;
    rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
    rxe->attr.max_ah            = RXE_MAX_AH;
    rxe->attr.max_fmr           = RXE_MAX_FMR;
    rxe->attr.max_map_per_fmr       = RXE_MAX_MAP_PER_FMR;
    rxe->attr.max_srq           = RXE_MAX_SRQ;
    rxe->attr.max_srq_wr            = RXE_MAX_SRQ_WR;
    rxe->attr.max_srq_sge           = RXE_MAX_SRQ_SGE;
    rxe->attr.max_fast_reg_page_list_len    = RXE_MAX_FMR_PAGE_LIST_LEN;
    rxe->attr.max_pkeys         = RXE_MAX_PKEYS;
    rxe->attr.local_ca_ack_delay        = RXE_LOCAL_CA_ACK_DELAY;

    rxe->max_ucontext           = RXE_MAX_UCONTEXT;

    return 0;
}

/* initialize port attributes */
static int rxe_init_port_param(struct rxe_port *port)
{
    port->attr.state        = RXE_PORT_STATE;
    port->attr.max_mtu      = RXE_PORT_MAX_MTU;
    port->attr.active_mtu       = RXE_PORT_ACTIVE_MTU;
    port->attr.gid_tbl_len      = RXE_PORT_GID_TBL_LEN;
    port->attr.port_cap_flags   = RXE_PORT_PORT_CAP_FLAGS;
    port->attr.max_msg_sz       = RXE_PORT_MAX_MSG_SZ;
    port->attr.bad_pkey_cntr    = RXE_PORT_BAD_PKEY_CNTR;
    port->attr.qkey_viol_cntr   = RXE_PORT_QKEY_VIOL_CNTR;
    port->attr.pkey_tbl_len     = RXE_PORT_PKEY_TBL_LEN;
    port->attr.lid          = RXE_PORT_LID;
    port->attr.sm_lid       = RXE_PORT_SM_LID;
    port->attr.lmc          = RXE_PORT_LMC;
    port->attr.max_vl_num       = RXE_PORT_MAX_VL_NUM;
    port->attr.sm_sl        = RXE_PORT_SM_SL;
    port->attr.subnet_timeout   = RXE_PORT_SUBNET_TIMEOUT;
    port->attr.init_type_reply  = RXE_PORT_INIT_TYPE_REPLY;
    port->attr.active_width     = RXE_PORT_ACTIVE_WIDTH;
    port->attr.active_speed     = RXE_PORT_ACTIVE_SPEED;
    port->attr.phys_state       = RXE_PORT_PHYS_STATE;
    port->mtu_cap           = ib_mtu_enum_to_int(RXE_PORT_ACTIVE_MTU);
    port->subnet_prefix     = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);

    return 0;
}

/* initialize port state, note IB convention that HCA ports are always
 * numbered from 1
 */
static int rxe_init_ports(struct rxe_dev *rxe)
{
    struct rxe_port *port = &rxe->port;

    rxe_init_port_param(port);

    if (!port->attr.pkey_tbl_len || !port->attr.gid_tbl_len)
        return -EINVAL;

    port->pkey_tbl = kcalloc(port->attr.pkey_tbl_len,
                 sizeof(*port->pkey_tbl), GFP_KERNEL);

    if (!port->pkey_tbl)
        return -ENOMEM;

    port->pkey_tbl[0] = 0xffff;
    port->port_guid = rxe->ifc_ops->port_guid(rxe);

    spin_lock_init(&port->port_lock);

    return 0;
}

/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
    int err;

    err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
                rxe->max_ucontext);
    if (err)
        goto err1;

    err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
                rxe->attr.max_pd);
    if (err)
        goto err2;

    err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
                rxe->attr.max_ah);
    if (err)
        goto err3;

    err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
                rxe->attr.max_srq);
    if (err)
        goto err4;

    err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
                rxe->attr.max_qp);
    if (err)
        goto err5;

    err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
                rxe->attr.max_cq);
    if (err)
        goto err6;

    err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
                rxe->attr.max_mr);
    if (err)
        goto err7;

    err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
                rxe->attr.max_mw);
    if (err)
        goto err8;

    err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
                rxe->attr.max_mcast_grp);
    if (err)
        goto err9;

    err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
                rxe->attr.max_total_mcast_qp_attach);
    if (err)
        goto err10;

    return 0;

err10:
    rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
    rxe_pool_cleanup(&rxe->mw_pool);
err8:
    rxe_pool_cleanup(&rxe->mr_pool);
err7:
    rxe_pool_cleanup(&rxe->cq_pool);
err6:
    rxe_pool_cleanup(&rxe->qp_pool);
err5:
    rxe_pool_cleanup(&rxe->srq_pool);
err4:
    rxe_pool_cleanup(&rxe->ah_pool);
err3:
    rxe_pool_cleanup(&rxe->pd_pool);
err2:
    rxe_pool_cleanup(&rxe->uc_pool);
err1:
    return err;
}

/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
    int err;

    /* init default device parameters */
    rxe_init_device_param(rxe);

    err = rxe_init_ports(rxe);
    if (err)
        goto err1;

    err = rxe_init_pools(rxe);
    if (err)
        goto err2;

    /* init pending mmap list */
    spin_lock_init(&rxe->mmap_offset_lock);
    spin_lock_init(&rxe->pending_lock);
    INIT_LIST_HEAD(&rxe->pending_mmaps);
    INIT_LIST_HEAD(&rxe->list);

    mutex_init(&rxe->usdev_lock);

    return 0;

err2:
    rxe_cleanup_ports(rxe);
err1:
    return err;
}

int rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
    struct rxe_port *port = &rxe->port;
    enum ib_mtu mtu;

    mtu = eth_mtu_int_to_enum(ndev_mtu);

    /* Make sure that new MTU in range */
    mtu = mtu ? min_t(enum ib_mtu, mtu, RXE_PORT_MAX_MTU) : IB_MTU_256;

    port->attr.active_mtu = mtu;
    port->mtu_cap = ib_mtu_enum_to_int(mtu);

    return 0;
}
EXPORT_SYMBOL(rxe_set_mtu);

/* called by ifc layer to create new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu)
{
    int err;

    kref_init(&rxe->ref_cnt);

    err = rxe_init(rxe);
    if (err)
        goto err1;

    err = rxe_set_mtu(rxe, mtu);
    if (err)
        goto err1;

    err = rxe_register_device(rxe);
    if (err)
        goto err1;

    return 0;

err1:
    rxe_dev_put(rxe);
    return err;
}
EXPORT_SYMBOL(rxe_add);

/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
{
    rxe_unregister_device(rxe);

    rxe_dev_put(rxe);
}
EXPORT_SYMBOL(rxe_remove);

static int __init rxe_module_init(void)
{
    int err;

    /* initialize slab caches for managed objects */
    err = rxe_cache_init();
    if (err) {
        pr_err("rxe: unable to init object pools\n");
        return err;
    }

    err = rxe_net_init();
    if (err) {
        pr_err("rxe: unable to init\n");
        rxe_cache_exit();
        return err;
    }
    pr_info("rxe: loaded\n");

    return 0;
}

static void __exit rxe_module_exit(void)
{
    rxe_remove_all();
    rxe_net_exit();
    rxe_cache_exit();

    pr_info("rxe: unloaded\n");
}

module_init(rxe_module_init);
module_exit(rxe_module_exit);
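Per the comments in this file, rxe_add() and rxe_remove() form the contract with the network-facing ifc layer (rxe_net.c elsewhere in this series): the caller allocates the device with ib_alloc_device() and hands it to rxe_add(), which drops its own reference if anything fails. A hypothetical caller, sketched (example_attach/example_detach are not part of the patch):

    /* struct rxe_dev embeds ib_dev as its first member, hence the cast. */
    static struct rxe_dev *example_attach(unsigned int ndev_mtu)
    {
        struct rxe_dev *rxe;

        rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
        if (!rxe)
            return NULL;

        if (rxe_add(rxe, ndev_mtu))     /* inits + registers; puts ref on error */
            return NULL;

        return rxe;
    }

    static void example_detach(struct rxe_dev *rxe)
    {
        rxe_remove(rxe);                /* unregisters and drops the last ref */
    }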