Skip to content

Commit d41e9b2

Browse files
committed
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma fixes from Jason Gunthorpe:
 "Two security issues and several small bug fixes. Things seem to have
  stabilized for this release here.

  Summary:

   - Significant out of bounds access security issue in i40iw

   - Fix misuse of mmu notifiers in hfi1

   - Several errors in the register map/usage in hns

   - Missing error returns in mthca"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  RDMA/hns: Bugfix for memory window mtpt configuration
  RDMA/hns: Fix retry_cnt and rnr_cnt when querying QP
  RDMA/hns: Fix wrong field of SRQ number the device supports
  IB/hfi1: Ensure correct mm is used at all times
  RDMA/i40iw: Address an mmap handler exploit in i40iw
  IB/mthca: fix return value of error branch in mthca_init_cq()
2 parents 76dc2bf + 17475e1 commit d41e9b2

13 files changed

Lines changed: 98 additions & 93 deletions

File tree

drivers/infiniband/hw/hfi1/file_ops.c

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/*
2+
* Copyright(c) 2020 Cornelis Networks, Inc.
23
* Copyright(c) 2015-2020 Intel Corporation.
34
*
45
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -206,8 +207,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
206207
spin_lock_init(&fd->tid_lock);
207208
spin_lock_init(&fd->invalid_lock);
208209
fd->rec_cpu_num = -1; /* no cpu affinity by default */
209-
fd->mm = current->mm;
210-
mmgrab(fd->mm);
211210
fd->dd = dd;
212211
fp->private_data = fd;
213212
return 0;
@@ -711,7 +710,6 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
711710

712711
deallocate_ctxt(uctxt);
713712
done:
714-
mmdrop(fdata->mm);
715713

716714
if (atomic_dec_and_test(&dd->user_refcount))
717715
complete(&dd->user_comp);

drivers/infiniband/hw/hfi1/hfi.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#ifndef _HFI1_KERNEL_H
22
#define _HFI1_KERNEL_H
33
/*
4+
* Copyright(c) 2020 Cornelis Networks, Inc.
45
* Copyright(c) 2015-2020 Intel Corporation.
56
*
67
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -1451,7 +1452,6 @@ struct hfi1_filedata {
14511452
u32 invalid_tid_idx;
14521453
/* protect invalid_tids array and invalid_tid_idx */
14531454
spinlock_t invalid_lock;
1454-
struct mm_struct *mm;
14551455
};
14561456

14571457
extern struct xarray hfi1_dev_table;

drivers/infiniband/hw/hfi1/mmu_rb.c

Lines changed: 35 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/*
2+
* Copyright(c) 2020 Cornelis Networks, Inc.
23
* Copyright(c) 2016 - 2017 Intel Corporation.
34
*
45
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -48,23 +49,11 @@
4849
#include <linux/rculist.h>
4950
#include <linux/mmu_notifier.h>
5051
#include <linux/interval_tree_generic.h>
52+
#include <linux/sched/mm.h>
5153

5254
#include "mmu_rb.h"
5355
#include "trace.h"
5456

55-
struct mmu_rb_handler {
56-
struct mmu_notifier mn;
57-
struct rb_root_cached root;
58-
void *ops_arg;
59-
spinlock_t lock; /* protect the RB tree */
60-
struct mmu_rb_ops *ops;
61-
struct mm_struct *mm;
62-
struct list_head lru_list;
63-
struct work_struct del_work;
64-
struct list_head del_list;
65-
struct workqueue_struct *wq;
66-
};
67-
6857
static unsigned long mmu_node_start(struct mmu_rb_node *);
6958
static unsigned long mmu_node_last(struct mmu_rb_node *);
7059
static int mmu_notifier_range_start(struct mmu_notifier *,
@@ -92,37 +81,36 @@ static unsigned long mmu_node_last(struct mmu_rb_node *node)
9281
return PAGE_ALIGN(node->addr + node->len) - 1;
9382
}
9483

95-
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
84+
int hfi1_mmu_rb_register(void *ops_arg,
9685
struct mmu_rb_ops *ops,
9786
struct workqueue_struct *wq,
9887
struct mmu_rb_handler **handler)
9988
{
100-
struct mmu_rb_handler *handlr;
89+
struct mmu_rb_handler *h;
10190
int ret;
10291

103-
handlr = kmalloc(sizeof(*handlr), GFP_KERNEL);
104-
if (!handlr)
92+
h = kmalloc(sizeof(*h), GFP_KERNEL);
93+
if (!h)
10594
return -ENOMEM;
10695

107-
handlr->root = RB_ROOT_CACHED;
108-
handlr->ops = ops;
109-
handlr->ops_arg = ops_arg;
110-
INIT_HLIST_NODE(&handlr->mn.hlist);
111-
spin_lock_init(&handlr->lock);
112-
handlr->mn.ops = &mn_opts;
113-
handlr->mm = mm;
114-
INIT_WORK(&handlr->del_work, handle_remove);
115-
INIT_LIST_HEAD(&handlr->del_list);
116-
INIT_LIST_HEAD(&handlr->lru_list);
117-
handlr->wq = wq;
118-
119-
ret = mmu_notifier_register(&handlr->mn, handlr->mm);
96+
h->root = RB_ROOT_CACHED;
97+
h->ops = ops;
98+
h->ops_arg = ops_arg;
99+
INIT_HLIST_NODE(&h->mn.hlist);
100+
spin_lock_init(&h->lock);
101+
h->mn.ops = &mn_opts;
102+
INIT_WORK(&h->del_work, handle_remove);
103+
INIT_LIST_HEAD(&h->del_list);
104+
INIT_LIST_HEAD(&h->lru_list);
105+
h->wq = wq;
106+
107+
ret = mmu_notifier_register(&h->mn, current->mm);
120108
if (ret) {
121-
kfree(handlr);
109+
kfree(h);
122110
return ret;
123111
}
124112

125-
*handler = handlr;
113+
*handler = h;
126114
return 0;
127115
}
128116

@@ -134,7 +122,7 @@ void hfi1_mmu_rb_unregister(struct mmu_rb_handler *handler)
134122
struct list_head del_list;
135123

136124
/* Unregister first so we don't get any more notifications. */
137-
mmu_notifier_unregister(&handler->mn, handler->mm);
125+
mmu_notifier_unregister(&handler->mn, handler->mn.mm);
138126

139127
/*
140128
* Make sure the wq delete handler is finished running. It will not
@@ -166,6 +154,10 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
166154
int ret = 0;
167155

168156
trace_hfi1_mmu_rb_insert(mnode->addr, mnode->len);
157+
158+
if (current->mm != handler->mn.mm)
159+
return -EPERM;
160+
169161
spin_lock_irqsave(&handler->lock, flags);
170162
node = __mmu_rb_search(handler, mnode->addr, mnode->len);
171163
if (node) {
@@ -180,6 +172,7 @@ int hfi1_mmu_rb_insert(struct mmu_rb_handler *handler,
180172
__mmu_int_rb_remove(mnode, &handler->root);
181173
list_del(&mnode->list); /* remove from LRU list */
182174
}
175+
mnode->handler = handler;
183176
unlock:
184177
spin_unlock_irqrestore(&handler->lock, flags);
185178
return ret;
@@ -217,6 +210,9 @@ bool hfi1_mmu_rb_remove_unless_exact(struct mmu_rb_handler *handler,
217210
unsigned long flags;
218211
bool ret = false;
219212

213+
if (current->mm != handler->mn.mm)
214+
return ret;
215+
220216
spin_lock_irqsave(&handler->lock, flags);
221217
node = __mmu_rb_search(handler, addr, len);
222218
if (node) {
@@ -239,6 +235,9 @@ void hfi1_mmu_rb_evict(struct mmu_rb_handler *handler, void *evict_arg)
239235
unsigned long flags;
240236
bool stop = false;
241237

238+
if (current->mm != handler->mn.mm)
239+
return;
240+
242241
INIT_LIST_HEAD(&del_list);
243242

244243
spin_lock_irqsave(&handler->lock, flags);
@@ -272,6 +271,9 @@ void hfi1_mmu_rb_remove(struct mmu_rb_handler *handler,
272271
{
273272
unsigned long flags;
274273

274+
if (current->mm != handler->mn.mm)
275+
return;
276+
275277
/* Validity of handler and node pointers has been checked by caller. */
276278
trace_hfi1_mmu_rb_remove(node->addr, node->len);
277279
spin_lock_irqsave(&handler->lock, flags);

drivers/infiniband/hw/hfi1/mmu_rb.h

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/*
2+
* Copyright(c) 2020 Cornelis Networks, Inc.
23
* Copyright(c) 2016 Intel Corporation.
34
*
45
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -54,6 +55,7 @@ struct mmu_rb_node {
5455
unsigned long len;
5556
unsigned long __last;
5657
struct rb_node node;
58+
struct mmu_rb_handler *handler;
5759
struct list_head list;
5860
};
5961

@@ -71,7 +73,19 @@ struct mmu_rb_ops {
7173
void *evict_arg, bool *stop);
7274
};
7375

74-
int hfi1_mmu_rb_register(void *ops_arg, struct mm_struct *mm,
76+
struct mmu_rb_handler {
77+
struct mmu_notifier mn;
78+
struct rb_root_cached root;
79+
void *ops_arg;
80+
spinlock_t lock; /* protect the RB tree */
81+
struct mmu_rb_ops *ops;
82+
struct list_head lru_list;
83+
struct work_struct del_work;
84+
struct list_head del_list;
85+
struct workqueue_struct *wq;
86+
};
87+
88+
int hfi1_mmu_rb_register(void *ops_arg,
7589
struct mmu_rb_ops *ops,
7690
struct workqueue_struct *wq,
7791
struct mmu_rb_handler **handler);

drivers/infiniband/hw/hfi1/user_exp_rcv.c

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/*
2+
* Copyright(c) 2020 Cornelis Networks, Inc.
23
* Copyright(c) 2015-2018 Intel Corporation.
34
*
45
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -173,15 +174,18 @@ static void unpin_rcv_pages(struct hfi1_filedata *fd,
173174
{
174175
struct page **pages;
175176
struct hfi1_devdata *dd = fd->uctxt->dd;
177+
struct mm_struct *mm;
176178

177179
if (mapped) {
178180
pci_unmap_single(dd->pcidev, node->dma_addr,
179181
node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
180182
pages = &node->pages[idx];
183+
mm = mm_from_tid_node(node);
181184
} else {
182185
pages = &tidbuf->pages[idx];
186+
mm = current->mm;
183187
}
184-
hfi1_release_user_pages(fd->mm, pages, npages, mapped);
188+
hfi1_release_user_pages(mm, pages, npages, mapped);
185189
fd->tid_n_pinned -= npages;
186190
}
187191

@@ -216,12 +220,12 @@ static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
216220
* pages, accept the amount pinned so far and program only that.
217221
* User space knows how to deal with partially programmed buffers.
218222
*/
219-
if (!hfi1_can_pin_pages(dd, fd->mm, fd->tid_n_pinned, npages)) {
223+
if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
220224
kfree(pages);
221225
return -ENOMEM;
222226
}
223227

224-
pinned = hfi1_acquire_user_pages(fd->mm, vaddr, npages, true, pages);
228+
pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
225229
if (pinned <= 0) {
226230
kfree(pages);
227231
return pinned;
@@ -756,7 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
756760

757761
if (fd->use_mn) {
758762
ret = mmu_interval_notifier_insert(
759-
&node->notifier, fd->mm,
763+
&node->notifier, current->mm,
760764
tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
761765
&tid_mn_ops);
762766
if (ret)

drivers/infiniband/hw/hfi1/user_exp_rcv.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#ifndef _HFI1_USER_EXP_RCV_H
22
#define _HFI1_USER_EXP_RCV_H
33
/*
4+
* Copyright(c) 2020 - Cornelis Networks, Inc.
45
* Copyright(c) 2015 - 2017 Intel Corporation.
56
*
67
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -95,4 +96,9 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
9596
int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
9697
struct hfi1_tid_info *tinfo);
9798

99+
static inline struct mm_struct *mm_from_tid_node(struct tid_rb_node *node)
100+
{
101+
return node->notifier.mm;
102+
}
103+
98104
#endif /* _HFI1_USER_EXP_RCV_H */

drivers/infiniband/hw/hfi1/user_sdma.c

Lines changed: 7 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/*
2+
* Copyright(c) 2020 - Cornelis Networks, Inc.
23
* Copyright(c) 2015 - 2018 Intel Corporation.
34
*
45
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -188,7 +189,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
188189
atomic_set(&pq->n_reqs, 0);
189190
init_waitqueue_head(&pq->wait);
190191
atomic_set(&pq->n_locked, 0);
191-
pq->mm = fd->mm;
192192

193193
iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
194194
activate_packet_queue, NULL, NULL);
@@ -230,7 +230,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
230230

231231
cq->nentries = hfi1_sdma_comp_ring_size;
232232

233-
ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
233+
ret = hfi1_mmu_rb_register(pq, &sdma_rb_ops, dd->pport->hfi1_wq,
234234
&pq->handler);
235235
if (ret) {
236236
dd_dev_err(dd, "Failed to register with MMU %d", ret);
@@ -980,13 +980,13 @@ static int pin_sdma_pages(struct user_sdma_request *req,
980980

981981
npages -= node->npages;
982982
retry:
983-
if (!hfi1_can_pin_pages(pq->dd, pq->mm,
983+
if (!hfi1_can_pin_pages(pq->dd, current->mm,
984984
atomic_read(&pq->n_locked), npages)) {
985985
cleared = sdma_cache_evict(pq, npages);
986986
if (cleared >= npages)
987987
goto retry;
988988
}
989-
pinned = hfi1_acquire_user_pages(pq->mm,
989+
pinned = hfi1_acquire_user_pages(current->mm,
990990
((unsigned long)iovec->iov.iov_base +
991991
(node->npages * PAGE_SIZE)), npages, 0,
992992
pages + node->npages);
@@ -995,7 +995,7 @@ static int pin_sdma_pages(struct user_sdma_request *req,
995995
return pinned;
996996
}
997997
if (pinned != npages) {
998-
unpin_vector_pages(pq->mm, pages, node->npages, pinned);
998+
unpin_vector_pages(current->mm, pages, node->npages, pinned);
999999
return -EFAULT;
10001000
}
10011001
kfree(node->pages);
@@ -1008,7 +1008,8 @@ static int pin_sdma_pages(struct user_sdma_request *req,
10081008
static void unpin_sdma_pages(struct sdma_mmu_node *node)
10091009
{
10101010
if (node->npages) {
1011-
unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
1011+
unpin_vector_pages(mm_from_sdma_node(node), node->pages, 0,
1012+
node->npages);
10121013
atomic_sub(node->npages, &node->pq->n_locked);
10131014
}
10141015
}

drivers/infiniband/hw/hfi1/user_sdma.h

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
#ifndef _HFI1_USER_SDMA_H
22
#define _HFI1_USER_SDMA_H
33
/*
4+
* Copyright(c) 2020 - Cornelis Networks, Inc.
45
* Copyright(c) 2015 - 2018 Intel Corporation.
56
*
67
* This file is provided under a dual BSD/GPLv2 license. When using or
@@ -133,7 +134,6 @@ struct hfi1_user_sdma_pkt_q {
133134
unsigned long unpinned;
134135
struct mmu_rb_handler *handler;
135136
atomic_t n_locked;
136-
struct mm_struct *mm;
137137
};
138138

139139
struct hfi1_user_sdma_comp_q {
@@ -250,4 +250,9 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
250250
struct iovec *iovec, unsigned long dim,
251251
unsigned long *count);
252252

253+
static inline struct mm_struct *mm_from_sdma_node(struct sdma_mmu_node *node)
254+
{
255+
return node->rb.handler->mn.mm;
256+
}
257+
253258
#endif /* _HFI1_USER_SDMA_H */

drivers/infiniband/hw/hns/hns_roce_hw_v2.c

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2936,6 +2936,7 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw)
29362936

29372937
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_R_INV_EN_S, 1);
29382938
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_L_INV_EN_S, 1);
2939+
roce_set_bit(mpt_entry->byte_8_mw_cnt_en, V2_MPT_BYTE_8_LW_EN_S, 1);
29392940

29402941
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_PA_S, 0);
29412942
roce_set_bit(mpt_entry->byte_12_mw_pa, V2_MPT_BYTE_12_MR_MW_S, 1);
@@ -4989,11 +4990,11 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
49894990
V2_QPC_BYTE_28_AT_M,
49904991
V2_QPC_BYTE_28_AT_S);
49914992
qp_attr->retry_cnt = roce_get_field(context.byte_212_lsn,
4992-
V2_QPC_BYTE_212_RETRY_CNT_M,
4993-
V2_QPC_BYTE_212_RETRY_CNT_S);
4993+
V2_QPC_BYTE_212_RETRY_NUM_INIT_M,
4994+
V2_QPC_BYTE_212_RETRY_NUM_INIT_S);
49944995
qp_attr->rnr_retry = roce_get_field(context.byte_244_rnr_rxack,
4995-
V2_QPC_BYTE_244_RNR_CNT_M,
4996-
V2_QPC_BYTE_244_RNR_CNT_S);
4996+
V2_QPC_BYTE_244_RNR_NUM_INIT_M,
4997+
V2_QPC_BYTE_244_RNR_NUM_INIT_S);
49974998

49984999
done:
49995000
qp_attr->cur_qp_state = qp_attr->qp_state;

0 commit comments

Comments (0)