- 12 Jun, 2018 2 commits
-
-
Kees Cook authored
The kmalloc() function has a 2-factor argument form, kmalloc_array(). This patch replaces cases of:

  kmalloc(a * b, gfp)

with:

  kmalloc_array(a, b, gfp)

as well as handling cases of:

  kmalloc(a * b * c, gfp)

with:

  kmalloc(array3_size(a, b, c), gfp)

as it's slightly less ugly than:

  kmalloc_array(array_size(a, b), c, gfp)

This does, however, attempt to ignore constant size factors like:

  kmalloc(4 * 1024, gfp)

though any constants defined via macros get caught up in the conversion. Any factors with a sizeof() of "unsigned char", "char", and "u8" were dropped, since they're redundant. The tools/ directory was manually excluded, since it has its own implementation of kmalloc(). The Coccinelle script used for this was:

  // Fix redundant parens around sizeof().
  @@ type TYPE; expression THING, E; @@
  (
    kmalloc( - (sizeof(TYPE)) * E + sizeof(TYPE) * E , ...)
  | kmalloc( - (sizeof(THING)) * E + sizeof(THING) * E , ...)
  )

  // Drop single-byte sizes and redundant parens.
  @@ expression COUNT; typedef u8; typedef __u8; @@
  (
    kmalloc( - sizeof(u8) * (COUNT) + COUNT , ...)
  | kmalloc( - sizeof(__u8) * (COUNT) + COUNT , ...)
  | kmalloc( - sizeof(char) * (COUNT) + COUNT , ...)
  | kmalloc( - sizeof(unsigned char) * (COUNT) + COUNT , ...)
  | kmalloc( - sizeof(u8) * COUNT + COUNT , ...)
  | kmalloc( - sizeof(__u8) * COUNT + COUNT , ...)
  | kmalloc( - sizeof(char) * COUNT + COUNT , ...)
  | kmalloc( - sizeof(unsigned char) * COUNT + COUNT , ...)
  )

  // 2-factor product with sizeof(type/expression) and identifier or constant.
  @@ type TYPE; expression THING; identifier COUNT_ID; constant COUNT_CONST; @@
  (
    - kmalloc + kmalloc_array ( - sizeof(TYPE) * (COUNT_ID) + COUNT_ID, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(TYPE) * COUNT_ID + COUNT_ID, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(TYPE) * (COUNT_CONST) + COUNT_CONST, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(TYPE) * COUNT_CONST + COUNT_CONST, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * (COUNT_ID) + COUNT_ID, sizeof(THING) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * COUNT_ID + COUNT_ID, sizeof(THING) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * (COUNT_CONST) + COUNT_CONST, sizeof(THING) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * COUNT_CONST + COUNT_CONST, sizeof(THING) , ...)
  )

  // 2-factor product, only identifiers.
  @@ identifier SIZE, COUNT; @@
    - kmalloc + kmalloc_array ( - SIZE * COUNT + COUNT, SIZE , ...)

  // 3-factor product with 1 sizeof(type) or sizeof(expression), with
  // redundant parens removed.
  @@ expression THING; identifier STRIDE, COUNT; type TYPE; @@
  (
    kmalloc( - sizeof(TYPE) * (COUNT) * (STRIDE) + array3_size(COUNT, STRIDE, sizeof(TYPE)) , ...)
  | kmalloc( - sizeof(TYPE) * (COUNT) * STRIDE + array3_size(COUNT, STRIDE, sizeof(TYPE)) , ...)
  | kmalloc( - sizeof(TYPE) * COUNT * (STRIDE) + array3_size(COUNT, STRIDE, sizeof(TYPE)) , ...)
  | kmalloc( - sizeof(TYPE) * COUNT * STRIDE + array3_size(COUNT, STRIDE, sizeof(TYPE)) , ...)
  | kmalloc( - sizeof(THING) * (COUNT) * (STRIDE) + array3_size(COUNT, STRIDE, sizeof(THING)) , ...)
  | kmalloc( - sizeof(THING) * (COUNT) * STRIDE + array3_size(COUNT, STRIDE, sizeof(THING)) , ...)
  | kmalloc( - sizeof(THING) * COUNT * (STRIDE) + array3_size(COUNT, STRIDE, sizeof(THING)) , ...)
  | kmalloc( - sizeof(THING) * COUNT * STRIDE + array3_size(COUNT, STRIDE, sizeof(THING)) , ...)
  )

  // 3-factor product with 2 sizeof(variable), with redundant parens removed.
  @@ expression THING1, THING2; identifier COUNT; type TYPE1, TYPE2; @@
  (
    kmalloc( - sizeof(TYPE1) * sizeof(TYPE2) * COUNT + array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2)) , ...)
  | kmalloc( - sizeof(TYPE1) * sizeof(TYPE2) * (COUNT) + array3_size(COUNT, sizeof(TYPE1), sizeof(TYPE2)) , ...)
  | kmalloc( - sizeof(THING1) * sizeof(THING2) * COUNT + array3_size(COUNT, sizeof(THING1), sizeof(THING2)) , ...)
  | kmalloc( - sizeof(THING1) * sizeof(THING2) * (COUNT) + array3_size(COUNT, sizeof(THING1), sizeof(THING2)) , ...)
  | kmalloc( - sizeof(TYPE1) * sizeof(THING2) * COUNT + array3_size(COUNT, sizeof(TYPE1), sizeof(THING2)) , ...)
  | kmalloc( - sizeof(TYPE1) * sizeof(THING2) * (COUNT) + array3_size(COUNT, sizeof(TYPE1), sizeof(THING2)) , ...)
  )

  // 3-factor product, only identifiers, with redundant parens removed.
  @@ identifier STRIDE, SIZE, COUNT; @@
  (
    kmalloc( - (COUNT) * STRIDE * SIZE + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - COUNT * (STRIDE) * SIZE + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - COUNT * STRIDE * (SIZE) + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - (COUNT) * (STRIDE) * SIZE + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - COUNT * (STRIDE) * (SIZE) + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - (COUNT) * STRIDE * (SIZE) + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - (COUNT) * (STRIDE) * (SIZE) + array3_size(COUNT, STRIDE, SIZE) , ...)
  | kmalloc( - COUNT * STRIDE * SIZE + array3_size(COUNT, STRIDE, SIZE) , ...)
  )

  // Any remaining multi-factor products, first at least 3-factor products,
  // when they're not all constants...
  @@ expression E1, E2, E3; constant C1, C2, C3; @@
  (
    kmalloc(C1 * C2 * C3, ...)
  | kmalloc( - (E1) * E2 * E3 + array3_size(E1, E2, E3) , ...)
  | kmalloc( - (E1) * (E2) * E3 + array3_size(E1, E2, E3) , ...)
  | kmalloc( - (E1) * (E2) * (E3) + array3_size(E1, E2, E3) , ...)
  | kmalloc( - E1 * E2 * E3 + array3_size(E1, E2, E3) , ...)
  )

  // And then all remaining 2 factors products when they're not all constants,
  // keeping sizeof() as the second factor argument.
  @@ expression THING, E1, E2; type TYPE; constant C1, C2, C3; @@
  (
    kmalloc(sizeof(THING) * C2, ...)
  | kmalloc(sizeof(TYPE) * C2, ...)
  | kmalloc(C1 * C2 * C3, ...)
  | kmalloc(C1 * C2, ...)
  | - kmalloc + kmalloc_array ( - sizeof(TYPE) * (E2) + E2, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(TYPE) * E2 + E2, sizeof(TYPE) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * (E2) + E2, sizeof(THING) , ...)
  | - kmalloc + kmalloc_array ( - sizeof(THING) * E2 + E2, sizeof(THING) , ...)
  | - kmalloc + kmalloc_array ( - (E1) * E2 + E1, E2 , ...)
  | - kmalloc + kmalloc_array ( - (E1) * (E2) + E1, E2 , ...)
  | - kmalloc + kmalloc_array ( - E1 * E2 + E1, E2 , ...)
  )

Signed-off-by:
Kees Cook <keescook@chromium.org>
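For illustration, a minimal sketch of what such a conversion looks like at a call site; the struct and function names below are made up, not taken from the patch:

  #include <linux/slab.h>
  #include <linux/overflow.h>

  struct item { int a, b; };

  /* Before: an open-coded multiplication can overflow silently. */
  static struct item *alloc_items_old(size_t n)
  {
          return kmalloc(sizeof(struct item) * n, GFP_KERNEL);
  }

  /* After: kmalloc_array() saturates the product to SIZE_MAX on overflow,
   * so the allocation fails instead of being silently undersized. */
  static struct item *alloc_items_new(size_t n)
  {
          return kmalloc_array(n, sizeof(struct item), GFP_KERNEL);
  }

  /* 3-factor sizes use array3_size() from <linux/overflow.h>. */
  static void *alloc_grid(size_t rows, size_t cols)
  {
          return kmalloc(array3_size(rows, cols, sizeof(struct item)),
                         GFP_KERNEL);
  }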
-
Dan Carpenter authored
root_device_register() returns error pointers; it never returns NULL. Fixes: ca90800a ("test_overflow: Add memory allocation overflow tests") Signed-off-by:
Dan Carpenter <dan.carpenter@oracle.com> Signed-off-by:
Kees Cook <keescook@chromium.org>
-
- 08 Jun, 2018 6 commits
-
-
Randy Dunlap authored
Fix the missing MODULE_LICENSE() warning in lib/ucs2_string.c:

  WARNING: modpost: missing MODULE_LICENSE() in lib/ucs2_string.o
  see include/linux/module.h for more information

Link: http://lkml.kernel.org/r/b2505bb4-dcf5-fc46-443d-e47db1cb2f59@infradead.org Signed-off-by:
Randy Dunlap <rdunlap@infradead.org> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Matthew Garrett <matthew.garrett@nebula.com> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
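A sketch of the kind of one-line fix involved; the actual license string added to lib/ucs2_string.c is not quoted in this log, so the "GPL v2" below is an assumption:

  #include <linux/module.h>

  /* Declaring a license silences the modpost warning; the exact string
   * used in the patch is assumed here, not confirmed by the log. */
  MODULE_LICENSE("GPL v2");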
-
Vasily Averin authored
MPI headers contain definitions for a huge number of non-existent functions. Most of these functions were removed in 2012 by Dmitry Kasatkin in:

- 7cf4206a ("Remove unused code from MPI library")
- 9e235dca ("Revert "crypto: GnuPG based MPI lib - additional ...")
- bc95eead ("lib/mpi: removed unused functions")

however the headers were not updated properly. I also deleted some unused macros.

Link: http://lkml.kernel.org/r/fb2fc1ef-1185-f0a3-d8d0-173d2f97bbaf@virtuozzo.com Signed-off-by:
Vasily Averin <vvs@virtuozzo.com> Reviewed-by:
Andrew Morton <akpm@linux-foundation.org> Cc: Dmitry Kasatkin <dmitry.kasatkin@huawei.com> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
-
Sebastian Andrzej Siewior authored
percpu_ida() decouples disabling interrupts from the locking operations. This breaks some assumptions if the locking operations are replaced like they are under -RT. The same locking can be achieved by avoiding local_irq_save() and using spin_lock_irqsave() instead. percpu_ida_alloc() gains one more preemption point because after unlocking the fastpath and before the pool lock is acquired, the interrupts are briefly enabled. Link: http://lkml.kernel.org/r/20180504153218.7301-1-bigeasy@linutronix.de Signed-off-by:
Sebastian Andrzej Siewior <bigeasy@linutronix.de> Reviewed-by:
Andrew Morton <akpm@linux-foundation.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Nicholas Bellinger <nab@linux-iscsi.org> Cc: Shaohua Li <shli@fb.com> Cc: Kent Overstreet <kent.overstreet@gmail.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Jens Axboe <axboe@kernel.dk> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
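A hedged sketch of the locking pattern described above, using a stand-in lock rather than the real percpu_ida internals:

  #include <linux/spinlock.h>

  static DEFINE_SPINLOCK(pool_lock);

  /* Old pattern: IRQ disabling decoupled from the lock, which breaks
   * when spinlocks become sleeping locks under -RT. */
  static void pool_op_old(void)
  {
          unsigned long flags;

          local_irq_save(flags);
          spin_lock(&pool_lock);
          /* ... */
          spin_unlock(&pool_lock);
          local_irq_restore(flags);
  }

  /* New pattern: one combined operation keeps the semantics on -RT. */
  static void pool_op_new(void)
  {
          unsigned long flags;

          spin_lock_irqsave(&pool_lock, flags);
          /* ... */
          spin_unlock_irqrestore(&pool_lock, flags);
  }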
-
Matthew Wilcox authored
Improve the scalability of the IDA by using the per-IDA xa_lock rather than the global simple_ida_lock. IDAs are not typically used in performance-sensitive locations, but since we have this lock anyway, we can use it. It is also a step towards converting the IDA from the radix tree to the XArray. [akpm@linux-foundation.org: idr.c needs xarray.h] Link: http://lkml.kernel.org/r/20180331125332.GF13332@bombadil.infradead.org Signed-off-by:
Matthew Wilcox <mawilcox@microsoft.com> Reviewed-by:
Andrew Morton <akpm@linux-foundation.org> Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Cc: Tejun Heo <tj@kernel.org> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
-
Yury Norov authored
Use the BITS_TO_LONGS() macro to avoid calculating the remainder (bits % BITS_PER_LONG). On ARM64 this saves 5 instructions per function: 16 before and 11 after. Link: http://lkml.kernel.org/r/20180411145914.6011-1-ynorov@caviumnetworks.com Signed-off-by:
Yury Norov <ynorov@caviumnetworks.com> Reviewed-by:
Andrew Morton <akpm@linux-foundation.org> Cc: Matthew Wilcox <mawilcox@microsoft.com> Cc: Rasmus Villemoes <linux@rasmusvillemoes.dk> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
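For context, BITS_TO_LONGS() rounds a bit count up to a whole number of longs, so no separate remainder handling is needed; a minimal usage sketch with a hypothetical bitmap, not the patched code:

  #include <linux/bitops.h>
  #include <linux/slab.h>

  static unsigned long *alloc_bitmap(unsigned int nbits)
  {
          /* BITS_TO_LONGS(nbits) == DIV_ROUND_UP(nbits, BITS_PER_LONG),
           * so the bits % BITS_PER_LONG remainder never appears. */
          return kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long),
                         GFP_KERNEL);
  }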
-
Michal Hocko authored
kvmalloc warned about an incompatible gfp_mask to catch abusers (mostly GFP_NOFS), with the intention that this would motivate authors of the code to fix those call sites. Linus argues that this just motivates people to add even more hacks like:

  if (gfp == GFP_KERNEL)
          kvmalloc
  else
          kmalloc

I haven't seen this happening much (Linus pointed to bucket_lock, which special-cases an atomic allocation, but my git foo hasn't found much more), but it is true that such cases could grow in the future. Therefore Linus suggested simply not falling back to vmalloc for incompatible gfp flags and rather sticking with the kmalloc path.

Link: http://lkml.kernel.org/r/20180601115329.27807-1-mhocko@kernel.org Signed-off-by:
Michal Hocko <mhocko@suse.com> Suggested-by:
Linus Torvalds <torvalds@linux-foundation.org> Cc: Tom Herbert <tom@quantonium.net> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
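A hedged sketch of the resulting behaviour (the real logic lives in mm/util.c and differs in detail): for gfp masks that vmalloc cannot honour, kvmalloc now stays on the kmalloc path instead of warning.

  #include <linux/slab.h>
  #include <linux/vmalloc.h>
  #include <linux/mm.h>

  /* Sketch only, not the actual kvmalloc_node() implementation. */
  static void *kvmalloc_node_sketch(size_t size, gfp_t flags, int node)
  {
          void *ret;

          /* vmalloc needs GFP_KERNEL semantics; anything else (GFP_NOFS,
           * GFP_ATOMIC, ...) gets no vmalloc fallback any more. */
          if ((flags & GFP_KERNEL) != GFP_KERNEL)
                  return kmalloc_node(size, flags, node);

          ret = kmalloc_node(size, flags | __GFP_NOWARN, node);
          if (ret || size <= PAGE_SIZE)
                  return ret;

          return vmalloc_node(size, node);   /* simplified fallback */
  }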
-
- 05 Jun, 2018 5 commits
-
-
Kees Cook authored
Make sure that the memory allocators are behaving as expected in the face of overflows of multiplied arguments or when using the array_size()-family helpers. Example output of the new tests (with the expected __alloc_pages_slowpath and vmalloc warnings about refusing giant allocations removed):

  [   93.062076] test_overflow: kmalloc detected saturation
  [   93.062988] test_overflow: kmalloc_node detected saturation
  [   93.063818] test_overflow: kzalloc detected saturation
  [   93.064539] test_overflow: kzalloc_node detected saturation
  [   93.120386] test_overflow: kvmalloc detected saturation
  [   93.143458] test_overflow: kvmalloc_node detected saturation
  [   93.166861] test_overflow: kvzalloc detected saturation
  [   93.189924] test_overflow: kvzalloc_node detected saturation
  [   93.221671] test_overflow: vmalloc detected saturation
  [   93.246326] test_overflow: vmalloc_node detected saturation
  [   93.270260] test_overflow: vzalloc detected saturation
  [   93.293824] test_overflow: vzalloc_node detected saturation
  [   93.294597] test_overflow: devm_kmalloc detected saturation
  [   93.295383] test_overflow: devm_kzalloc detected saturation
  [   93.296217] test_overflow: all tests passed

Signed-off-by:
Kees Cook <keescook@chromium.org>
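The saturation being tested comes from the array_size() family itself: on overflow the helpers return SIZE_MAX, so the allocator sees an impossible size and fails cleanly. A hedged usage sketch with arbitrary values:

  #include <linux/overflow.h>
  #include <linux/slab.h>
  #include <linux/printk.h>

  static void saturation_demo(void)
  {
          size_t a = SIZE_MAX / 2, b = 3;   /* a * b would wrap */
          void *p;

          /* array_size(a, b) saturates to SIZE_MAX instead of wrapping,
           * so kmalloc() is guaranteed to fail here. */
          p = kmalloc(array_size(a, b), GFP_KERNEL);
          if (!p)
                  pr_info("saturation detected as expected\n");
          kfree(p);
  }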
-
Kees Cook authored
This adjusts the overflow test to report failures, and prepares to add allocation tests. Signed-off-by:
Kees Cook <keescook@chromium.org>
-
Rasmus Villemoes authored
Obviously a+b == b+a and a*b == b*a, but the implementation of the fallback checks is not entirely symmetric in how it treats a and b. So we might as well check the (b, a, r, of) tuple as well as the (a, b, r, of) one, for both + and *. Rather than more copy-paste, factor out the common part into check_one_op(). Signed-off-by:
Rasmus Villemoes <linux@rasmusvillemoes.dk> Signed-off-by:
Kees Cook <keescook@chromium.org>
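A hedged illustration of the symmetry being exercised; check_mul_overflow() returns true on overflow, and the test now runs every case in both argument orders (check_one_op is the helper name from the patch, the code below is only a sketch):

  #include <linux/overflow.h>
  #include <linux/types.h>
  #include <linux/bug.h>

  static void check_mul_both_orders(u8 a, u8 b, u8 expected, bool expect_of)
  {
          u8 r;
          bool of;

          of = check_mul_overflow(a, b, &r);      /* (a, b, r, of) tuple */
          WARN_ON(of != expect_of || r != expected);

          of = check_mul_overflow(b, a, &r);      /* (b, a, r, of) tuple */
          WARN_ON(of != expect_of || r != expected);
  }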
-
Rasmus Villemoes authored
This adds a small module for testing that the check_*_overflow functions work as expected, whether implemented in C or using gcc builtins. Example output:

  test_overflow: u8 : 18 tests
  test_overflow: s8 : 19 tests
  test_overflow: u16: 17 tests
  test_overflow: s16: 17 tests
  test_overflow: u32: 17 tests
  test_overflow: s32: 17 tests
  test_overflow: u64: 17 tests
  test_overflow: s64: 21 tests

Signed-off-by:
Rasmus Villemoes <linux@rasmusvillemoes.dk> [kees: add output to commit log, drop u64 tests on 32-bit] Signed-off-by:
Kees Cook <keescook@chromium.org>
-
Geert Uytterhoeven authored
"%pCr" formats the current rate of a clock, and calls clk_get_rate(). The latter obtains a mutex, hence it must not be called from atomic context. Remove support for this rarely-used format, as vsprintf() (and e.g. printk()) must be callable from any context. Any remaining out-of-tree users will start seeing the clock's name printed instead of its rate. Reported-by:
Jia-Ju Bai <baijiaju1990@gmail.com> Fixes: 900cca29 ("lib/vsprintf: add %pC{,n,r} format specifiers for clocks") Link: http://lkml.kernel.org/r/1527845302-12159-5-git-send-email-geert+renesas@glider.be To: Jia-Ju Bai <baijiaju1990@gmail.com> To: Jonathan Corbet <corbet@lwn.net> To: Michael Turquette <mturquette@baylibre.com> To: Stephen Boyd <sboyd@kernel.org> To: Zhang Rui <rui.zhang@intel.com> To: Eduardo Valentin <edubezval@gmail.com> To: Eric Anholt <eric@anholt.net> To: Stefan Wahren <stefan.wahren@i2se.com> To: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: linux-doc@vger.kernel.org Cc: linux-clk@vger.kernel.org Cc: linux-pm@vger.kernel.org Cc: linux-serial@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-renesas-soc@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: Geert Uytterhoeven <geert+renesas@glider.be> Cc: stable@vger.kernel.org # 4.1+ Signed-off-by:
Geert Uytterhoeven <geert+renesas@glider.be> Signed-off-by:
Petr Mladek <pmladek@suse.com>
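For reference, a hedged example of the affected format family: the name-printing variants stay, while the rate-printing one is gone because it had to sleep.

  #include <linux/clk.h>
  #include <linux/printk.h>

  static void report_clk(struct clk *clk)
  {
          /* Safe in any context: "%pC" prints the clock's name. */
          pr_info("clock: %pC\n", clk);

          /* "%pCr" used to print the rate via clk_get_rate(), which takes
           * a mutex and therefore must not run in atomic context; any
           * leftover user now gets the name instead. */
  }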
-
- 03 Jun, 2018 1 commit
-
-
Daniel Borkmann authored
We have one test triggering on eBPF, but let's also add a cBPF example to make sure we keep tracking them. Also add another cBPF test running the maximum number of MSH ops. Signed-off-by:
Daniel Borkmann <daniel@iogearbox.net> Acked-by:
Alexei Starovoitov <ast@kernel.org> Acked-by:
Song Liu <songliubraving@fb.com> Signed-off-by:
Alexei Starovoitov <ast@kernel.org>
-
- 31 May, 2018 1 commit
-
-
Christoph Hellwig authored
Print a useful warning instead. Reported-by:
Finn Thain <fthain@telegraphics.com.au> Tested-by:
Finn Thain <fthain@telegraphics.com.au> Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
- 28 May, 2018 1 commit
-
-
Christoph Hellwig authored
Various PCI bridges (VIA PCI, Xilinx PCIe) limit DMA to only 32 bits even if the device itself supports more. Add a single-bit flag to struct device (to be moved into the dma extension once we get to it) to mark such devices and reject larger DMA to them. Signed-off-by:
Christoph Hellwig <hch@lst.de> Reviewed-by:
Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
- 26 May, 2018 1 commit
-
-
Matthew Wilcox authored
If the radix tree underlying the IDR happens to be full and we attempt to remove an id which is larger than any id in the IDR, we will call __radix_tree_delete() with an uninitialised 'slot' pointer, at which point anything could happen. This was easiest to hit with a single entry at id 0 and attempting to remove a non-0 id, but it could have happened with 64 entries and attempting to remove an id >= 64.

Roman said: The syzkaller test boils down to opening /dev/kvm, creating an eventfd, and calling a couple of KVM ioctls. None of this requires superuser, and the result is dereferencing an uninitialized pointer, which is likely a crash. The specific path caught by syzbot is via the KVM_HYPERV_EVENTFD ioctl, which is new in 4.17. But I guess there are other user-triggerable paths, so cc:stable is probably justified.

Matthew added: We have around 250 calls to idr_remove() in the kernel today. Many of them pass an ID which is embedded in the object they're removing, so they're safe. Picking a few likely candidates: drivers/firewire/core-cdev.c looks unsafe (the ID comes from an ioctl); drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c is similar; drivers/atm/nicstar.c could be taken down by a handcrafted packet.

Link: http://lkml.kernel.org/r/20180518175025.GD6361@bombadil.infradead.org Fixes: 0a835c4f ("Reimplement IDR and IDA using the radix tree") Reported-by: <syzbot+35666cba7f0a337e2e79@syzkaller.appspotmail.com> Debugged-by:
Roman Kagan <rkagan@virtuozzo.com> Signed-off-by:
Matthew Wilcox <mawilcox@microsoft.com> Cc: <stable@vger.kernel.org> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
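A hedged illustration of the dangerous pattern described above (IDs and names are made up): an IDR holding only id 0 while user-controlled input asks to remove a larger id.

  #include <linux/idr.h>
  #include <linux/gfp.h>

  static DEFINE_IDR(ctx_idr);

  static int demo_remove(void *ctx, int user_id)
  {
          int id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);   /* id 0 */

          if (id < 0)
                  return id;

          /* Before the fix, removing an id larger than anything stored in
           * the IDR (e.g. taken straight from an ioctl argument) could end
           * up dereferencing an uninitialised slot pointer. */
          idr_remove(&ctx_idr, user_id);
          return 0;
  }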
-
- 24 May, 2018 2 commits
-
-
Ming Lei authored
When the allocation process is scheduled back and the mapped hw queue has changed, fake one extra wake-up on the previous queue to compensate for the missed wake-up, so other allocations on the previous queue won't be starved. This fixes a request allocation hang, which is easy to trigger with a very low nr_requests. The race is as follows:

1) 2 hw queues, nr_requests is 2, and wake_batch is one
2) there are 3 waiters on hw queue 0
3) two in-flight requests in hw queue 0 complete, and only two of the 3 waiters are woken up because of wake_batch; but both of those waiters can be scheduled to another CPU, causing a switch to hw queue 1
4) the 3rd waiter then waits forever, since no in-flight request remains in hw queue 0
5) this patch fixes it with the fake wake-up when a waiter is scheduled to another hw queue

Cc: <stable@vger.kernel.org> Reviewed-by:
Omar Sandoval <osandov@fb.com> Signed-off-by:
Ming Lei <ming.lei@redhat.com> Modified commit message to make it clearer, and make it apply on top of the 4.18 branch. Signed-off-by:
Jens Axboe <axboe@kernel.dk>
-
Robin Murphy authored
Drivers/subsystems creating scatterlists for DMA should be taking care to respect the scatter-gather limitations of the appropriate device, as described by dma_parms. A DMA API implementation cannot feasibly split a scatterlist into *more* entries than originally passed, so it is not well defined what they should do when given a segment larger than the limit they are also required to respect. Conversely, devices which are less limited than the rather conservative defaults, or indeed have no limitations at all (e.g. GPUs with their own internal MMU), should be encouraged to set appropriate dma_parms, as they may get more efficient DMA mapping performance out of it. Signed-off-by:
Robin Murphy <robin.murphy@arm.com> Signed-off-by:
Christoph Hellwig <hch@lst.de>
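A hedged example of a driver advertising its limitations via dma_parms, as the commit encourages; the values are arbitrary:

  #include <linux/dma-mapping.h>
  #include <linux/sizes.h>

  static int demo_setup_dma(struct device *dev)
  {
          int ret;

          /* Largest scatterlist segment this device can handle. */
          ret = dma_set_max_seg_size(dev, SZ_64K);
          if (ret)
                  return ret;

          /* Physical boundary a single segment must not cross. */
          return dma_set_seg_boundary(dev, 0xffffffff);
  }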
-
- 23 May, 2018 1 commit
-
-
Dan Williams authored
Add a common Kconfig CONFIG_ARCH_HAS_UACCESS_MCSAFE that archs can optionally select, and fixup the declaration of _copy_to_iter_mcsafe(). Fixes: 8780356e ("x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()") Signed-off-by:
Dan Williams <dan.j.williams@intel.com>
-
- 19 May, 2018 4 commits
-
-
Christoph Hellwig authored
Add a new dma_map_ops implementation that uses dma-direct for the address mapping of streaming mappings, and which requires arch-specific implementations of coherent allocate/free. Architectures have to provide flushing helpers for ownership transfers to the device and/or CPU, and can provide optional implementations of the coherent mmap functionality and the cache_flush routines for non-coherent long-term allocations. Signed-off-by:
Christoph Hellwig <hch@lst.de> Tested-by:
Alexey Brodkin <abrodkin@synopsys.com> Acked-by:
Vineet Gupta <vgupta@synopsys.com>
-
Christoph Hellwig authored
ARCH_DMA_ADDR_T_64BIT is always true for 64-bit architectures now, so we can skip the clause requiring it. 'n' is already the default, so there is no need to state it explicitly. Tested-by:
Alexey Brodkin <abrodkin@synopsys.com> Acked-by:
Vineet Gupta <vgupta@synopsys.com> Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
Ross Zwisler authored
Fix a race in the multi-order iteration code which causes the kernel to hit a GP fault. This was first seen with a production v4.15 based kernel (4.15.6-300.fc27.x86_64) utilizing a DAX workload which used order 9 PMD DAX entries.

The race has to do with how we tear down multi-order sibling entries when we are removing an item from the tree. Remember, for example, that an order 2 entry looks like this:

  struct radix_tree_node.slots[] = [entry][sibling][sibling][sibling]

where 'entry' is in some slot in the struct radix_tree_node, and the three slots following 'entry' contain sibling pointers which point back to 'entry.'

When we delete 'entry' from the tree, we call:

  radix_tree_delete()
    radix_tree_delete_item()
      __radix_tree_delete()
        replace_slot()

replace_slot() first removes the siblings in order from the first to the last, and then replaces 'entry' with NULL. This means that for a brief period of time we end up with one or more of the siblings removed, so:

  struct radix_tree_node.slots[] = [entry][NULL][sibling][sibling]

This causes an issue if you have a reader iterating over the slots in the tree via radix_tree_for_each_slot() while only under rcu_read_lock()/rcu_read_unlock() protection. This is a common case in mm/filemap.c.

The issue is that when __radix_tree_next_slot() => skip_siblings() tries to skip over the sibling entries in the slots, it currently does so with an exact match on the slot directly preceding our current slot. Normally this works:

                                        V preceding slot
  struct radix_tree_node.slots[] = [entry][sibling][sibling][sibling]
                                              ^ current slot

This lets you find the first sibling, and you skip them all in order. But in the case where one of the siblings is NULL, that slot is skipped and then our sibling detection is interrupted:

                                              V preceding slot
  struct radix_tree_node.slots[] = [entry][NULL][sibling][sibling]
                                                    ^ current slot

This means that the sibling pointers aren't recognized since they point all the way back to 'entry', so we think that they are normal internal radix tree pointers. This causes us to think we need to walk down to a struct radix_tree_node starting at the address of 'entry'. In a real running kernel this will crash the thread with a GP fault when you try and dereference the slots in your broken node starting at 'entry'.

We fix this race by fixing the way that skip_siblings() detects sibling nodes. Instead of testing against the preceding slot, we instead look for siblings via is_sibling_entry(), which compares against the position of the struct radix_tree_node.slots[] array. This ensures that sibling entries are properly identified, even if they are no longer contiguous with the 'entry' they point to.

Link: http://lkml.kernel.org/r/20180503192430.7582-6-ross.zwisler@linux.intel.com Fixes: 148deab2 ("radix-tree: improve multiorder iterators") Signed-off-by:
Ross Zwisler <ross.zwisler@linux.intel.com> Reported-by:
CR, Sapthagirish <sapthagirish.cr@intel.com> Reviewed-by:
Jan Kara <jack@suse.cz> Cc: Matthew Wilcox <willy@infradead.org> Cc: Christoph Hellwig <hch@lst.de> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Chinner <david@fromorbit.com> Cc: <stable@vger.kernel.org> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
-
Matthew Wilcox authored
I had neglected to increment the error counter when the tests failed, which meant the tests were noisy when they failed but did not actually return an error code. Link: http://lkml.kernel.org/r/20180509114328.9887-1-mpe@ellerman.id.au Fixes: 3cc78125 ("lib/test_bitmap.c: add optimisation tests") Signed-off-by:
Matthew Wilcox <mawilcox@microsoft.com> Signed-off-by:
Michael Ellerman <mpe@ellerman.id.au> Reported-by:
Michael Ellerman <mpe@ellerman.id.au> Tested-by:
Michael Ellerman <mpe@ellerman.id.au> Reviewed-by:
Kees Cook <keescook@chromium.org> Cc: Yury Norov <ynorov@caviumnetworks.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: <stable@vger.kernel.org> [4.13+] Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
-
- 16 May, 2018 1 commit
-
-
Steven Rostedt (VMware) authored
Reviewing Tobin's patches for getting pointers out early before entropy has been established, I noticed that there's a lone smp_mb() in the code. As with most lone memory barriers, this one appears to be incorrectly used.

We currently basically have this:

  get_random_bytes(&ptr_key, sizeof(ptr_key));
  /*
   * have_filled_random_ptr_key==true is dependent on get_random_bytes().
   * ptr_to_id() needs to see have_filled_random_ptr_key==true
   * after get_random_bytes() returns.
   */
  smp_mb();
  WRITE_ONCE(have_filled_random_ptr_key, true);

And later we have:

  if (unlikely(!have_filled_random_ptr_key))
          return string(buf, end, "(ptrval)", spec);

  /* Missing memory barrier here. */
  hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);

As the CPU can perform speculative loads, we could have a situation with the following:

  CPU0                                   CPU1
  ----                                   ----
                                         load ptr_key = 0
  store ptr_key = random
  smp_mb()
  store have_filled_random_ptr_key
                                         load have_filled_random_ptr_key = true
                                         BAD BAD BAD! (you're so bad!)

Because nothing prevents CPU1 from loading ptr_key before loading have_filled_random_ptr_key.

This race is very unlikely, but we can't keep an incorrect smp_mb() in place. Instead, replace the have_filled_random_ptr_key flag with a static_branch not_filled_random_ptr_key that is initialized to true and changed to false when we get enough entropy. If the update happens in early boot, the static_key is updated immediately; otherwise it has to wait until entropy is filled, and that happens in an interrupt handler, which can't enable a static_key, as that requires a preemptible context. In that case a work_queue is used to enable it; since entropy already took too long to establish in the first place, waiting a little more shouldn't hurt anything.

The benefit of using the static key is that the unlikely branch in vsprintf() now becomes a nop.

Link: http://lkml.kernel.org/r/20180515100558.21df515e@gandalf.local.home Cc: stable@vger.kernel.org Fixes: ad67b74d ("printk: hash addresses printed with %p") Acked-by:
Linus Torvalds <torvalds@linux-foundation.org> Signed-off-by:
Steven Rostedt (VMware) <rostedt@goodmis.org>
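A hedged sketch of the static-key arrangement described above; the key name follows the commit text, but the surrounding code is simplified and not the actual lib/vsprintf.c implementation.

  #include <linux/jump_label.h>
  #include <linux/workqueue.h>

  /* Starts true: pointers print as "(ptrval)" until we have entropy. */
  static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);

  static void enable_ptr_key_workfn(struct work_struct *work)
  {
          /* Flipping a static key needs a preemptible context, hence a
           * work item when entropy arrives from an interrupt handler. */
          static_branch_disable(&not_filled_random_ptr_key);
  }
  static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);

  static bool ptr_key_ready(void)
  {
          /* Compiles down to a patched NOP once the key is disabled, so
           * the check in the %p fast path costs nothing. */
          return !static_branch_unlikely(&not_filled_random_ptr_key);
  }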
-
- 15 May, 2018 1 commit
-
-
Dan Williams authored
Use the updated memcpy_mcsafe() implementation to define copy_user_mcsafe() and copy_to_iter_mcsafe(). The most significant difference from typical copy_to_iter() is that the ITER_KVEC and ITER_BVEC iterator types can fail to complete a full transfer. Signed-off-by:
Dan Williams <dan.j.williams@intel.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Andy Lutomirski <luto@amacapital.net> Cc: Borislav Petkov <bp@alien8.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: hch@lst.de Cc: linux-fsdevel@vger.kernel.org Cc: linux-nvdimm@lists.01.org Link: http://lkml.kernel.org/r/152539239150.31796.9189779163576449784.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by:
Ingo Molnar <mingo@kernel.org>
-
- 14 May, 2018 1 commit
-
-
Jens Axboe authored
If we have multiple callers of sbq_wake_up(), we can end up in a situation where the wait_cnt will continually go more and more negative. Consider the case where our wake batch is 1, hence wait_cnt will start out as 1.

  wait_cnt == 1

  CPU0                              CPU1
  atomic_dec_return(), cnt == 0
                                    atomic_dec_return(), cnt == -1
                                    cmpxchg(-1, 0) (succeeds)
                                    [wait_cnt now 0]
  cmpxchg(0, 1) (fails)

This ends up with wait_cnt being 0, so we'll wake up immediately next time. Going through the same loop as above again, we'll end up with wait_cnt of -1. For the case where we have a larger wake batch, the only difference is that the starting point will be higher. We'll still end up with continually smaller batch wakeups, which defeats the purpose of the rolling wakeups.

Always reset the wait_cnt to the batch value. Then it doesn't matter who wins the race. But ensure that whoever does win the race is the one that increments the ws index and wakes up our batch count; the loser gets to call __sbq_wake_up() again to account its wakeups towards the next active wait state index.

Fixes: 6c0ca7ae ("sbitmap: fix wakeup hang after sbq resize") Reviewed-by:
Omar Sandoval <osandov@fb.com> Signed-off-by:
Jens Axboe <axboe@kernel.dk>
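A hedged sketch of the idea, not the actual sbitmap.c code: whoever wins the cmpxchg resets wait_cnt to the full batch, so losing the race can no longer leave the counter drifting further negative.

  #include <linux/atomic.h>

  /* Sketch: wait_cnt is the per-wait-state counter, wake_batch the batch. */
  static bool sketch_sbq_wake_up(atomic_t *wait_cnt, int wake_batch)
  {
          int cnt = atomic_dec_return(wait_cnt);

          if (cnt <= 0) {
                  /* Reset to the batch value no matter how negative we
                   * raced to; only the cmpxchg winner advances the wait
                   * state index and wakes the batch. */
                  if (atomic_cmpxchg(wait_cnt, cnt, wake_batch) == cnt)
                          return true;    /* winner */
                  return false;           /* loser: retry accounting */
          }
          return false;
  }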
-
- 12 May, 2018 2 commits
-
-
Jean Delvare authored
If DMA_ATTR_NO_WARN is passed to swiotlb_alloc_buffer(), it should be passed further down to swiotlb_tbl_map_single(). Otherwise we escape half of the warnings but still log the other half. This is one of the multiple causes of spurious warnings reported at: https://bugs.freedesktop.org/show_bug.cgi?id=104082 Signed-off-by:
Jean Delvare <jdelvare@suse.de> Fixes: 0176adb0 ("swiotlb: refactor coherent buffer allocation") Cc: Christoph Hellwig <hch@lst.de> Cc: Christian König <christian.koenig@amd.com> Cc: Michel Dänzer <michel@daenzer.net> Cc: Takashi Iwai <tiwai@suse.de> Cc: stable@vger.kernel.org # v4.16
-
Yury Norov authored
test_find_first_bit() is intentionally sub-optimal, and may cause a soft lockup due to its long run time on some systems. So decrease the length of the bitmap being traversed to avoid the lockup. With this change, test execution time doesn't exceed 0.2 seconds on my test system. Link: http://lkml.kernel.org/r/20180420171949.15710-1-ynorov@caviumnetworks.com Fixes: 4441fca0 ("lib: test module for find_*_bit() functions") Signed-off-by:
Yury Norov <ynorov@caviumnetworks.com> Reviewed-by:
Andrew Morton <akpm@linux-foundation.org> Reported-by:
Fengguang Wu <fengguang.wu@intel.com> Signed-off-by:
Andrew Morton <akpm@linux-foundation.org> Signed-off-by:
Linus Torvalds <torvalds@linux-foundation.org>
-
- 10 May, 2018 2 commits
-
-
Omar Sandoval authored
Make sure the user passed the right value to sbitmap_queue_min_shallow_depth(). Acked-by:
Paolo Valente <paolo.valente@linaro.org> Signed-off-by:
Omar Sandoval <osandov@fb.com> Signed-off-by:
Jens Axboe <axboe@kernel.dk>
-
Omar Sandoval authored
The sbitmap queue wake batch is calculated such that once allocations start blocking, all of the bits which are already allocated must be enough to fulfill the batch counters of all of the waitqueues. However, the shallow allocation depth can break this invariant, since we block before our full depth is being utilized. Add sbitmap_queue_min_shallow_depth(), which saves the minimum shallow depth the sbq will use, and update sbq_calc_wake_batch() to take it into account. Acked-by:
Paolo Valente <paolo.valente@linaro.org> Signed-off-by:
Omar Sandoval <osandov@fb.com> Signed-off-by:
Jens Axboe <axboe@kernel.dk>
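A hedged usage sketch of the new hook (the depth value is arbitrary): a user that performs shallow allocations declares its minimum shallow depth up front so the wake batch can be sized against it.

  #include <linux/sbitmap.h>

  static void demo_shallow_setup(struct sbitmap_queue *sbq)
  {
          /* We never allocate with a shallow depth below 4; telling the
           * sbq lets it recalculate the wake batch from that bound. */
          sbitmap_queue_min_shallow_depth(sbq, 4);
  }

  static int demo_shallow_get(struct sbitmap_queue *sbq)
  {
          /* Shallow allocation limited to depth 4, as declared above. */
          return __sbitmap_queue_get_shallow(sbq, 4);
  }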
-
- 09 May, 2018 9 commits
-
-
Yisheng Xie authored
swiotlb uses the physical address of the bounce buffer when doing map and unmap; update the related comments to match. Signed-off-by:
Yisheng Xie <xieyisheng1@huawei.com> Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
Christoph Hellwig authored
swiotlb now selects the DMA_DIRECT_OPS config symbol, so this will always be true. Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
Christoph Hellwig authored
This way we have one central definition of it, and users can select it as needed. The new option is not user visible, which is the behavior it had in most architectures, with a few notable exceptions:

- On x86_64 and mips/loongson3 it used to be user selectable, but defaulted to y. It is now unconditional, which seems like the right thing for 64-bit architectures without guaranteed availability of IOMMUs.

- On powerpc the symbol is user selectable and defaults to n, but many boards select it. This change assumes no working setup required a manual selection, but if that turns out to be wrong we'll have to add another select statement or two for the respective boards.

Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
Christoph Hellwig authored
Define this symbol if the architecture either uses 64-bit pointers or the PHYS_ADDR_T_64BIT is set. This covers 95% of the old arch magic. We only need an additional select for Xen on ARM (why anyway?), and we now always set ARCH_DMA_ADDR_T_64BIT on mips boards with 64-bit physical addressing instead of only doing it when highmem is set. Signed-off-by:
Christoph Hellwig <hch@lst.de> Acked-by:
James Hogan <jhogan@kernel.org>
-
Christoph Hellwig authored
This way we have one central definition of it, and users can select it as needed. Note that we now also always select it when CONFIG_DMA_API_DEBUG is selected, which fixes some incorrect checks in a few network drivers. Signed-off-by:
Christoph Hellwig <hch@lst.de> Reviewed-by:
Anshuman Khandual <khandual@linux.vnet.ibm.com>
-
Christoph Hellwig authored
This way we have one central definition of it, and user can select it as needed. Signed-off-by:
Christoph Hellwig <hch@lst.de> Reviewed-by:
Anshuman Khandual <khandual@linux.vnet.ibm.com>
-
Christoph Hellwig authored
This way we have one central definition of it, and user can select it as needed. Signed-off-by:
Christoph Hellwig <hch@lst.de> Reviewed-by:
Anshuman Khandual <khandual@linux.vnet.ibm.com>
-
Christoph Hellwig authored
This avoids selecting IOMMU_HELPER just for this function. And we only use it once or twice in normal builds, so this is often even a size reduction. Signed-off-by:
Christoph Hellwig <hch@lst.de>
-
Christoph Hellwig authored
This function is only used by built-in code. Signed-off-by:
Christoph Hellwig <hch@lst.de> Reviewed-by:
Anshuman Khandual <khandual@linux.vnet.ibm.com>
-