author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-06-10 05:30:17 -0300
committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2016-06-10 05:30:17 -0300
commit | d635711daa98be86d4c7fd01499c34f566b54ccb (patch)
tree | aa5cc3760a27c3d57146498cb82fa549547de06c /lib
parent | c91265cd0efb83778f015b4d4b1129bd2cfd075e (diff)
Linux-libre 4.6.2-gnu
Diffstat (limited to 'lib')
-rw-r--r-- | lib/842/842_decompress.c | 2
-rw-r--r-- | lib/Kconfig | 4
-rw-r--r-- | lib/Kconfig.debug | 56
-rw-r--r-- | lib/Kconfig.kasan | 5
-rw-r--r-- | lib/Kconfig.ubsan | 5
-rw-r--r-- | lib/Makefile | 17
-rw-r--r-- | lib/asn1_decoder.c | 16
-rw-r--r-- | lib/atomic64_test.c | 2
-rw-r--r-- | lib/bitmap.c | 89
-rw-r--r-- | lib/bug.c | 15
-rw-r--r-- | lib/checksum.c | 4
-rw-r--r-- | lib/cpumask.c | 1
-rw-r--r-- | lib/devres.c | 2
-rw-r--r-- | lib/dma-debug.c | 2
-rw-r--r-- | lib/dma-noop.c | 75
-rw-r--r-- | lib/extable.c | 50
-rw-r--r-- | lib/flex_proportions.c | 2
-rw-r--r-- | lib/iov_iter.c | 19
-rw-r--r-- | lib/kobject.c | 1
-rw-r--r-- | lib/kstrtox.c | 64
-rw-r--r-- | lib/lz4/lz4defs.h | 4
-rw-r--r-- | lib/mpi/longlong.h | 2
-rw-r--r-- | lib/mpi/mpi-inline.h | 2
-rw-r--r-- | lib/mpi/mpi-internal.h | 8
-rw-r--r-- | lib/percpu-refcount.c | 2
-rw-r--r-- | lib/radix-tree.c | 182
-rw-r--r-- | lib/random32.c | 1
-rw-r--r-- | lib/stackdepot.c | 284
-rw-r--r-- | lib/string.c | 45
-rw-r--r-- | lib/test_bitmap.c | 358
-rw-r--r-- | lib/test_bpf.c | 229
-rw-r--r-- | lib/test_kasan.c | 30
-rw-r--r-- | lib/test_printf.c | 53
-rw-r--r-- | lib/test_static_keys.c | 62
-rw-r--r-- | lib/vsprintf.c | 134 |
35 files changed, 1694 insertions, 133 deletions
diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c index a7f278d2e..11fc39b40 100644 --- a/lib/842/842_decompress.c +++ b/lib/842/842_decompress.c @@ -254,7 +254,7 @@ static int do_op(struct sw842_param *p, u8 o) case OP_ACTION_NOOP: break; default: - pr_err("Interal error, invalid op %x\n", op); + pr_err("Internal error, invalid op %x\n", op); return -EINVAL; } diff --git a/lib/Kconfig b/lib/Kconfig index 133ebc0c1..3cca12225 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -536,4 +536,8 @@ config ARCH_HAS_PMEM_API config ARCH_HAS_MMIO_FLUSH bool +config STACKDEPOT + bool + select STACKTRACE + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e0b3031bf..b4e287cde 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -342,6 +342,18 @@ config FRAME_POINTER larger and slower, but it gives very useful debugging information in case of kernel bugs. (precise oopses/stacktraces/warnings) +config STACK_VALIDATION + bool "Compile-time stack metadata validation" + depends on HAVE_STACK_VALIDATION + default n + help + Add compile-time checks to validate stack metadata, including frame + pointers (if CONFIG_FRAME_POINTER is enabled). This helps ensure + that runtime stack traces are more reliable. + + For more information, see + tools/objtool/Documentation/stack-validation.txt. + config DEBUG_FORCE_WEAK_PER_CPU bool "Force weak per-cpu definitions" depends on DEBUG_KERNEL @@ -546,7 +558,7 @@ config DEBUG_KMEMLEAK_DEFAULT_OFF config DEBUG_STACK_USAGE bool "Stack utilization instrumentation" - depends on DEBUG_KERNEL && !IA64 && !PARISC && !METAG + depends on DEBUG_KERNEL && !IA64 help Enables the display of the minimum amount of free stack which each task has ever had available in the sysrq-T and sysrq-P debug output. @@ -684,6 +696,27 @@ source "lib/Kconfig.kasan" endmenu # "Memory Debugging" +config ARCH_HAS_KCOV + bool + help + KCOV does not have any arch-specific code, but currently it is enabled + only for x86_64. KCOV requires testing on other archs, and most likely + disabling of instrumentation for some early boot code. + +config KCOV + bool "Code coverage for fuzzing" + depends on ARCH_HAS_KCOV + select DEBUG_FS + help + KCOV exposes kernel code coverage information in a form suitable + for coverage-guided fuzzing (randomized testing). + + If RANDOMIZE_BASE is enabled, PC values will not be stable across + different machines and across reboots. If you need stable PC values, + disable RANDOMIZE_BASE. + + For more details, see Documentation/kcov.txt. + config DEBUG_SHIRQ bool "Debug shared IRQ handlers" depends on DEBUG_KERNEL @@ -1442,6 +1475,19 @@ config DEBUG_BLOCK_EXT_DEVT Say N if you are unsure. +config CPU_HOTPLUG_STATE_CONTROL + bool "Enable CPU hotplug state control" + depends on DEBUG_KERNEL + depends on HOTPLUG_CPU + default n + help + Allows to write steps between "offline" and "online" to the CPUs + sysfs target file so states can be stepped granular. This is a debug + option for now as the hotplug machinery cannot be stopped and + restarted at arbitrary points yet. + + Say N if your are unsure. + config NOTIFIER_ERROR_INJECTION tristate "Notifier error injection" depends on DEBUG_KERNEL @@ -1754,6 +1800,14 @@ config TEST_KSTRTOX config TEST_PRINTF tristate "Test printf() family of functions at runtime" +config TEST_BITMAP + tristate "Test bitmap_*() family of functions at runtime" + default n + help + Enable this option to test the bitmap functions at boot. + + If unsure, say N. 
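Note on the KCOV option added above: its help text points at Documentation/kcov.txt for usage. Purely as an illustration, and not part of this commit, a userspace harness drives KCOV roughly as in the sketch below; the debugfs path and ioctl numbers follow that documentation and should be read as assumptions rather than guarantees.

/* Hedged sketch of a KCOV consumer, modelled on Documentation/kcov.txt. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)	/* assumed, per kcov.txt */
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define COVER_SIZE	(64 << 10)			/* buffer size in PC entries */

int main(void)
{
	unsigned long *cover, n, i;
	int fd = open("/sys/kernel/debug/kcov", O_RDWR);

	if (fd == -1)
		return 1;
	/* Size the per-thread coverage buffer, then map it and enable tracing. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	if (ioctl(fd, KCOV_ENABLE, 0))
		return 1;
	cover[0] = 0;			/* entry 0 counts the PCs that follow */

	read(-1, NULL, 0);		/* the syscall whose coverage we want */

	n = cover[0];
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	ioctl(fd, KCOV_DISABLE, 0);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}

The collected values are raw kernel program counters, which is why the help text warns that RANDOMIZE_BASE makes them unstable across machines and reboots.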
+ config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" default n diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 0fee5acd5..67d8c6838 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -5,8 +5,9 @@ if HAVE_ARCH_KASAN config KASAN bool "KASan: runtime memory debugger" - depends on SLUB_DEBUG + depends on SLUB_DEBUG || (SLAB && !DEBUG_SLAB) select CONSTRUCTORS + select STACKDEPOT if SLAB help Enables kernel address sanitizer - runtime memory debugger, designed to find out-of-bounds accesses and use-after-free bugs. @@ -16,6 +17,8 @@ config KASAN This feature consumes about 1/8 of available memory and brings about ~x3 performance slowdown. For better error detection enable CONFIG_STACKTRACE. + Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB + (the resulting kernel does not boot). choice prompt "Instrumentation type" diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index e07c1ba9b..39494af9a 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -13,6 +13,11 @@ config UBSAN_SANITIZE_ALL bool "Enable instrumentation for the entire kernel" depends on UBSAN depends on ARCH_HAS_UBSAN_SANITIZE_ALL + + # We build with -Wno-maybe-uninitilzed, but we still want to + # use -Wmaybe-uninitilized in allmodconfig builds. + # So dependsy bellow used to disable this option in allmodconfig + depends on !COMPILE_TEST default y help This option activates instrumentation for the entire kernel. diff --git a/lib/Makefile b/lib/Makefile index a7c26a41a..7bd6fd436 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -7,6 +7,18 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst $(CC_FLAGS_FTRACE),,$(ORIG_CFLAGS)) endif +# These files are disabled because they produce lots of non-interesting and/or +# flaky coverage that is not a function of syscall inputs. For example, +# rbtree can be global and individual rotations don't correlate with inputs. +KCOV_INSTRUMENT_string.o := n +KCOV_INSTRUMENT_rbtree.o := n +KCOV_INSTRUMENT_list_debug.o := n +KCOV_INSTRUMENT_debugobjects.o := n +KCOV_INSTRUMENT_dynamic_debug.o := n +# Kernel does not boot if we instrument this file as it uses custom calling +# convention (see CONFIG_ARCH_HWEIGHT_CFLAGS). 
+KCOV_INSTRUMENT_hweight.o := n + lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ @@ -18,6 +30,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ obj-$(CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS) += usercopy.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o +lib-$(CONFIG_HAS_DMA) += dma-noop.o lib-y += kobject.o klist.o obj-y += lockref.o @@ -43,6 +56,7 @@ obj-$(CONFIG_TEST_USER_COPY) += test_user_copy.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_keys.o obj-$(CONFIG_TEST_STATIC_KEYS) += test_static_key_base.o obj-$(CONFIG_TEST_PRINTF) += test_printf.o +obj-$(CONFIG_TEST_BITMAP) += test_bitmap.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -167,6 +181,9 @@ obj-$(CONFIG_SG_SPLIT) += sg_split.o obj-$(CONFIG_STMP_DEVICE) += stmp_device.o obj-$(CONFIG_IRQ_POLL) += irq_poll.o +obj-$(CONFIG_STACKDEPOT) += stackdepot.o +KASAN_SANITIZE_stackdepot.o := n + libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \ fdt_empty_tree.o $(foreach file, $(libfdt_files), \ diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 2b3f46c04..554522934 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -74,7 +74,7 @@ next_tag: /* Extract a tag from the data */ tag = data[dp++]; - if (tag == 0) { + if (tag == ASN1_EOC) { /* It appears to be an EOC. */ if (data[dp++] != 0) goto invalid_eoc; @@ -96,10 +96,8 @@ next_tag: /* Extract the length */ len = data[dp++]; - if (len <= 0x7f) { - dp += len; - goto next_tag; - } + if (len <= 0x7f) + goto check_length; if (unlikely(len == ASN1_INDEFINITE_LENGTH)) { /* Indefinite length */ @@ -110,14 +108,18 @@ next_tag: } n = len - 0x80; - if (unlikely(n > sizeof(size_t) - 1)) + if (unlikely(n > sizeof(len) - 1)) goto length_too_long; if (unlikely(n > datalen - dp)) goto data_overrun_error; - for (len = 0; n > 0; n--) { + len = 0; + for (; n > 0; n--) { len <<= 8; len |= data[dp++]; } +check_length: + if (len > datalen - dp) + goto data_overrun_error; dp += len; goto next_tag; diff --git a/lib/atomic64_test.c b/lib/atomic64_test.c index d62de8bf0..123481814 100644 --- a/lib/atomic64_test.c +++ b/lib/atomic64_test.c @@ -17,7 +17,7 @@ #include <linux/atomic.h> #ifdef CONFIG_X86 -#include <asm/processor.h> /* for boot_cpu_has below */ +#include <asm/cpufeature.h> /* for boot_cpu_has below */ #endif #define TEST(bit, op, c_op, val) \ diff --git a/lib/bitmap.c b/lib/bitmap.c index 814814397..c66da508c 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -12,6 +12,8 @@ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/bug.h> +#include <linux/kernel.h> +#include <linux/string.h> #include <asm/page.h> #include <asm/uaccess.h> @@ -1060,6 +1062,93 @@ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order) EXPORT_SYMBOL(bitmap_allocate_region); /** + * bitmap_from_u32array - copy the contents of a u32 array of bits to bitmap + * @bitmap: array of unsigned longs, the destination bitmap, non NULL + * @nbits: number of bits in @bitmap + * @buf: array of u32 (in host byte order), the source bitmap, non NULL + * @nwords: number of u32 words in @buf + * + * copy min(nbits, 32*nwords) bits from @buf to @bitmap, remaining + * bits between nword and nbits in @bitmap (if any) are cleared. In + * last word of @bitmap, the bits beyond nbits (if any) are kept + * unchanged. + * + * Return the number of bits effectively copied. 
+ */ +unsigned int +bitmap_from_u32array(unsigned long *bitmap, unsigned int nbits, + const u32 *buf, unsigned int nwords) +{ + unsigned int dst_idx, src_idx; + + for (src_idx = dst_idx = 0; dst_idx < BITS_TO_LONGS(nbits); ++dst_idx) { + unsigned long part = 0; + + if (src_idx < nwords) + part = buf[src_idx++]; + +#if BITS_PER_LONG == 64 + if (src_idx < nwords) + part |= ((unsigned long) buf[src_idx++]) << 32; +#endif + + if (dst_idx < nbits/BITS_PER_LONG) + bitmap[dst_idx] = part; + else { + unsigned long mask = BITMAP_LAST_WORD_MASK(nbits); + + bitmap[dst_idx] = (bitmap[dst_idx] & ~mask) + | (part & mask); + } + } + + return min_t(unsigned int, nbits, 32*nwords); +} +EXPORT_SYMBOL(bitmap_from_u32array); + +/** + * bitmap_to_u32array - copy the contents of bitmap to a u32 array of bits + * @buf: array of u32 (in host byte order), the dest bitmap, non NULL + * @nwords: number of u32 words in @buf + * @bitmap: array of unsigned longs, the source bitmap, non NULL + * @nbits: number of bits in @bitmap + * + * copy min(nbits, 32*nwords) bits from @bitmap to @buf. Remaining + * bits after nbits in @buf (if any) are cleared. + * + * Return the number of bits effectively copied. + */ +unsigned int +bitmap_to_u32array(u32 *buf, unsigned int nwords, + const unsigned long *bitmap, unsigned int nbits) +{ + unsigned int dst_idx = 0, src_idx = 0; + + while (dst_idx < nwords) { + unsigned long part = 0; + + if (src_idx < BITS_TO_LONGS(nbits)) { + part = bitmap[src_idx]; + if (src_idx >= nbits/BITS_PER_LONG) + part &= BITMAP_LAST_WORD_MASK(nbits); + src_idx++; + } + + buf[dst_idx++] = part & 0xffffffffUL; + +#if BITS_PER_LONG == 64 + if (dst_idx < nwords) { + part >>= 32; + buf[dst_idx++] = part & 0xffffffffUL; + } +#endif + } + + return min_t(unsigned int, nbits, 32*nwords); +} +EXPORT_SYMBOL(bitmap_to_u32array); + +/** * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. * @dst: destination buffer * @src: bitmap to copy @@ -167,19 +167,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs) if (warning) { /* this is a WARN_ON rather than BUG/BUG_ON */ - pr_warn("------------[ cut here ]------------\n"); - - if (file) - pr_warn("WARNING: at %s:%u\n", file, line); - else - pr_warn("WARNING: at %p [verbose debug info unavailable]\n", - (void *)bugaddr); - - print_modules(); - show_regs(regs); - print_oops_end_marker(); - /* Just a warning, don't kill lockdep. */ - add_taint(BUG_GET_TAINT(bug), LOCKDEP_STILL_OK); + __warn(file, line, (void *)bugaddr, BUG_GET_TAINT(bug), regs, + NULL); return BUG_TRAP_TYPE_WARN; } diff --git a/lib/checksum.c b/lib/checksum.c index 8b39e86db..d3ec93f9e 100644 --- a/lib/checksum.c +++ b/lib/checksum.c @@ -191,9 +191,7 @@ static inline u32 from64to32(u64 x) } __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) + __u32 len, __u8 proto, __wsum sum) { unsigned long long s = (__force u32)sum; diff --git a/lib/cpumask.c b/lib/cpumask.c index 5a70f6196..81dedaab3 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -41,6 +41,7 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu) break; return i; } +EXPORT_SYMBOL(cpumask_any_but); /* These are not inline because of header tangles. 
*/ #ifdef CONFIG_CPUMASK_OFFSTACK diff --git a/lib/devres.c b/lib/devres.c index 8c8567263..cb1464c41 100644 --- a/lib/devres.c +++ b/lib/devres.c @@ -236,7 +236,7 @@ struct pcim_iomap_devres { static void pcim_iomap_release(struct device *gendev, void *res) { - struct pci_dev *dev = container_of(gendev, struct pci_dev, dev); + struct pci_dev *dev = to_pci_dev(gendev); struct pcim_iomap_devres *this = res; int i; diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 4a1515f4b..51a76af25 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -657,9 +657,9 @@ static struct dma_debug_entry *dma_entry_alloc(void) spin_lock_irqsave(&free_entries_lock, flags); if (list_empty(&free_entries)) { - pr_err("DMA-API: debugging out of memory - disabling\n"); global_disable = true; spin_unlock_irqrestore(&free_entries_lock, flags); + pr_err("DMA-API: debugging out of memory - disabling\n"); return NULL; } diff --git a/lib/dma-noop.c b/lib/dma-noop.c new file mode 100644 index 000000000..721456468 --- /dev/null +++ b/lib/dma-noop.c @@ -0,0 +1,75 @@ +/* + * lib/dma-noop.c + * + * Simple DMA noop-ops that map 1:1 with memory + */ +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/dma-mapping.h> +#include <linux/scatterlist.h> + +static void *dma_noop_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + struct dma_attrs *attrs) +{ + void *ret; + + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) + *dma_handle = virt_to_phys(ret); + return ret; +} + +static void dma_noop_free(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + struct dma_attrs *attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + return page_to_phys(page) + offset; +} + +static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + void *va; + + BUG_ON(!sg_page(sg)); + va = sg_virt(sg); + sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int dma_noop_mapping_error(struct device *dev, dma_addr_t dma_addr) +{ + return 0; +} + +static int dma_noop_supported(struct device *dev, u64 mask) +{ + return 1; +} + +struct dma_map_ops dma_noop_ops = { + .alloc = dma_noop_alloc, + .free = dma_noop_free, + .map_page = dma_noop_map_page, + .map_sg = dma_noop_map_sg, + .mapping_error = dma_noop_mapping_error, + .dma_supported = dma_noop_supported, +}; + +EXPORT_SYMBOL(dma_noop_ops); diff --git a/lib/extable.c b/lib/extable.c index 4cac81ec2..0be02ad56 100644 --- a/lib/extable.c +++ b/lib/extable.c @@ -14,7 +14,37 @@ #include <linux/sort.h> #include <asm/uaccess.h> +#ifndef ARCH_HAS_RELATIVE_EXTABLE +#define ex_to_insn(x) ((x)->insn) +#else +static inline unsigned long ex_to_insn(const struct exception_table_entry *x) +{ + return (unsigned long)&x->insn + x->insn; +} +#endif + #ifndef ARCH_HAS_SORT_EXTABLE +#ifndef ARCH_HAS_RELATIVE_EXTABLE +#define swap_ex NULL +#else +static void swap_ex(void *a, void *b, int size) +{ + struct exception_table_entry *x = a, *y = b, tmp; + int delta = b - a; + + tmp = *x; + x->insn = y->insn + delta; + y->insn = tmp.insn - delta; + +#ifdef swap_ex_entry_fixup + swap_ex_entry_fixup(x, y, tmp, delta); +#else + x->fixup = y->fixup + delta; + 
y->fixup = tmp.fixup - delta; +#endif +} +#endif /* ARCH_HAS_RELATIVE_EXTABLE */ + /* * The exception table needs to be sorted so that the binary * search that we use to find entries in it works properly. @@ -26,9 +56,9 @@ static int cmp_ex(const void *a, const void *b) const struct exception_table_entry *x = a, *y = b; /* avoid overflow */ - if (x->insn > y->insn) + if (ex_to_insn(x) > ex_to_insn(y)) return 1; - if (x->insn < y->insn) + if (ex_to_insn(x) < ex_to_insn(y)) return -1; return 0; } @@ -37,7 +67,7 @@ void sort_extable(struct exception_table_entry *start, struct exception_table_entry *finish) { sort(start, finish - start, sizeof(struct exception_table_entry), - cmp_ex, NULL); + cmp_ex, swap_ex); } #ifdef CONFIG_MODULES @@ -48,13 +78,15 @@ void sort_extable(struct exception_table_entry *start, void trim_init_extable(struct module *m) { /*trim the beginning*/ - while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { + while (m->num_exentries && + within_module_init(ex_to_insn(&m->extable[0]), m)) { m->extable++; m->num_exentries--; } /*trim the end*/ while (m->num_exentries && - within_module_init(m->extable[m->num_exentries-1].insn, m)) + within_module_init(ex_to_insn(&m->extable[m->num_exentries - 1]), + m)) m->num_exentries--; } #endif /* CONFIG_MODULES */ @@ -81,13 +113,13 @@ search_extable(const struct exception_table_entry *first, * careful, the distance between value and insn * can be larger than MAX_LONG: */ - if (mid->insn < value) + if (ex_to_insn(mid) < value) first = mid + 1; - else if (mid->insn > value) + else if (ex_to_insn(mid) > value) last = mid - 1; else return mid; - } - return NULL; + } + return NULL; } #endif diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 8f25652f4..a71cf1bdd 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -17,7 +17,7 @@ * * \Sum_{j} p_{j} = 1, * - * This formula can be straightforwardly computed by maintaing denominator + * This formula can be straightforwardly computed by maintaining denominator * (let's call it 'd') and for each event type its numerator (let's call it * 'n_j'). When an event of type 'j' happens, we simply need to do: * n_j++; d++; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 5fecddc32..ca5316e00 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -569,6 +569,25 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) } EXPORT_SYMBOL(iov_iter_alignment); +unsigned long iov_iter_gap_alignment(const struct iov_iter *i) +{ + unsigned long res = 0; + size_t size = i->count; + if (!size) + return 0; + + iterate_all_kinds(i, size, v, + (res |= (!res ? 0 : (unsigned long)v.iov_base) | + (size != v.iov_len ? size : 0), 0), + (res |= (!res ? 0 : (unsigned long)v.bv_offset) | + (size != v.bv_len ? size : 0)), + (res |= (!res ? 0 : (unsigned long)v.iov_base) | + (size != v.iov_len ? 
size : 0)) + ); + return res; +} +EXPORT_SYMBOL(iov_iter_gap_alignment); + ssize_t iov_iter_get_pages(struct iov_iter *i, struct page **pages, size_t maxsize, unsigned maxpages, size_t *start) diff --git a/lib/kobject.c b/lib/kobject.c index 7cbccd2b4..445dcaeb0 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -861,6 +861,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name) spin_unlock(&kset->list_lock); return ret; } +EXPORT_SYMBOL_GPL(kset_find_obj); static void kset_release(struct kobject *kobj) { diff --git a/lib/kstrtox.c b/lib/kstrtox.c index 94be244e8..d8a5cf66c 100644 --- a/lib/kstrtox.c +++ b/lib/kstrtox.c @@ -321,6 +321,70 @@ int kstrtos8(const char *s, unsigned int base, s8 *res) } EXPORT_SYMBOL(kstrtos8); +/** + * kstrtobool - convert common user inputs into boolean values + * @s: input string + * @res: result + * + * This routine returns 0 iff the first character is one of 'Yy1Nn0', or + * [oO][NnFf] for "on" and "off". Otherwise it will return -EINVAL. Value + * pointed to by res is updated upon finding a match. + */ +int kstrtobool(const char *s, bool *res) +{ + if (!s) + return -EINVAL; + + switch (s[0]) { + case 'y': + case 'Y': + case '1': + *res = true; + return 0; + case 'n': + case 'N': + case '0': + *res = false; + return 0; + case 'o': + case 'O': + switch (s[1]) { + case 'n': + case 'N': + *res = true; + return 0; + case 'f': + case 'F': + *res = false; + return 0; + default: + break; + } + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(kstrtobool); + +/* + * Since "base" would be a nonsense argument, this open-codes the + * _from_user helper instead of using the helper macro below. + */ +int kstrtobool_from_user(const char __user *s, size_t count, bool *res) +{ + /* Longest string needed to differentiate, newline, terminator */ + char buf[4]; + + count = min(count, sizeof(buf) - 1); + if (copy_from_user(buf, s, count)) + return -EFAULT; + buf[count] = '\0'; + return kstrtobool(buf, res); +} +EXPORT_SYMBOL(kstrtobool_from_user); + #define kstrto_from_user(f, g, type) \ int f(const char __user *s, size_t count, unsigned int base, type *res) \ { \ diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h index 0710a62ad..c79d7ea8a 100644 --- a/lib/lz4/lz4defs.h +++ b/lib/lz4/lz4defs.h @@ -24,9 +24,7 @@ typedef struct _U16_S { u16 v; } U16_S; typedef struct _U32_S { u32 v; } U32_S; typedef struct _U64_S { u64 v; } U64_S; -#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \ - || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \ - && defined(ARM_EFFICIENT_UNALIGNED_ACCESS) +#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) #define A16(x) (((U16_S *)(x))->v) #define A32(x) (((U32_S *)(x))->v) diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h index b90e255c2..93336502a 100644 --- a/lib/mpi/longlong.h +++ b/lib/mpi/longlong.h @@ -216,7 +216,7 @@ extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype); __asm__ ("%@ Inlined umul_ppmm\n" \ "umull %r1, %r0, %r2, %r3" \ : "=&r" ((USItype)(xh)), \ - "=r" ((USItype)(xl)) \ + "=&r" ((USItype)(xl)) \ : "r" ((USItype)(a)), \ "r" ((USItype)(b)) \ : "r0", "r1") diff --git a/lib/mpi/mpi-inline.h b/lib/mpi/mpi-inline.h index e2b39852b..c245ea31f 100644 --- a/lib/mpi/mpi-inline.h +++ b/lib/mpi/mpi-inline.h @@ -30,7 +30,7 @@ #define G10_MPI_INLINE_H #ifndef G10_MPI_INLINE_DECL -#define G10_MPI_INLINE_DECL extern inline +#define G10_MPI_INLINE_DECL static inline #endif G10_MPI_INLINE_DECL mpi_limb_t diff --git a/lib/mpi/mpi-internal.h b/lib/mpi/mpi-internal.h index c65dd1bff..7eceeddb3 100644 --- 
a/lib/mpi/mpi-internal.h +++ b/lib/mpi/mpi-internal.h @@ -168,19 +168,19 @@ void mpi_rshift_limbs(MPI a, unsigned int count); int mpi_lshift_limbs(MPI a, unsigned int count); /*-- mpihelp-add.c --*/ -mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, +static inline mpi_limb_t mpihelp_add_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_add_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, mpi_size_t size); -mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, +static inline mpi_limb_t mpihelp_add(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_ptr_t s2_ptr, mpi_size_t s2_size); /*-- mpihelp-sub.c --*/ -mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, +static inline mpi_limb_t mpihelp_sub_1(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_limb_t s2_limb); mpi_limb_t mpihelp_sub_n(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_ptr_t s2_ptr, mpi_size_t size); -mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, +static inline mpi_limb_t mpihelp_sub(mpi_ptr_t res_ptr, mpi_ptr_t s1_ptr, mpi_size_t s1_size, mpi_ptr_t s2_ptr, mpi_size_t s2_size); /*-- mpihelp-cmp.c --*/ diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c index 6111bcb28..27fe74948 100644 --- a/lib/percpu-refcount.c +++ b/lib/percpu-refcount.c @@ -12,7 +12,7 @@ * particular cpu can (and will) wrap - this is fine, when we go to shutdown the * percpu counters will all sum to the correct value * - * (More precisely: because moduler arithmatic is commutative the sum of all the + * (More precisely: because modular arithmetic is commutative the sum of all the * percpu_count vars will be equal to what it would have been if all the gets * and puts were done to a single integer, even if some of the percpu integers * overflow or underflow). diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 6b79e9026..1624c4117 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -173,6 +173,41 @@ radix_tree_find_next_bit(const unsigned long *addr, return size; } +#if 0 +static void dump_node(void *slot, int height, int offset) +{ + struct radix_tree_node *node; + int i; + + if (!slot) + return; + + if (height == 0) { + pr_debug("radix entry %p offset %d\n", slot, offset); + return; + } + + node = indirect_to_ptr(slot); + pr_debug("radix node: %p offset %d tags %lx %lx %lx path %x count %d parent %p\n", + slot, offset, node->tags[0][0], node->tags[1][0], + node->tags[2][0], node->path, node->count, node->parent); + + for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) + dump_node(node->slots[i], height - 1, i); +} + +/* For debug */ +static void radix_tree_dump(struct radix_tree_root *root) +{ + pr_debug("radix root: %p height %d rnode %p tags %x\n", + root, root->height, root->rnode, + root->gfp_mask >> __GFP_BITS_SHIFT); + if (!radix_tree_is_indirect_ptr(root->rnode)) + return; + dump_node(root->rnode, root->height, 0); +} +#endif + /* * This assumes that the caller has performed appropriate preallocation, and * that the caller has pinned this thread of control to the current CPU. @@ -192,6 +227,15 @@ radix_tree_node_alloc(struct radix_tree_root *root) struct radix_tree_preload *rtp; /* + * Even if the caller has preloaded, try to allocate from the + * cache first for the new node to get accounted. 
+ */ + ret = kmem_cache_alloc(radix_tree_node_cachep, + gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN); + if (ret) + goto out; + + /* * Provided the caller has preloaded here, we will always * succeed in getting a node here (and never reach * kmem_cache_alloc) @@ -208,10 +252,11 @@ radix_tree_node_alloc(struct radix_tree_root *root) * for debugging. */ kmemleak_update_trace(ret); + goto out; } - if (ret == NULL) - ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); - + ret = kmem_cache_alloc(radix_tree_node_cachep, + gfp_mask | __GFP_ACCOUNT); +out: BUG_ON(radix_tree_is_indirect_ptr(ret)); return ret; } @@ -323,7 +368,8 @@ static inline unsigned long radix_tree_maxindex(unsigned int height) /* * Extend a radix tree so it can store key @index. */ -static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) +static int radix_tree_extend(struct radix_tree_root *root, + unsigned long index, unsigned order) { struct radix_tree_node *node; struct radix_tree_node *slot; @@ -335,7 +381,7 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) while (index > radix_tree_maxindex(height)) height++; - if (root->rnode == NULL) { + if ((root->rnode == NULL) && (order == 0)) { root->height = height; goto out; } @@ -358,9 +404,10 @@ static int radix_tree_extend(struct radix_tree_root *root, unsigned long index) node->count = 1; node->parent = NULL; slot = root->rnode; - if (newheight > 1) { + if (radix_tree_is_indirect_ptr(slot) && newheight > 1) { slot = indirect_to_ptr(slot); slot->parent = node; + slot = ptr_to_indirect(slot); } node->slots[0] = slot; node = ptr_to_indirect(node); @@ -375,6 +422,7 @@ out: * __radix_tree_create - create a slot in a radix tree * @root: radix tree root * @index: index key + * @order: index occupies 2^order aligned slots * @nodep: returns node * @slotp: returns slot * @@ -388,26 +436,29 @@ out: * Returns -ENOMEM, or 0 for success. */ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, - struct radix_tree_node **nodep, void ***slotp) + unsigned order, struct radix_tree_node **nodep, + void ***slotp) { struct radix_tree_node *node = NULL, *slot; unsigned int height, shift, offset; int error; + BUG_ON((0 < order) && (order < RADIX_TREE_MAP_SHIFT)); + /* Make sure the tree is high enough. */ if (index > radix_tree_maxindex(root->height)) { - error = radix_tree_extend(root, index); + error = radix_tree_extend(root, index, order); if (error) return error; } - slot = indirect_to_ptr(root->rnode); + slot = root->rnode; height = root->height; - shift = (height-1) * RADIX_TREE_MAP_SHIFT; + shift = height * RADIX_TREE_MAP_SHIFT; offset = 0; /* uninitialised var warning */ - while (height > 0) { + while (shift > order) { if (slot == NULL) { /* Have to add a child node. 
*/ if (!(slot = radix_tree_node_alloc(root))) @@ -415,19 +466,38 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, slot->path = height; slot->parent = node; if (node) { - rcu_assign_pointer(node->slots[offset], slot); + rcu_assign_pointer(node->slots[offset], + ptr_to_indirect(slot)); node->count++; slot->path |= offset << RADIX_TREE_HEIGHT_SHIFT; } else - rcu_assign_pointer(root->rnode, ptr_to_indirect(slot)); - } + rcu_assign_pointer(root->rnode, + ptr_to_indirect(slot)); + } else if (!radix_tree_is_indirect_ptr(slot)) + break; /* Go a level down */ + height--; + shift -= RADIX_TREE_MAP_SHIFT; offset = (index >> shift) & RADIX_TREE_MAP_MASK; - node = slot; + node = indirect_to_ptr(slot); slot = node->slots[offset]; - shift -= RADIX_TREE_MAP_SHIFT; - height--; + } + + /* Insert pointers to the canonical entry */ + if ((shift - order) > 0) { + int i, n = 1 << (shift - order); + offset = offset & ~(n - 1); + slot = ptr_to_indirect(&node->slots[offset]); + for (i = 0; i < n; i++) { + if (node->slots[offset + i]) + return -EEXIST; + } + + for (i = 1; i < n; i++) { + rcu_assign_pointer(node->slots[offset + i], slot); + node->count++; + } } if (nodep) @@ -438,15 +508,16 @@ int __radix_tree_create(struct radix_tree_root *root, unsigned long index, } /** - * radix_tree_insert - insert into a radix tree + * __radix_tree_insert - insert into a radix tree * @root: radix tree root * @index: index key + * @order: key covers the 2^order indices around index * @item: item to insert * * Insert an item into the radix tree at position @index. */ -int radix_tree_insert(struct radix_tree_root *root, - unsigned long index, void *item) +int __radix_tree_insert(struct radix_tree_root *root, unsigned long index, + unsigned order, void *item) { struct radix_tree_node *node; void **slot; @@ -454,7 +525,7 @@ int radix_tree_insert(struct radix_tree_root *root, BUG_ON(radix_tree_is_indirect_ptr(item)); - error = __radix_tree_create(root, index, &node, &slot); + error = __radix_tree_create(root, index, order, &node, &slot); if (error) return error; if (*slot != NULL) @@ -472,7 +543,7 @@ int radix_tree_insert(struct radix_tree_root *root, return 0; } -EXPORT_SYMBOL(radix_tree_insert); +EXPORT_SYMBOL(__radix_tree_insert); /** * __radix_tree_lookup - lookup an item in a radix tree @@ -523,6 +594,9 @@ void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index, node = rcu_dereference_raw(*slot); if (node == NULL) return NULL; + if (!radix_tree_is_indirect_ptr(node)) + break; + node = indirect_to_ptr(node); shift -= RADIX_TREE_MAP_SHIFT; height--; @@ -609,6 +683,9 @@ void *radix_tree_tag_set(struct radix_tree_root *root, tag_set(slot, tag, offset); slot = slot->slots[offset]; BUG_ON(slot == NULL); + if (!radix_tree_is_indirect_ptr(slot)) + break; + slot = indirect_to_ptr(slot); shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -648,11 +725,14 @@ void *radix_tree_tag_clear(struct radix_tree_root *root, goto out; shift = height * RADIX_TREE_MAP_SHIFT; - slot = indirect_to_ptr(root->rnode); + slot = root->rnode; while (shift) { if (slot == NULL) goto out; + if (!radix_tree_is_indirect_ptr(slot)) + break; + slot = indirect_to_ptr(slot); shift -= RADIX_TREE_MAP_SHIFT; offset = (index >> shift) & RADIX_TREE_MAP_MASK; @@ -728,6 +808,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, if (node == NULL) return 0; + node = indirect_to_ptr(node); offset = (index >> shift) & RADIX_TREE_MAP_MASK; if (!tag_get(node, tag, offset)) @@ -735,6 +816,8 @@ int radix_tree_tag_get(struct 
radix_tree_root *root, if (height == 1) return 1; node = rcu_dereference_raw(node->slots[offset]); + if (!radix_tree_is_indirect_ptr(node)) + return 1; shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -795,6 +878,7 @@ restart: node = rnode; while (1) { + struct radix_tree_node *slot; if ((flags & RADIX_TREE_ITER_TAGGED) ? !test_bit(offset, node->tags[tag]) : !node->slots[offset]) { @@ -825,9 +909,12 @@ restart: if (!shift) break; - node = rcu_dereference_raw(node->slots[offset]); - if (node == NULL) + slot = rcu_dereference_raw(node->slots[offset]); + if (slot == NULL) goto restart; + if (!radix_tree_is_indirect_ptr(slot)) + break; + node = indirect_to_ptr(slot); shift -= RADIX_TREE_MAP_SHIFT; offset = (index >> shift) & RADIX_TREE_MAP_MASK; } @@ -925,15 +1012,20 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, if (!tag_get(slot, iftag, offset)) goto next; if (shift) { - /* Go down one level */ - shift -= RADIX_TREE_MAP_SHIFT; node = slot; slot = slot->slots[offset]; - continue; + if (radix_tree_is_indirect_ptr(slot)) { + slot = indirect_to_ptr(slot); + shift -= RADIX_TREE_MAP_SHIFT; + continue; + } else { + slot = node; + node = node->parent; + } } /* tag the leaf */ - tagged++; + tagged += 1 << shift; tag_set(slot, settag, offset); /* walk back up the path tagging interior nodes */ @@ -1181,10 +1273,20 @@ static unsigned long __locate(struct radix_tree_node *slot, void *item, goto out; } - shift -= RADIX_TREE_MAP_SHIFT; slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) goto out; + if (!radix_tree_is_indirect_ptr(slot)) { + if (slot == item) { + *found_index = index + i; + index = 0; + } else { + index += shift; + } + goto out; + } + slot = indirect_to_ptr(slot); + shift -= RADIX_TREE_MAP_SHIFT; } /* Bottom level: check items */ @@ -1264,11 +1366,13 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) /* * The candidate node has more than one child, or its child - * is not at the leftmost slot, we cannot shrink. + * is not at the leftmost slot, or it is a multiorder entry, + * we cannot shrink. */ if (to_free->count != 1) break; - if (!to_free->slots[0]) + slot = to_free->slots[0]; + if (!slot) break; /* @@ -1278,8 +1382,11 @@ static inline void radix_tree_shrink(struct radix_tree_root *root) * (to_free->slots[0]), it will be safe to dereference the new * one (root->rnode) as far as dependent read barriers go. 
*/ - slot = to_free->slots[0]; if (root->height > 1) { + if (!radix_tree_is_indirect_ptr(slot)) + break; + + slot = indirect_to_ptr(slot); slot->parent = NULL; slot = ptr_to_indirect(slot); } @@ -1377,7 +1484,7 @@ void *radix_tree_delete_item(struct radix_tree_root *root, unsigned long index, void *item) { struct radix_tree_node *node; - unsigned int offset; + unsigned int offset, i; void **slot; void *entry; int tag; @@ -1406,6 +1513,13 @@ void *radix_tree_delete_item(struct radix_tree_root *root, radix_tree_tag_clear(root, index, tag); } + /* Delete any sibling slots pointing to this slot */ + for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) { + if (node->slots[offset + i] != ptr_to_indirect(slot)) + break; + node->slots[offset + i] = NULL; + node->count--; + } node->slots[offset] = NULL; node->count--; diff --git a/lib/random32.c b/lib/random32.c index 12111910c..510d1ce7d 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -255,6 +255,7 @@ void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state) prandom_warmup(state); } } +EXPORT_SYMBOL(prandom_seed_full_state); /* * Generate better values after random number generator diff --git a/lib/stackdepot.c b/lib/stackdepot.c new file mode 100644 index 000000000..53ad6c083 --- /dev/null +++ b/lib/stackdepot.c @@ -0,0 +1,284 @@ +/* + * Generic stack depot for storing stack traces. + * + * Some debugging tools need to save stack traces of certain events which can + * be later presented to the user. For example, KASAN needs to safe alloc and + * free stacks for each object, but storing two stack traces per object + * requires too much memory (e.g. SLUB_DEBUG needs 256 bytes per object for + * that). + * + * Instead, stack depot maintains a hashtable of unique stacktraces. Since alloc + * and free stacks repeat a lot, we save about 100x space. + * Stacks are never removed from depot, so we store them contiguously one after + * another in a contiguos memory allocation. + * + * Author: Alexander Potapenko <glider@google.com> + * Copyright (C) 2016 Google, Inc. + * + * Based on code by Dmitry Chernenkov. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + */ + +#include <linux/gfp.h> +#include <linux/jhash.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/percpu.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/stacktrace.h> +#include <linux/stackdepot.h> +#include <linux/string.h> +#include <linux/types.h> + +#define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8) + +#define STACK_ALLOC_NULL_PROTECTION_BITS 1 +#define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */ +#define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER)) +#define STACK_ALLOC_ALIGN 4 +#define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \ + STACK_ALLOC_ALIGN) +#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \ + STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS) +#define STACK_ALLOC_SLABS_CAP 1024 +#define STACK_ALLOC_MAX_SLABS \ + (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? 
\ + (1LL << (STACK_ALLOC_INDEX_BITS)) : STACK_ALLOC_SLABS_CAP) + +/* The compact structure to store the reference to stacks. */ +union handle_parts { + depot_stack_handle_t handle; + struct { + u32 slabindex : STACK_ALLOC_INDEX_BITS; + u32 offset : STACK_ALLOC_OFFSET_BITS; + u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS; + }; +}; + +struct stack_record { + struct stack_record *next; /* Link in the hashtable */ + u32 hash; /* Hash in the hastable */ + u32 size; /* Number of frames in the stack */ + union handle_parts handle; + unsigned long entries[1]; /* Variable-sized array of entries. */ +}; + +static void *stack_slabs[STACK_ALLOC_MAX_SLABS]; + +static int depot_index; +static int next_slab_inited; +static size_t depot_offset; +static DEFINE_SPINLOCK(depot_lock); + +static bool init_stack_slab(void **prealloc) +{ + if (!*prealloc) + return false; + /* + * This smp_load_acquire() pairs with smp_store_release() to + * |next_slab_inited| below and in depot_alloc_stack(). + */ + if (smp_load_acquire(&next_slab_inited)) + return true; + if (stack_slabs[depot_index] == NULL) { + stack_slabs[depot_index] = *prealloc; + } else { + stack_slabs[depot_index + 1] = *prealloc; + /* + * This smp_store_release pairs with smp_load_acquire() from + * |next_slab_inited| above and in depot_save_stack(). + */ + smp_store_release(&next_slab_inited, 1); + } + *prealloc = NULL; + return true; +} + +/* Allocation of a new stack in raw storage */ +static struct stack_record *depot_alloc_stack(unsigned long *entries, int size, + u32 hash, void **prealloc, gfp_t alloc_flags) +{ + int required_size = offsetof(struct stack_record, entries) + + sizeof(unsigned long) * size; + struct stack_record *stack; + + required_size = ALIGN(required_size, 1 << STACK_ALLOC_ALIGN); + + if (unlikely(depot_offset + required_size > STACK_ALLOC_SIZE)) { + if (unlikely(depot_index + 1 >= STACK_ALLOC_MAX_SLABS)) { + WARN_ONCE(1, "Stack depot reached limit capacity"); + return NULL; + } + depot_index++; + depot_offset = 0; + /* + * smp_store_release() here pairs with smp_load_acquire() from + * |next_slab_inited| in depot_save_stack() and + * init_stack_slab(). + */ + if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) + smp_store_release(&next_slab_inited, 0); + } + init_stack_slab(prealloc); + if (stack_slabs[depot_index] == NULL) + return NULL; + + stack = stack_slabs[depot_index] + depot_offset; + + stack->hash = hash; + stack->size = size; + stack->handle.slabindex = depot_index; + stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN; + stack->handle.valid = 1; + memcpy(stack->entries, entries, size * sizeof(unsigned long)); + depot_offset += required_size; + + return stack; +} + +#define STACK_HASH_ORDER 20 +#define STACK_HASH_SIZE (1L << STACK_HASH_ORDER) +#define STACK_HASH_MASK (STACK_HASH_SIZE - 1) +#define STACK_HASH_SEED 0x9747b28c + +static struct stack_record *stack_table[STACK_HASH_SIZE] = { + [0 ... 
STACK_HASH_SIZE - 1] = NULL +}; + +/* Calculate hash for a stack */ +static inline u32 hash_stack(unsigned long *entries, unsigned int size) +{ + return jhash2((u32 *)entries, + size * sizeof(unsigned long) / sizeof(u32), + STACK_HASH_SEED); +} + +/* Find a stack that is equal to the one stored in entries in the hash */ +static inline struct stack_record *find_stack(struct stack_record *bucket, + unsigned long *entries, int size, + u32 hash) +{ + struct stack_record *found; + + for (found = bucket; found; found = found->next) { + if (found->hash == hash && + found->size == size && + !memcmp(entries, found->entries, + size * sizeof(unsigned long))) { + return found; + } + } + return NULL; +} + +void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace) +{ + union handle_parts parts = { .handle = handle }; + void *slab = stack_slabs[parts.slabindex]; + size_t offset = parts.offset << STACK_ALLOC_ALIGN; + struct stack_record *stack = slab + offset; + + trace->nr_entries = trace->max_entries = stack->size; + trace->entries = stack->entries; + trace->skip = 0; +} + +/** + * depot_save_stack - save stack in a stack depot. + * @trace - the stacktrace to save. + * @alloc_flags - flags for allocating additional memory if required. + * + * Returns the handle of the stack struct stored in depot. + */ +depot_stack_handle_t depot_save_stack(struct stack_trace *trace, + gfp_t alloc_flags) +{ + u32 hash; + depot_stack_handle_t retval = 0; + struct stack_record *found = NULL, **bucket; + unsigned long flags; + struct page *page = NULL; + void *prealloc = NULL; + + if (unlikely(trace->nr_entries == 0)) + goto fast_exit; + + hash = hash_stack(trace->entries, trace->nr_entries); + bucket = &stack_table[hash & STACK_HASH_MASK]; + + /* + * Fast path: look the stack trace up without locking. + * The smp_load_acquire() here pairs with smp_store_release() to + * |bucket| below. + */ + found = find_stack(smp_load_acquire(bucket), trace->entries, + trace->nr_entries, hash); + if (found) + goto exit; + + /* + * Check if the current or the next stack slab need to be initialized. + * If so, allocate the memory - we won't be able to do that under the + * lock. + * + * The smp_load_acquire() here pairs with smp_store_release() to + * |next_slab_inited| in depot_alloc_stack() and init_stack_slab(). + */ + if (unlikely(!smp_load_acquire(&next_slab_inited))) { + /* + * Zero out zone modifiers, as we don't have specific zone + * requirements. Keep the flags related to allocation in atomic + * contexts and I/O. + */ + alloc_flags &= ~GFP_ZONEMASK; + alloc_flags &= (GFP_ATOMIC | GFP_KERNEL); + page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER); + if (page) + prealloc = page_address(page); + } + + spin_lock_irqsave(&depot_lock, flags); + + found = find_stack(*bucket, trace->entries, trace->nr_entries, hash); + if (!found) { + struct stack_record *new = + depot_alloc_stack(trace->entries, trace->nr_entries, + hash, &prealloc, alloc_flags); + if (new) { + new->next = *bucket; + /* + * This smp_store_release() pairs with + * smp_load_acquire() from |bucket| above. + */ + smp_store_release(bucket, new); + found = new; + } + } else if (prealloc) { + /* + * We didn't need to store this stack trace, but let's keep + * the preallocated memory for the future. + */ + WARN_ON(!init_stack_slab(&prealloc)); + } + + spin_unlock_irqrestore(&depot_lock, flags); +exit: + if (prealloc) { + /* Nobody used this memory, ok to free it. 
*/ + free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER); + } + if (found) + retval = found->handle.handle; +fast_exit: + return retval; +} diff --git a/lib/string.c b/lib/string.c index 0323c0d56..ed83562a5 100644 --- a/lib/string.c +++ b/lib/string.c @@ -631,33 +631,30 @@ bool sysfs_streq(const char *s1, const char *s2) EXPORT_SYMBOL(sysfs_streq); /** - * strtobool - convert common user inputs into boolean values - * @s: input string - * @res: result + * match_string - matches given string in an array + * @array: array of strings + * @n: number of strings in the array or -1 for NULL terminated arrays + * @string: string to match with * - * This routine returns 0 iff the first character is one of 'Yy1Nn0'. - * Otherwise it will return -EINVAL. Value pointed to by res is - * updated upon finding a match. - */ -int strtobool(const char *s, bool *res) -{ - switch (s[0]) { - case 'y': - case 'Y': - case '1': - *res = true; - break; - case 'n': - case 'N': - case '0': - *res = false; - break; - default: - return -EINVAL; + * Return: + * index of a @string in the @array if matches, or %-EINVAL otherwise. + */ +int match_string(const char * const *array, size_t n, const char *string) +{ + int index; + const char *item; + + for (index = 0; index < n; index++) { + item = array[index]; + if (!item) + break; + if (!strcmp(item, string)) + return index; } - return 0; + + return -EINVAL; } -EXPORT_SYMBOL(strtobool); +EXPORT_SYMBOL(match_string); #ifndef __HAVE_ARCH_MEMSET /** diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c new file mode 100644 index 000000000..e2cbd43d1 --- /dev/null +++ b/lib/test_bitmap.c @@ -0,0 +1,358 @@ +/* + * Test cases for printf facility. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitmap.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/slab.h> +#include <linux/string.h> + +static unsigned total_tests __initdata; +static unsigned failed_tests __initdata; + +static char pbl_buffer[PAGE_SIZE] __initdata; + + +static bool __init +__check_eq_uint(const char *srcfile, unsigned int line, + const unsigned int exp_uint, unsigned int x) +{ + if (exp_uint != x) { + pr_warn("[%s:%u] expected %u, got %u\n", + srcfile, line, exp_uint, x); + return false; + } + return true; +} + + +static bool __init +__check_eq_bitmap(const char *srcfile, unsigned int line, + const unsigned long *exp_bmap, unsigned int exp_nbits, + const unsigned long *bmap, unsigned int nbits) +{ + if (exp_nbits != nbits) { + pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n", + srcfile, line, exp_nbits, nbits); + return false; + } + + if (!bitmap_equal(exp_bmap, bmap, nbits)) { + pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n", + srcfile, line, + exp_nbits, exp_bmap, nbits, bmap); + return false; + } + return true; +} + +static bool __init +__check_eq_pbl(const char *srcfile, unsigned int line, + const char *expected_pbl, + const unsigned long *bitmap, unsigned int nbits) +{ + snprintf(pbl_buffer, sizeof(pbl_buffer), "%*pbl", nbits, bitmap); + if (strcmp(expected_pbl, pbl_buffer)) { + pr_warn("[%s:%u] expected \"%s\", got \"%s\"\n", + srcfile, line, + expected_pbl, pbl_buffer); + return false; + } + return true; +} + +static bool __init +__check_eq_u32_array(const char *srcfile, unsigned int line, + const u32 *exp_arr, unsigned int exp_len, + const u32 *arr, unsigned int len) +{ + if (exp_len != len) { + pr_warn("[%s:%u] array length differ: expected %u, got %u\n", + 
srcfile, line, + exp_len, len); + return false; + } + + if (memcmp(exp_arr, arr, len*sizeof(*arr))) { + pr_warn("[%s:%u] array contents differ\n", srcfile, line); + print_hex_dump(KERN_WARNING, " exp: ", DUMP_PREFIX_OFFSET, + 32, 4, exp_arr, exp_len*sizeof(*exp_arr), false); + print_hex_dump(KERN_WARNING, " got: ", DUMP_PREFIX_OFFSET, + 32, 4, arr, len*sizeof(*arr), false); + return false; + } + + return true; +} + +#define __expect_eq(suffix, ...) \ + ({ \ + int result = 0; \ + total_tests++; \ + if (!__check_eq_ ## suffix(__FILE__, __LINE__, \ + ##__VA_ARGS__)) { \ + failed_tests++; \ + result = 1; \ + } \ + result; \ + }) + +#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__) +#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__) +#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__) +#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__) + +static void __init test_zero_fill_copy(void) +{ + DECLARE_BITMAP(bmap1, 1024); + DECLARE_BITMAP(bmap2, 1024); + + bitmap_zero(bmap1, 1024); + bitmap_zero(bmap2, 1024); + + /* single-word bitmaps */ + expect_eq_pbl("", bmap1, 23); + + bitmap_fill(bmap1, 19); + expect_eq_pbl("0-18", bmap1, 1024); + + bitmap_copy(bmap2, bmap1, 23); + expect_eq_pbl("0-18", bmap2, 1024); + + bitmap_fill(bmap2, 23); + expect_eq_pbl("0-22", bmap2, 1024); + + bitmap_copy(bmap2, bmap1, 23); + expect_eq_pbl("0-18", bmap2, 1024); + + bitmap_zero(bmap1, 23); + expect_eq_pbl("", bmap1, 1024); + + /* multi-word bitmaps */ + bitmap_zero(bmap1, 1024); + expect_eq_pbl("", bmap1, 1024); + + bitmap_fill(bmap1, 109); + expect_eq_pbl("0-108", bmap1, 1024); + + bitmap_copy(bmap2, bmap1, 1024); + expect_eq_pbl("0-108", bmap2, 1024); + + bitmap_fill(bmap2, 1024); + expect_eq_pbl("0-1023", bmap2, 1024); + + bitmap_copy(bmap2, bmap1, 1024); + expect_eq_pbl("0-108", bmap2, 1024); + + /* the following tests assume a 32- or 64-bit arch (even 128b + * if we care) + */ + + bitmap_fill(bmap2, 1024); + bitmap_copy(bmap2, bmap1, 109); /* ... but 0-padded til word length */ + expect_eq_pbl("0-108,128-1023", bmap2, 1024); + + bitmap_fill(bmap2, 1024); + bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */ + expect_eq_pbl("0-108,128-1023", bmap2, 1024); + + bitmap_zero(bmap2, 97); /* ... 
but 0-padded til word length */ + expect_eq_pbl("128-1023", bmap2, 1024); +} + +static void __init test_bitmap_u32_array_conversions(void) +{ + DECLARE_BITMAP(bmap1, 1024); + DECLARE_BITMAP(bmap2, 1024); + u32 exp_arr[32], arr[32]; + unsigned nbits; + + for (nbits = 0 ; nbits < 257 ; ++nbits) { + const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32); + unsigned int i, rv; + + bitmap_zero(bmap1, nbits); + bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ + + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); + expect_eq_uint(nbits, rv); + + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s); + expect_eq_uint(nbits, rv); + expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + for (i = 0 ; i < nbits ; ++i) { + /* + * test conversion bitmap -> u32[] + */ + + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + bitmap_set(bmap1, nbits, 1024 - nbits); /* garbage */ + + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits); + expect_eq_uint(nbits, rv); + + /* 1st used u32 words contain expected bit set, the + * remaining words are left unchanged (0xff) + */ + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, used_u32s*sizeof(*exp_arr)); + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + + /* same, with longer array to fill + */ + memset(arr, 0xff, sizeof(arr)); + rv = bitmap_to_u32array(arr, 32, bmap1, nbits); + expect_eq_uint(nbits, rv); + + /* 1st used u32 words contain expected bit set, the + * remaining words are all 0s + */ + memset(exp_arr, 0, sizeof(exp_arr)); + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + + /* + * test conversion u32[] -> bitmap + */ + + /* the 1st nbits of bmap2 are identical to + * bmap1, the remaining bits of bmap2 are left + * unchanged (all 1s) + */ + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, nbits, + exp_arr, used_u32s); + expect_eq_uint(nbits, rv); + + expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + /* same, with more bits to fill + */ + memset(arr, 0xff, sizeof(arr)); /* garbage */ + memset(arr, 0, used_u32s*sizeof(u32)); + arr[i/32] = (1U<<(i%32)); + + bitmap_fill(bmap2, 1024); + rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s); + expect_eq_uint(used_u32s*32, rv); + + /* the 1st nbits of bmap2 are identical to + * bmap1, the remaining bits of bmap2 are cleared + */ + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + expect_eq_bitmap(bmap1, 1024, bmap2, 1024); + + + /* + * test short conversion bitmap -> u32[] (1 + * word too short) + */ + if (used_u32s > 1) { + bitmap_zero(bmap1, 1024); + __set_bit(i, bmap1); + bitmap_set(bmap1, nbits, + 1024 - nbits); /* garbage */ + memset(arr, 0xff, sizeof(arr)); + + rv = bitmap_to_u32array(arr, used_u32s - 1, + bmap1, nbits); + expect_eq_uint((used_u32s - 1)*32, rv); + + /* 1st used u32 words contain expected + * bit set, the remaining words are + * left unchanged (0xff) + */ + memset(exp_arr, 0xff, sizeof(exp_arr)); + memset(exp_arr, 0, + (used_u32s-1)*sizeof(*exp_arr)); + if ((i/32) < (used_u32s - 1)) + exp_arr[i/32] = (1U<<(i%32)); + expect_eq_u32_array(exp_arr, 32, arr, 32); + } + + /* + * test short conversion u32[] -> bitmap (3 + * bits too short) + */ + if (nbits > 3) { + memset(arr, 0xff, sizeof(arr)); /* garbage */ + memset(arr, 0, used_u32s*sizeof(*arr)); + arr[i/32] = (1U<<(i%32)); 
+ + bitmap_zero(bmap1, 1024); + rv = bitmap_from_u32array(bmap1, nbits - 3, + arr, used_u32s); + expect_eq_uint(nbits - 3, rv); + + /* we are expecting the bit < nbits - + * 3 (none otherwise), and the rest of + * bmap1 unchanged (0-filled) + */ + bitmap_zero(bmap2, 1024); + if (i < nbits - 3) + __set_bit(i, bmap2); + expect_eq_bitmap(bmap2, 1024, bmap1, 1024); + + /* do the same with bmap1 initially + * 1-filled + */ + + bitmap_fill(bmap1, 1024); + rv = bitmap_from_u32array(bmap1, nbits - 3, + arr, used_u32s); + expect_eq_uint(nbits - 3, rv); + + /* we are expecting the bit < nbits - + * 3 (none otherwise), and the rest of + * bmap1 unchanged (1-filled) + */ + bitmap_zero(bmap2, 1024); + if (i < nbits - 3) + __set_bit(i, bmap2); + bitmap_set(bmap2, nbits-3, 1024 - nbits + 3); + expect_eq_bitmap(bmap2, 1024, bmap1, 1024); + } + } + } +} + +static int __init test_bitmap_init(void) +{ + test_zero_fill_copy(); + test_bitmap_u32_array_conversions(); + + if (failed_tests == 0) + pr_info("all %u tests passed\n", total_tests); + else + pr_warn("failed %u out of %u tests\n", + failed_tests, total_tests); + + return failed_tests ? -EINVAL : 0; +} + +static void __exit test_bitmap_cleanup(void) +{ +} + +module_init(test_bitmap_init); +module_exit(test_bitmap_cleanup); + +MODULE_AUTHOR("david decotigny <david.decotigny@googlers.com>"); +MODULE_LICENSE("GPL"); diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 27a7a26b1..8f22fbedc 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -2444,6 +2444,22 @@ static struct bpf_test tests[] = { { { 0, 4294967295U } }, }, { + "ALU_ADD_X: 2 + 4294967294 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU64_ADD_X: 1 + 2 = 3", .u.insns_int = { BPF_LD_IMM64(R0, 1), @@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = { { }, { { 0, 4294967295U } }, }, + { + "ALU64_ADD_X: 2 + 4294967294 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_LD_IMM64(R2, 4294967296ULL), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_JMP_REG(BPF_JEQ, R0, R2, 2), + BPF_MOV32_IMM(R0, 0), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_ALU | BPF_ADD | BPF_K */ { "ALU_ADD_K: 1 + 2 = 3", @@ -2502,6 +2535,21 @@ static struct bpf_test tests[] = { { { 0, 4294967295U } }, }, { + "ALU_ADD_K: 4294967294 + 2 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", .u.insns_int = { BPF_LD_IMM64(R2, 0x0), @@ -2518,6 +2566,70 @@ static struct bpf_test tests[] = { { { 0, 0x1 } }, }, { + "ALU_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), 
+ BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80000000 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80000000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0x80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80008000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { "ALU64_ADD_K: 1 + 2 = 3", .u.insns_int = { BPF_LD_IMM64(R0, 1), @@ -2551,6 +2663,22 @@ static struct bpf_test tests[] = { { { 0, 2147483647 } }, }, { + "ALU64_ADD_K: 4294967294 + 2 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_LD_IMM64(R1, 4294967296ULL), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU64_ADD_K: 2147483646 + -2147483647 = -1", .u.insns_int = { BPF_LD_IMM64(R0, 2147483646), @@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = { { }, { { 0, 0x1 } }, }, + { + "ALU64_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80000000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80008000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, /* BPF_ALU | BPF_SUB | BPF_X */ { "ALU_SUB_X: 3 - 1 = 2", @@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_JMP_IMM(BPF_JGT, R1, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_K */ { "JMP_JGE_K: if (3 >= 2) return 1", @@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_JMP_IMM(BPF_JSET, R1, 2, 1), BPF_EXIT_INSN(), 
BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1), + BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, 1), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_X */ { "JMP_JGE_X: if (3 >= 2) return 1", @@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), BPF_LD_IMM64(R2, 2), - BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), BPF_LD_IMM64(R2, 0xffffffff), - BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), diff --git a/lib/test_kasan.c b/lib/test_kasan.c index c32f3b004..82169fbf2 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -65,11 +65,34 @@ static noinline void __init kmalloc_node_oob_right(void) kfree(ptr); } -static noinline void __init kmalloc_large_oob_right(void) +#ifdef CONFIG_SLUB +static noinline void __init kmalloc_pagealloc_oob_right(void) { char *ptr; size_t size = KMALLOC_MAX_CACHE_SIZE + 10; + /* Allocate a chunk that does not fit into a SLUB cache to trigger + * the page allocator fallback. + */ + pr_info("kmalloc pagealloc allocation: out-of-bounds to right\n"); + ptr = kmalloc(size, GFP_KERNEL); + if (!ptr) { + pr_err("Allocation failed\n"); + return; + } + + ptr[size] = 0; + kfree(ptr); +} +#endif + +static noinline void __init kmalloc_large_oob_right(void) +{ + char *ptr; + size_t size = KMALLOC_MAX_CACHE_SIZE - 256; + /* Allocate a chunk that is large enough, but still fits into a slab + * and does not trigger the page allocator fallback in SLUB. 
+ */ pr_info("kmalloc large allocation: out-of-bounds to right\n"); ptr = kmalloc(size, GFP_KERNEL); if (!ptr) { @@ -271,6 +294,8 @@ static noinline void __init kmalloc_uaf2(void) } ptr1[40] = 'x'; + if (ptr1 == ptr2) + pr_err("Could not detect use-after-free: ptr1 == ptr2\n"); kfree(ptr2); } @@ -324,6 +349,9 @@ static int __init kmalloc_tests_init(void) kmalloc_oob_right(); kmalloc_oob_left(); kmalloc_node_oob_right(); +#ifdef CONFIG_SLUB + kmalloc_pagealloc_oob_right(); +#endif kmalloc_large_oob_right(); kmalloc_oob_krealloc_more(); kmalloc_oob_krealloc_less(); diff --git a/lib/test_printf.c b/lib/test_printf.c index 4f6ae6043..563f10e68 100644 --- a/lib/test_printf.c +++ b/lib/test_printf.c @@ -17,6 +17,9 @@ #include <linux/socket.h> #include <linux/in.h> +#include <linux/gfp.h> +#include <linux/mm.h> + #define BUF_SIZE 256 #define PAD_SIZE 16 #define FILL_CHAR '$' @@ -411,6 +414,55 @@ netdev_features(void) } static void __init +flags(void) +{ + unsigned long flags; + gfp_t gfp; + char *cmp_buffer; + + flags = 0; + test("", "%pGp", &flags); + + /* Page flags should filter the zone id */ + flags = 1UL << NR_PAGEFLAGS; + test("", "%pGp", &flags); + + flags |= 1UL << PG_uptodate | 1UL << PG_dirty | 1UL << PG_lru + | 1UL << PG_active | 1UL << PG_swapbacked; + test("uptodate|dirty|lru|active|swapbacked", "%pGp", &flags); + + + flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC + | VM_DENYWRITE; + test("read|exec|mayread|maywrite|mayexec|denywrite", "%pGv", &flags); + + gfp = GFP_TRANSHUGE; + test("GFP_TRANSHUGE", "%pGg", &gfp); + + gfp = GFP_ATOMIC|__GFP_DMA; + test("GFP_ATOMIC|GFP_DMA", "%pGg", &gfp); + + gfp = __GFP_ATOMIC; + test("__GFP_ATOMIC", "%pGg", &gfp); + + cmp_buffer = kmalloc(BUF_SIZE, GFP_KERNEL); + if (!cmp_buffer) + return; + + /* Any flags not translated by the table should remain numeric */ + gfp = ~__GFP_BITS_MASK; + snprintf(cmp_buffer, BUF_SIZE, "%#lx", (unsigned long) gfp); + test(cmp_buffer, "%pGg", &gfp); + + snprintf(cmp_buffer, BUF_SIZE, "__GFP_ATOMIC|%#lx", + (unsigned long) gfp); + gfp |= __GFP_ATOMIC; + test(cmp_buffer, "%pGg", &gfp); + + kfree(cmp_buffer); +} + +static void __init test_pointer(void) { plain(); @@ -428,6 +480,7 @@ test_pointer(void) struct_clk(); bitmap(); netdev_features(); + flags(); } static int __init diff --git a/lib/test_static_keys.c b/lib/test_static_keys.c index c61b299e3..915d75df2 100644 --- a/lib/test_static_keys.c +++ b/lib/test_static_keys.c @@ -46,8 +46,11 @@ struct test_key { bool (*test_key)(void); }; -#define test_key_func(key, branch) \ - ({bool func(void) { return branch(key); } func; }) +#define test_key_func(key, branch) \ +static bool key ## _ ## branch(void) \ +{ \ + return branch(&key); \ +} static void invert_key(struct static_key *key) { @@ -92,6 +95,25 @@ static int verify_keys(struct test_key *keys, int size, bool invert) return 0; } +test_key_func(old_true_key, static_key_true) +test_key_func(old_false_key, static_key_false) +test_key_func(true_key, static_branch_likely) +test_key_func(true_key, static_branch_unlikely) +test_key_func(false_key, static_branch_likely) +test_key_func(false_key, static_branch_unlikely) +test_key_func(base_old_true_key, static_key_true) +test_key_func(base_inv_old_true_key, static_key_true) +test_key_func(base_old_false_key, static_key_false) +test_key_func(base_inv_old_false_key, static_key_false) +test_key_func(base_true_key, static_branch_likely) +test_key_func(base_true_key, static_branch_unlikely) +test_key_func(base_inv_true_key, static_branch_likely) 
+test_key_func(base_inv_true_key, static_branch_unlikely) +test_key_func(base_false_key, static_branch_likely) +test_key_func(base_false_key, static_branch_unlikely) +test_key_func(base_inv_false_key, static_branch_likely) +test_key_func(base_inv_false_key, static_branch_unlikely) + static int __init test_static_key_init(void) { int ret; @@ -102,95 +124,95 @@ static int __init test_static_key_init(void) { .init_state = true, .key = &old_true_key, - .test_key = test_key_func(&old_true_key, static_key_true), + .test_key = &old_true_key_static_key_true, }, { .init_state = false, .key = &old_false_key, - .test_key = test_key_func(&old_false_key, static_key_false), + .test_key = &old_false_key_static_key_false, }, /* internal keys - new keys */ { .init_state = true, .key = &true_key.key, - .test_key = test_key_func(&true_key, static_branch_likely), + .test_key = &true_key_static_branch_likely, }, { .init_state = true, .key = &true_key.key, - .test_key = test_key_func(&true_key, static_branch_unlikely), + .test_key = &true_key_static_branch_unlikely, }, { .init_state = false, .key = &false_key.key, - .test_key = test_key_func(&false_key, static_branch_likely), + .test_key = &false_key_static_branch_likely, }, { .init_state = false, .key = &false_key.key, - .test_key = test_key_func(&false_key, static_branch_unlikely), + .test_key = &false_key_static_branch_unlikely, }, /* external keys - old keys */ { .init_state = true, .key = &base_old_true_key, - .test_key = test_key_func(&base_old_true_key, static_key_true), + .test_key = &base_old_true_key_static_key_true, }, { .init_state = false, .key = &base_inv_old_true_key, - .test_key = test_key_func(&base_inv_old_true_key, static_key_true), + .test_key = &base_inv_old_true_key_static_key_true, }, { .init_state = false, .key = &base_old_false_key, - .test_key = test_key_func(&base_old_false_key, static_key_false), + .test_key = &base_old_false_key_static_key_false, }, { .init_state = true, .key = &base_inv_old_false_key, - .test_key = test_key_func(&base_inv_old_false_key, static_key_false), + .test_key = &base_inv_old_false_key_static_key_false, }, /* external keys - new keys */ { .init_state = true, .key = &base_true_key.key, - .test_key = test_key_func(&base_true_key, static_branch_likely), + .test_key = &base_true_key_static_branch_likely, }, { .init_state = true, .key = &base_true_key.key, - .test_key = test_key_func(&base_true_key, static_branch_unlikely), + .test_key = &base_true_key_static_branch_unlikely, }, { .init_state = false, .key = &base_inv_true_key.key, - .test_key = test_key_func(&base_inv_true_key, static_branch_likely), + .test_key = &base_inv_true_key_static_branch_likely, }, { .init_state = false, .key = &base_inv_true_key.key, - .test_key = test_key_func(&base_inv_true_key, static_branch_unlikely), + .test_key = &base_inv_true_key_static_branch_unlikely, }, { .init_state = false, .key = &base_false_key.key, - .test_key = test_key_func(&base_false_key, static_branch_likely), + .test_key = &base_false_key_static_branch_likely, }, { .init_state = false, .key = &base_false_key.key, - .test_key = test_key_func(&base_false_key, static_branch_unlikely), + .test_key = &base_false_key_static_branch_unlikely, }, { .init_state = true, .key = &base_inv_false_key.key, - .test_key = test_key_func(&base_inv_false_key, static_branch_likely), + .test_key = &base_inv_false_key_static_branch_likely, }, { .init_state = true, .key = &base_inv_false_key.key, - .test_key = test_key_func(&base_inv_false_key, static_branch_unlikely), + .test_key = 
&base_inv_false_key_static_branch_unlikely, }, }; diff --git a/lib/vsprintf.c b/lib/vsprintf.c index f44e178e6..ccb664b54 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -35,6 +35,8 @@ #include <linux/blkdev.h> #endif +#include "../mm/internal.h" /* For the trace_print_flags arrays */ + #include <asm/page.h> /* for PAGE_SIZE */ #include <asm/sections.h> /* for dereference_function_descriptor() */ #include <asm/byteorder.h> /* cpu_to_le16 */ @@ -1407,6 +1409,72 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec, } } +static +char *format_flags(char *buf, char *end, unsigned long flags, + const struct trace_print_flags *names) +{ + unsigned long mask; + const struct printf_spec strspec = { + .field_width = -1, + .precision = -1, + }; + const struct printf_spec numspec = { + .flags = SPECIAL|SMALL, + .field_width = -1, + .precision = -1, + .base = 16, + }; + + for ( ; flags && names->name; names++) { + mask = names->mask; + if ((flags & mask) != mask) + continue; + + buf = string(buf, end, names->name, strspec); + + flags &= ~mask; + if (flags) { + if (buf < end) + *buf = '|'; + buf++; + } + } + + if (flags) + buf = number(buf, end, flags, numspec); + + return buf; +} + +static noinline_for_stack +char *flags_string(char *buf, char *end, void *flags_ptr, const char *fmt) +{ + unsigned long flags; + const struct trace_print_flags *names; + + switch (fmt[1]) { + case 'p': + flags = *(unsigned long *)flags_ptr; + /* Remove zone id */ + flags &= (1UL << NR_PAGEFLAGS) - 1; + names = pageflag_names; + break; + case 'v': + flags = *(unsigned long *)flags_ptr; + names = vmaflag_names; + break; + case 'g': + flags = *(gfp_t *)flags_ptr; + names = gfpflag_names; + break; + default: + WARN_ONCE(1, "Unsupported flags modifier: %c\n", fmt[1]); + return buf; + } + + return format_flags(buf, end, flags, names); +} + int kptr_restrict __read_mostly; /* @@ -1495,6 +1563,11 @@ int kptr_restrict __read_mostly; * - 'Cn' For a clock, it prints the name (Common Clock Framework) or address * (legacy clock framework) of the clock * - 'Cr' For a clock, it prints the current rate of the clock + * - 'G' For flags to be printed as a collection of symbolic strings that would + * construct the specific value. Supported flags given by option: + * p page flags (see struct page) given as pointer to unsigned long + * g gfp flags (GFP_* and __GFP_*) given as pointer to gfp_t + * v vma flags (VM_*) given as pointer to unsigned long * * ** Please update also Documentation/printk-formats.txt when making changes ** * @@ -1648,6 +1721,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr, return bdev_name(buf, end, ptr, spec, fmt); #endif + case 'G': + return flags_string(buf, end, ptr, fmt); } spec.flags |= SMALL; if (spec.field_width == -1) { @@ -2565,8 +2640,12 @@ int vsscanf(const char *buf, const char *fmt, va_list args) if (*fmt == '*') { if (!*str) break; - while (!isspace(*fmt) && *fmt != '%' && *fmt) + while (!isspace(*fmt) && *fmt != '%' && *fmt) { + /* '%*[' not yet supported, invalid format */ + if (*fmt == '[') + return num; fmt++; + } while (!isspace(*str) && *str) str++; continue; @@ -2639,6 +2718,59 @@ int vsscanf(const char *buf, const char *fmt, va_list args) num++; } continue; + /* + * Warning: This implementation of the '[' conversion specifier + * deviates from its glibc counterpart in the following ways: + * (1) It does NOT support ranges i.e. 
'-' is NOT a special + * character + * (2) It cannot match the closing bracket ']' itself + * (3) A field width is required + * (4) '%*[' (discard matching input) is currently not supported + * + * Example usage: + * ret = sscanf("00:0a:95","%2[^:]:%2[^:]:%2[^:]", + * buf1, buf2, buf3); + * if (ret < 3) + * // etc.. + */ + case '[': + { + char *s = (char *)va_arg(args, char *); + DECLARE_BITMAP(set, 256) = {0}; + unsigned int len = 0; + bool negate = (*fmt == '^'); + + /* field width is required */ + if (field_width == -1) + return num; + + if (negate) + ++fmt; + + for ( ; *fmt && *fmt != ']'; ++fmt, ++len) + set_bit((u8)*fmt, set); + + /* no ']' or no character set found */ + if (!*fmt || !len) + return num; + ++fmt; + + if (negate) { + bitmap_complement(set, set, 256); + /* exclude null '\0' byte */ + clear_bit(0, set); + } + + /* match must be non-empty */ + if (!test_bit((u8)*str, set)) + return num; + + while (test_bit((u8)*str, set) && field_width--) + *s++ = *str++; + *s = '\0'; + ++num; + } + continue; case 'o': base = 8; break; |
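
A minimal usage sketch for the '%[' conversion implemented in vsscanf() above, assuming an ordinary kernel caller; the function and buffer names are hypothetical. As the comment in the patch states, ranges are not supported, ']' itself cannot be matched, a field width is mandatory, and '%*[' is rejected.

#include <linux/kernel.h>	/* sscanf() */
#include <linux/printk.h>

static void scanf_class_demo(void)
{
	/* two-character fields plus the trailing NUL that '%[' writes */
	char a[3], b[3], c[3];
	int ret;

	ret = sscanf("00:0a:95", "%2[^:]:%2[^:]:%2[^:]", a, b, c);
	if (ret != 3)
		pr_warn("expected 3 fields, matched %d\n", ret);
}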
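
The '%pG' modifiers documented above all take a pointer to the flags word rather than the value itself, mirroring the tests added in lib/test_printf.c; any bits without a name in the corresponding table fall through format_flags() and are appended as a hex literal. A short sketch with a hypothetical reporting helper:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

static void report_flags(struct page *page, struct vm_area_struct *vma)
{
	gfp_t gfp = GFP_KERNEL | __GFP_ZERO;

	/* note the '&' in each case: the specifier dereferences a pointer */
	pr_info("page flags: %pGp\n", &page->flags);
	pr_info("vma flags:  %pGv\n", &vma->vm_flags);
	pr_info("gfp mask:   %pGg\n", &gfp);
}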
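
In the reworked lib/test_static_keys.c above, test_key_func() no longer relies on a GCC nested-function expression; each invocation now defines a named helper by token pasting, and the initializers take the address of that helper. One expansion written out for reference (illustration only, not part of the patch):

/* test_key_func(true_key, static_branch_likely) expands to: */
static bool true_key_static_branch_likely(void)
{
	return static_branch_likely(&true_key);
}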
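
Earlier in the patch, the JMP_JSET test cases in lib/test_bpf.c switch from BPF_JNE to BPF_JSET so that they exercise the bit-test opcode their names advertise; with R1 = 3, both conditions happen to hold for the operands used, which is why the old encoding still passed. The two branch conditions, roughly as the interpreter evaluates them (a sketch, not code from the patch):

#include <linux/types.h>

/* condition under which each 64-bit JMP opcode takes the branch */
static bool jne_taken(u64 dst, u64 src)
{
	return dst != src;		/* BPF_JNE: values differ */
}

static bool jset_taken(u64 dst, u64 src)
{
	return (dst & src) != 0;	/* BPF_JSET: any bit in common */
}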