| author | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300 |
|---|---|---|
| committer | André Fabian Silva Delgado <emulatorman@parabola.nu> | 2015-09-08 01:01:14 -0300 |
| commit | e5fd91f1ef340da553f7a79da9540c3db711c937 (patch) | |
| tree | b11842027dc6641da63f4bcc524f8678263304a3 | /lib |
| parent | 2a9b0348e685a63d97486f6749622b61e9e3292f (diff) | |
Linux-libre 4.2-gnu
Diffstat (limited to 'lib')
39 files changed, 4415 insertions, 237 deletions
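The bulk of this commit adds the new software 842 compression library under lib/842/. As a quick orientation before the raw diff, here is a minimal, hedged sketch of how the exported API (sw842_compress() / sw842_decompress(), whose kernel-doc appears in the diff below) would be exercised from kernel code. The helper name sw842_roundtrip and the 2x output-buffer headroom are illustrative assumptions, not part of the commit.

```c
/* Illustrative sketch only: round-trip a buffer through the sw842 API added
 * by this commit. The function signatures and SW842_MEM_COMPRESS come from
 * <linux/sw842.h> as referenced in the diff; the helper itself is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sw842.h>

static int sw842_roundtrip(const u8 *src, unsigned int slen)
{
	/* assume 2x headroom for incompressible data; *olen is in/out */
	unsigned int clen = 2 * slen, dlen = slen;
	u8 *comp, *decomp;
	void *wmem;
	int ret = -ENOMEM;

	comp = kmalloc(clen, GFP_KERNEL);
	decomp = kmalloc(dlen, GFP_KERNEL);
	wmem = kmalloc(SW842_MEM_COMPRESS, GFP_KERNEL); /* compressor workspace */
	if (!comp || !decomp || !wmem)
		goto out;

	ret = sw842_compress(src, slen, comp, &clen, wmem);
	if (ret)
		goto out;

	ret = sw842_decompress(comp, clen, decomp, &dlen);
	if (!ret && (dlen != slen || memcmp(src, decomp, slen)))
		ret = -EINVAL;	/* round trip did not reproduce the input */
out:
	kfree(comp);
	kfree(decomp);
	kfree(wmem);
	return ret;
}
```

Note that, per the 842.h header comment and the `strict` module parameter in the diff below, input lengths that are not a multiple of 8 bytes only compress via the software-only "short data" template; such buffers cannot be decompressed by the 842 hardware.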
diff --git a/lib/842/842.h b/lib/842/842.h new file mode 100644 index 000000000..7c200030a --- /dev/null +++ b/lib/842/842.h @@ -0,0 +1,127 @@ + +#ifndef __842_H__ +#define __842_H__ + +/* The 842 compressed format is made up of multiple blocks, each of + * which have the format: + * + * <template>[arg1][arg2][arg3][arg4] + * + * where there are between 0 and 4 template args, depending on the specific + * template operation. For normal operations, each arg is either a specific + * number of data bytes to add to the output buffer, or an index pointing + * to a previously-written number of data bytes to copy to the output buffer. + * + * The template code is a 5-bit value. This code indicates what to do with + * the following data. Template codes from 0 to 0x19 should use the template + * table, the static "decomp_ops" table used in decompress. For each template + * (table row), there are between 1 and 4 actions; each action corresponds to + * an arg following the template code bits. Each action is either a "data" + * type action, or a "index" type action, and each action results in 2, 4, or 8 + * bytes being written to the output buffer. Each template (i.e. all actions + * in the table row) will add up to 8 bytes being written to the output buffer. + * Any row with less than 4 actions is padded with noop actions, indicated by + * N0 (for which there is no corresponding arg in the compressed data buffer). + * + * "Data" actions, indicated in the table by D2, D4, and D8, mean that the + * corresponding arg is 2, 4, or 8 bytes, respectively, in the compressed data + * buffer should be copied directly to the output buffer. + * + * "Index" actions, indicated in the table by I2, I4, and I8, mean the + * corresponding arg is an index parameter that points to, respectively, a 2, + * 4, or 8 byte value already in the output buffer, that should be copied to + * the end of the output buffer. Essentially, the index points to a position + * in a ring buffer that contains the last N bytes of output buffer data. + * The number of bits for each index's arg are: 8 bits for I2, 9 bits for I4, + * and 8 bits for I8. Since each index points to a 2, 4, or 8 byte section, + * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4 + * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048 + * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind-of ring buffer for + * each of I2, I4, and I8 that are updated for each byte written to the output + * buffer. In this implementation, the output buffer is directly used for each + * index; there is no additional memory required. Note that the index is into + * a ring buffer, not a sliding window; for example, if there have been 260 + * bytes written to the output buffer, an I2 index of 0 would index to byte 256 + * in the output buffer, while an I2 index of 16 would index to byte 16 in the + * output buffer. + * + * There are also 3 special template codes; 0x1b for "repeat", 0x1c for + * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit + * arg N indicating how many times to repeat. The last 8 bytes written to the + * output buffer are written again to the output buffer, N + 1 times. The + * "zeros" operation, which has no arg bits, writes 8 zeros to the output + * buffer. The "end" operation, which also has no arg bits, signals the end + * of the compressed data. 
There may be some number of padding (don't care, + * but usually 0) bits after the "end" operation bits, to fill the buffer + * length to a specific byte multiple (usually a multiple of 8, 16, or 32 + * bytes). + * + * This software implementation also uses one of the undefined template values, + * 0x1d as a special "short data" template code, to represent less than 8 bytes + * of uncompressed data. It is followed by a 3 bit arg N indicating how many + * data bytes will follow, and then N bytes of data, which should be copied to + * the output buffer. This allows the software 842 compressor to accept input + * buffers that are not an exact multiple of 8 bytes long. However, those + * compressed buffers containing this sw-only template will be rejected by + * the 842 hardware decompressor, and must be decompressed with this software + * library. The 842 software compression module includes a parameter to + * disable using this sw-only "short data" template, and instead simply + * reject any input buffer that is not a multiple of 8 bytes long. + * + * After all actions for each operation code are processed, another template + * code is in the next 5 bits. The decompression ends once the "end" template + * code is detected. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <asm/unaligned.h> + +#include <linux/sw842.h> + +/* special templates */ +#define OP_REPEAT (0x1B) +#define OP_ZEROS (0x1C) +#define OP_END (0x1E) + +/* sw only template - this is not in the hw design; it's used only by this + * software compressor and decompressor, to allow input buffers that aren't + * a multiple of 8. + */ +#define OP_SHORT_DATA (0x1D) + +/* additional bits of each op param */ +#define OP_BITS (5) +#define REPEAT_BITS (6) +#define SHORT_DATA_BITS (3) +#define I2_BITS (8) +#define I4_BITS (9) +#define I8_BITS (8) + +#define REPEAT_BITS_MAX (0x3f) +#define SHORT_DATA_BITS_MAX (0x7) + +/* Arbitrary values used to indicate action */ +#define OP_ACTION (0x70) +#define OP_ACTION_INDEX (0x10) +#define OP_ACTION_DATA (0x20) +#define OP_ACTION_NOOP (0x40) +#define OP_AMOUNT (0x0f) +#define OP_AMOUNT_0 (0x00) +#define OP_AMOUNT_2 (0x02) +#define OP_AMOUNT_4 (0x04) +#define OP_AMOUNT_8 (0x08) + +#define D2 (OP_ACTION_DATA | OP_AMOUNT_2) +#define D4 (OP_ACTION_DATA | OP_AMOUNT_4) +#define D8 (OP_ACTION_DATA | OP_AMOUNT_8) +#define I2 (OP_ACTION_INDEX | OP_AMOUNT_2) +#define I4 (OP_ACTION_INDEX | OP_AMOUNT_4) +#define I8 (OP_ACTION_INDEX | OP_AMOUNT_8) +#define N0 (OP_ACTION_NOOP | OP_AMOUNT_0) + +/* the max of the regular templates - not including the special templates */ +#define OPS_MAX (0x1a) + +#endif diff --git a/lib/842/842_compress.c b/lib/842/842_compress.c new file mode 100644 index 000000000..7ce68948e --- /dev/null +++ b/lib/842/842_compress.c @@ -0,0 +1,626 @@ +/* + * 842 Software Compression + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * See 842.h for details of the 842 compressed format. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define MODULE_NAME "842_compress" + +#include <linux/hashtable.h> + +#include "842.h" +#include "842_debugfs.h" + +#define SW842_HASHTABLE8_BITS (10) +#define SW842_HASHTABLE4_BITS (11) +#define SW842_HASHTABLE2_BITS (10) + +/* By default, we allow compressing input buffers of any length, but we must + * use the non-standard "short data" template so the decompressor can correctly + * reproduce the uncompressed data buffer at the right length. However the + * hardware 842 compressor will not recognize the "short data" template, and + * will fail to decompress any compressed buffer containing it (I have no idea + * why anyone would want to use software to compress and hardware to decompress + * but that's beside the point). This parameter forces the compression + * function to simply reject any input buffer that isn't a multiple of 8 bytes + * long, instead of using the "short data" template, so that all compressed + * buffers produced by this function will be decompressable by the 842 hardware + * decompressor. Unless you have a specific need for that, leave this disabled + * so that any length buffer can be compressed. + */ +static bool sw842_strict; +module_param_named(strict, sw842_strict, bool, 0644); + +static u8 comp_ops[OPS_MAX][5] = { /* params size in bits */ + { I8, N0, N0, N0, 0x19 }, /* 8 */ + { I4, I4, N0, N0, 0x18 }, /* 18 */ + { I4, I2, I2, N0, 0x17 }, /* 25 */ + { I2, I2, I4, N0, 0x13 }, /* 25 */ + { I2, I2, I2, I2, 0x12 }, /* 32 */ + { I4, I2, D2, N0, 0x16 }, /* 33 */ + { I4, D2, I2, N0, 0x15 }, /* 33 */ + { I2, D2, I4, N0, 0x0e }, /* 33 */ + { D2, I2, I4, N0, 0x09 }, /* 33 */ + { I2, I2, I2, D2, 0x11 }, /* 40 */ + { I2, I2, D2, I2, 0x10 }, /* 40 */ + { I2, D2, I2, I2, 0x0d }, /* 40 */ + { D2, I2, I2, I2, 0x08 }, /* 40 */ + { I4, D4, N0, N0, 0x14 }, /* 41 */ + { D4, I4, N0, N0, 0x04 }, /* 41 */ + { I2, I2, D4, N0, 0x0f }, /* 48 */ + { I2, D2, I2, D2, 0x0c }, /* 48 */ + { I2, D4, I2, N0, 0x0b }, /* 48 */ + { D2, I2, I2, D2, 0x07 }, /* 48 */ + { D2, I2, D2, I2, 0x06 }, /* 48 */ + { D4, I2, I2, N0, 0x03 }, /* 48 */ + { I2, D2, D4, N0, 0x0a }, /* 56 */ + { D2, I2, D4, N0, 0x05 }, /* 56 */ + { D4, I2, D2, N0, 0x02 }, /* 56 */ + { D4, D2, I2, N0, 0x01 }, /* 56 */ + { D8, N0, N0, N0, 0x00 }, /* 64 */ +}; + +struct sw842_hlist_node8 { + struct hlist_node node; + u64 data; + u8 index; +}; + +struct sw842_hlist_node4 { + struct hlist_node node; + u32 data; + u16 index; +}; + +struct sw842_hlist_node2 { + struct hlist_node node; + u16 data; + u8 index; +}; + +#define INDEX_NOT_FOUND (-1) +#define INDEX_NOT_CHECKED (-2) + +struct sw842_param { + u8 *in; + u8 *instart; + u64 ilen; + u8 *out; + u64 olen; + u8 bit; + u64 data8[1]; + u32 data4[2]; + u16 data2[4]; + int index8[1]; + int index4[2]; + int index2[4]; + DECLARE_HASHTABLE(htable8, SW842_HASHTABLE8_BITS); + DECLARE_HASHTABLE(htable4, SW842_HASHTABLE4_BITS); + DECLARE_HASHTABLE(htable2, SW842_HASHTABLE2_BITS); + struct sw842_hlist_node8 node8[1 << I8_BITS]; + struct sw842_hlist_node4 node4[1 << I4_BITS]; + struct sw842_hlist_node2 node2[1 << I2_BITS]; +}; + +#define get_input_data(p, o, b) \ + be##b##_to_cpu(get_unaligned((__be##b *)((p)->in + (o)))) + +#define init_hashtable_nodes(p, b) do { \ + int _i; \ + hash_init((p)->htable##b); \ + for (_i = 0; _i < ARRAY_SIZE((p)->node##b); _i++) { \ + (p)->node##b[_i].index = _i; \ + (p)->node##b[_i].data = 0; \ + INIT_HLIST_NODE(&(p)->node##b[_i].node); \ + } \ +} while (0) + +#define find_index(p, b, n) ({ \ + struct 
sw842_hlist_node##b *_n; \ + p->index##b[n] = INDEX_NOT_FOUND; \ + hash_for_each_possible(p->htable##b, _n, node, p->data##b[n]) { \ + if (p->data##b[n] == _n->data) { \ + p->index##b[n] = _n->index; \ + break; \ + } \ + } \ + p->index##b[n] >= 0; \ +}) + +#define check_index(p, b, n) \ + ((p)->index##b[n] == INDEX_NOT_CHECKED \ + ? find_index(p, b, n) \ + : (p)->index##b[n] >= 0) + +#define replace_hash(p, b, i, d) do { \ + struct sw842_hlist_node##b *_n = &(p)->node##b[(i)+(d)]; \ + hash_del(&_n->node); \ + _n->data = (p)->data##b[d]; \ + pr_debug("add hash index%x %x pos %x data %lx\n", b, \ + (unsigned int)_n->index, \ + (unsigned int)((p)->in - (p)->instart), \ + (unsigned long)_n->data); \ + hash_add((p)->htable##b, &_n->node, _n->data); \ +} while (0) + +static u8 bmask[8] = { 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe }; + +static int add_bits(struct sw842_param *p, u64 d, u8 n); + +static int __split_add_bits(struct sw842_param *p, u64 d, u8 n, u8 s) +{ + int ret; + + if (n <= s) + return -EINVAL; + + ret = add_bits(p, d >> s, n - s); + if (ret) + return ret; + return add_bits(p, d & GENMASK_ULL(s - 1, 0), s); +} + +static int add_bits(struct sw842_param *p, u64 d, u8 n) +{ + int b = p->bit, bits = b + n, s = round_up(bits, 8) - bits; + u64 o; + u8 *out = p->out; + + pr_debug("add %u bits %lx\n", (unsigned char)n, (unsigned long)d); + + if (n > 64) + return -EINVAL; + + /* split this up if writing to > 8 bytes (i.e. n == 64 && p->bit > 0), + * or if we're at the end of the output buffer and would write past end + */ + if (bits > 64) + return __split_add_bits(p, d, n, 32); + else if (p->olen < 8 && bits > 32 && bits <= 56) + return __split_add_bits(p, d, n, 16); + else if (p->olen < 4 && bits > 16 && bits <= 24) + return __split_add_bits(p, d, n, 8); + + if (DIV_ROUND_UP(bits, 8) > p->olen) + return -ENOSPC; + + o = *out & bmask[b]; + d <<= s; + + if (bits <= 8) + *out = o | d; + else if (bits <= 16) + put_unaligned(cpu_to_be16(o << 8 | d), (__be16 *)out); + else if (bits <= 24) + put_unaligned(cpu_to_be32(o << 24 | d << 8), (__be32 *)out); + else if (bits <= 32) + put_unaligned(cpu_to_be32(o << 24 | d), (__be32 *)out); + else if (bits <= 40) + put_unaligned(cpu_to_be64(o << 56 | d << 24), (__be64 *)out); + else if (bits <= 48) + put_unaligned(cpu_to_be64(o << 56 | d << 16), (__be64 *)out); + else if (bits <= 56) + put_unaligned(cpu_to_be64(o << 56 | d << 8), (__be64 *)out); + else + put_unaligned(cpu_to_be64(o << 56 | d), (__be64 *)out); + + p->bit += n; + + if (p->bit > 7) { + p->out += p->bit / 8; + p->olen -= p->bit / 8; + p->bit %= 8; + } + + return 0; +} + +static int add_template(struct sw842_param *p, u8 c) +{ + int ret, i, b = 0; + u8 *t = comp_ops[c]; + bool inv = false; + + if (c >= OPS_MAX) + return -EINVAL; + + pr_debug("template %x\n", t[4]); + + ret = add_bits(p, t[4], OP_BITS); + if (ret) + return ret; + + for (i = 0; i < 4; i++) { + pr_debug("op %x\n", t[i]); + + switch (t[i] & OP_AMOUNT) { + case OP_AMOUNT_8: + if (b) + inv = true; + else if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index8[0], I8_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data8[0], 64); + else + inv = true; + break; + case OP_AMOUNT_4: + if (b == 2 && t[i] & OP_ACTION_DATA) + ret = add_bits(p, get_input_data(p, 2, 32), 32); + else if (b != 0 && b != 4) + inv = true; + else if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index4[b >> 2], I4_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data4[b >> 2], 32); + else + inv = true; + break; + case 
OP_AMOUNT_2: + if (b != 0 && b != 2 && b != 4 && b != 6) + inv = true; + if (t[i] & OP_ACTION_INDEX) + ret = add_bits(p, p->index2[b >> 1], I2_BITS); + else if (t[i] & OP_ACTION_DATA) + ret = add_bits(p, p->data2[b >> 1], 16); + else + inv = true; + break; + case OP_AMOUNT_0: + inv = (b != 8) || !(t[i] & OP_ACTION_NOOP); + break; + default: + inv = true; + break; + } + + if (ret) + return ret; + + if (inv) { + pr_err("Invalid templ %x op %d : %x %x %x %x\n", + c, i, t[0], t[1], t[2], t[3]); + return -EINVAL; + } + + b += t[i] & OP_AMOUNT; + } + + if (b != 8) { + pr_err("Invalid template %x len %x : %x %x %x %x\n", + c, b, t[0], t[1], t[2], t[3]); + return -EINVAL; + } + + if (sw842_template_counts) + atomic_inc(&template_count[t[4]]); + + return 0; +} + +static int add_repeat_template(struct sw842_param *p, u8 r) +{ + int ret; + + /* repeat param is 0-based */ + if (!r || --r > REPEAT_BITS_MAX) + return -EINVAL; + + ret = add_bits(p, OP_REPEAT, OP_BITS); + if (ret) + return ret; + + ret = add_bits(p, r, REPEAT_BITS); + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_repeat_count); + + return 0; +} + +static int add_short_data_template(struct sw842_param *p, u8 b) +{ + int ret, i; + + if (!b || b > SHORT_DATA_BITS_MAX) + return -EINVAL; + + ret = add_bits(p, OP_SHORT_DATA, OP_BITS); + if (ret) + return ret; + + ret = add_bits(p, b, SHORT_DATA_BITS); + if (ret) + return ret; + + for (i = 0; i < b; i++) { + ret = add_bits(p, p->in[i], 8); + if (ret) + return ret; + } + + if (sw842_template_counts) + atomic_inc(&template_short_data_count); + + return 0; +} + +static int add_zeros_template(struct sw842_param *p) +{ + int ret = add_bits(p, OP_ZEROS, OP_BITS); + + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_zeros_count); + + return 0; +} + +static int add_end_template(struct sw842_param *p) +{ + int ret = add_bits(p, OP_END, OP_BITS); + + if (ret) + return ret; + + if (sw842_template_counts) + atomic_inc(&template_end_count); + + return 0; +} + +static bool check_template(struct sw842_param *p, u8 c) +{ + u8 *t = comp_ops[c]; + int i, match, b = 0; + + if (c >= OPS_MAX) + return false; + + for (i = 0; i < 4; i++) { + if (t[i] & OP_ACTION_INDEX) { + if (t[i] & OP_AMOUNT_2) + match = check_index(p, 2, b >> 1); + else if (t[i] & OP_AMOUNT_4) + match = check_index(p, 4, b >> 2); + else if (t[i] & OP_AMOUNT_8) + match = check_index(p, 8, 0); + else + return false; + if (!match) + return false; + } + + b += t[i] & OP_AMOUNT; + } + + return true; +} + +static void get_next_data(struct sw842_param *p) +{ + p->data8[0] = get_input_data(p, 0, 64); + p->data4[0] = get_input_data(p, 0, 32); + p->data4[1] = get_input_data(p, 4, 32); + p->data2[0] = get_input_data(p, 0, 16); + p->data2[1] = get_input_data(p, 2, 16); + p->data2[2] = get_input_data(p, 4, 16); + p->data2[3] = get_input_data(p, 6, 16); +} + +/* update the hashtable entries. 
+ * only call this after finding/adding the current template + * the dataN fields for the current 8 byte block must be already updated + */ +static void update_hashtables(struct sw842_param *p) +{ + u64 pos = p->in - p->instart; + u64 n8 = (pos >> 3) % (1 << I8_BITS); + u64 n4 = (pos >> 2) % (1 << I4_BITS); + u64 n2 = (pos >> 1) % (1 << I2_BITS); + + replace_hash(p, 8, n8, 0); + replace_hash(p, 4, n4, 0); + replace_hash(p, 4, n4, 1); + replace_hash(p, 2, n2, 0); + replace_hash(p, 2, n2, 1); + replace_hash(p, 2, n2, 2); + replace_hash(p, 2, n2, 3); +} + +/* find the next template to use, and add it + * the p->dataN fields must already be set for the current 8 byte block + */ +static int process_next(struct sw842_param *p) +{ + int ret, i; + + p->index8[0] = INDEX_NOT_CHECKED; + p->index4[0] = INDEX_NOT_CHECKED; + p->index4[1] = INDEX_NOT_CHECKED; + p->index2[0] = INDEX_NOT_CHECKED; + p->index2[1] = INDEX_NOT_CHECKED; + p->index2[2] = INDEX_NOT_CHECKED; + p->index2[3] = INDEX_NOT_CHECKED; + + /* check up to OPS_MAX - 1; last op is our fallback */ + for (i = 0; i < OPS_MAX - 1; i++) { + if (check_template(p, i)) + break; + } + + ret = add_template(p, i); + if (ret) + return ret; + + return 0; +} + +/** + * sw842_compress + * + * Compress the uncompressed buffer of length @ilen at @in to the output buffer + * @out, using no more than @olen bytes, using the 842 compression format. + * + * Returns: 0 on success, error on failure. The @olen parameter + * will contain the number of output bytes written on success, or + * 0 on error. + */ +int sw842_compress(const u8 *in, unsigned int ilen, + u8 *out, unsigned int *olen, void *wmem) +{ + struct sw842_param *p = (struct sw842_param *)wmem; + int ret; + u64 last, next, pad, total; + u8 repeat_count = 0; + + BUILD_BUG_ON(sizeof(*p) > SW842_MEM_COMPRESS); + + init_hashtable_nodes(p, 8); + init_hashtable_nodes(p, 4); + init_hashtable_nodes(p, 2); + + p->in = (u8 *)in; + p->instart = p->in; + p->ilen = ilen; + p->out = out; + p->olen = *olen; + p->bit = 0; + + total = p->olen; + + *olen = 0; + + /* if using strict mode, we can only compress a multiple of 8 */ + if (sw842_strict && (ilen % 8)) { + pr_err("Using strict mode, can't compress len %d\n", ilen); + return -EINVAL; + } + + /* let's compress at least 8 bytes, mkay? 
*/ + if (unlikely(ilen < 8)) + goto skip_comp; + + /* make initial 'last' different so we don't match the first time */ + last = ~get_unaligned((u64 *)p->in); + + while (p->ilen > 7) { + next = get_unaligned((u64 *)p->in); + + /* must get the next data, as we need to update the hashtable + * entries with the new data every time + */ + get_next_data(p); + + /* we don't care about endianness in last or next; + * we're just comparing 8 bytes to another 8 bytes, + * they're both the same endianness + */ + if (next == last) { + /* repeat count bits are 0-based, so we stop at +1 */ + if (++repeat_count <= REPEAT_BITS_MAX) + goto repeat; + } + if (repeat_count) { + ret = add_repeat_template(p, repeat_count); + repeat_count = 0; + if (next == last) /* reached max repeat bits */ + goto repeat; + } + + if (next == 0) + ret = add_zeros_template(p); + else + ret = process_next(p); + + if (ret) + return ret; + +repeat: + last = next; + update_hashtables(p); + p->in += 8; + p->ilen -= 8; + } + + if (repeat_count) { + ret = add_repeat_template(p, repeat_count); + if (ret) + return ret; + } + +skip_comp: + if (p->ilen > 0) { + ret = add_short_data_template(p, p->ilen); + if (ret) + return ret; + + p->in += p->ilen; + p->ilen = 0; + } + + ret = add_end_template(p); + if (ret) + return ret; + + if (p->bit) { + p->out++; + p->olen--; + p->bit = 0; + } + + /* pad compressed length to multiple of 8 */ + pad = (8 - ((total - p->olen) % 8)) % 8; + if (pad) { + if (pad > p->olen) /* we were so close! */ + return -ENOSPC; + memset(p->out, 0, pad); + p->out += pad; + p->olen -= pad; + } + + if (unlikely((total - p->olen) > UINT_MAX)) + return -ENOSPC; + + *olen = total - p->olen; + + return 0; +} +EXPORT_SYMBOL_GPL(sw842_compress); + +static int __init sw842_init(void) +{ + if (sw842_template_counts) + sw842_debugfs_create(); + + return 0; +} +module_init(sw842_init); + +static void __exit sw842_exit(void) +{ + if (sw842_template_counts) + sw842_debugfs_remove(); +} +module_exit(sw842_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software 842 Compressor"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/lib/842/842_debugfs.h b/lib/842/842_debugfs.h new file mode 100644 index 000000000..e7f3bffaf --- /dev/null +++ b/lib/842/842_debugfs.h @@ -0,0 +1,52 @@ + +#ifndef __842_DEBUGFS_H__ +#define __842_DEBUGFS_H__ + +#include <linux/debugfs.h> + +static bool sw842_template_counts; +module_param_named(template_counts, sw842_template_counts, bool, 0444); + +static atomic_t template_count[OPS_MAX], template_repeat_count, + template_zeros_count, template_short_data_count, template_end_count; + +static struct dentry *sw842_debugfs_root; + +static int __init sw842_debugfs_create(void) +{ + umode_t m = S_IRUGO | S_IWUSR; + int i; + + if (!debugfs_initialized()) + return -ENODEV; + + sw842_debugfs_root = debugfs_create_dir(MODULE_NAME, NULL); + if (IS_ERR(sw842_debugfs_root)) + return PTR_ERR(sw842_debugfs_root); + + for (i = 0; i < ARRAY_SIZE(template_count); i++) { + char name[32]; + + snprintf(name, 32, "template_%02x", i); + debugfs_create_atomic_t(name, m, sw842_debugfs_root, + &template_count[i]); + } + debugfs_create_atomic_t("template_repeat", m, sw842_debugfs_root, + &template_repeat_count); + debugfs_create_atomic_t("template_zeros", m, sw842_debugfs_root, + &template_zeros_count); + debugfs_create_atomic_t("template_short_data", m, sw842_debugfs_root, + &template_short_data_count); + debugfs_create_atomic_t("template_end", m, sw842_debugfs_root, + &template_end_count); + + return 0; +} + 
+static void __exit sw842_debugfs_remove(void) +{ + if (sw842_debugfs_root && !IS_ERR(sw842_debugfs_root)) + debugfs_remove_recursive(sw842_debugfs_root); +} + +#endif diff --git a/lib/842/842_decompress.c b/lib/842/842_decompress.c new file mode 100644 index 000000000..5446ff0c9 --- /dev/null +++ b/lib/842/842_decompress.c @@ -0,0 +1,405 @@ +/* + * 842 Software Decompression + * + * Copyright (C) 2015 Dan Streetman, IBM Corp + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * See 842.h for details of the 842 compressed format. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#define MODULE_NAME "842_decompress" + +#include "842.h" +#include "842_debugfs.h" + +/* rolling fifo sizes */ +#define I2_FIFO_SIZE (2 * (1 << I2_BITS)) +#define I4_FIFO_SIZE (4 * (1 << I4_BITS)) +#define I8_FIFO_SIZE (8 * (1 << I8_BITS)) + +static u8 decomp_ops[OPS_MAX][4] = { + { D8, N0, N0, N0 }, + { D4, D2, I2, N0 }, + { D4, I2, D2, N0 }, + { D4, I2, I2, N0 }, + { D4, I4, N0, N0 }, + { D2, I2, D4, N0 }, + { D2, I2, D2, I2 }, + { D2, I2, I2, D2 }, + { D2, I2, I2, I2 }, + { D2, I2, I4, N0 }, + { I2, D2, D4, N0 }, + { I2, D4, I2, N0 }, + { I2, D2, I2, D2 }, + { I2, D2, I2, I2 }, + { I2, D2, I4, N0 }, + { I2, I2, D4, N0 }, + { I2, I2, D2, I2 }, + { I2, I2, I2, D2 }, + { I2, I2, I2, I2 }, + { I2, I2, I4, N0 }, + { I4, D4, N0, N0 }, + { I4, D2, I2, N0 }, + { I4, I2, D2, N0 }, + { I4, I2, I2, N0 }, + { I4, I4, N0, N0 }, + { I8, N0, N0, N0 } +}; + +struct sw842_param { + u8 *in; + u8 bit; + u64 ilen; + u8 *out; + u8 *ostart; + u64 olen; +}; + +#define beN_to_cpu(d, s) \ + ((s) == 2 ? be16_to_cpu(get_unaligned((__be16 *)d)) : \ + (s) == 4 ? be32_to_cpu(get_unaligned((__be32 *)d)) : \ + (s) == 8 ? 
be64_to_cpu(get_unaligned((__be64 *)d)) : \ + WARN(1, "pr_debug param err invalid size %x\n", s)) + +static int next_bits(struct sw842_param *p, u64 *d, u8 n); + +static int __split_next_bits(struct sw842_param *p, u64 *d, u8 n, u8 s) +{ + u64 tmp = 0; + int ret; + + if (n <= s) { + pr_debug("split_next_bits invalid n %u s %u\n", n, s); + return -EINVAL; + } + + ret = next_bits(p, &tmp, n - s); + if (ret) + return ret; + ret = next_bits(p, d, s); + if (ret) + return ret; + *d |= tmp << s; + return 0; +} + +static int next_bits(struct sw842_param *p, u64 *d, u8 n) +{ + u8 *in = p->in, b = p->bit, bits = b + n; + + if (n > 64) { + pr_debug("next_bits invalid n %u\n", n); + return -EINVAL; + } + + /* split this up if reading > 8 bytes, or if we're at the end of + * the input buffer and would read past the end + */ + if (bits > 64) + return __split_next_bits(p, d, n, 32); + else if (p->ilen < 8 && bits > 32 && bits <= 56) + return __split_next_bits(p, d, n, 16); + else if (p->ilen < 4 && bits > 16 && bits <= 24) + return __split_next_bits(p, d, n, 8); + + if (DIV_ROUND_UP(bits, 8) > p->ilen) + return -EOVERFLOW; + + if (bits <= 8) + *d = *in >> (8 - bits); + else if (bits <= 16) + *d = be16_to_cpu(get_unaligned((__be16 *)in)) >> (16 - bits); + else if (bits <= 32) + *d = be32_to_cpu(get_unaligned((__be32 *)in)) >> (32 - bits); + else + *d = be64_to_cpu(get_unaligned((__be64 *)in)) >> (64 - bits); + + *d &= GENMASK_ULL(n - 1, 0); + + p->bit += n; + + if (p->bit > 7) { + p->in += p->bit / 8; + p->ilen -= p->bit / 8; + p->bit %= 8; + } + + return 0; +} + +static int do_data(struct sw842_param *p, u8 n) +{ + u64 v; + int ret; + + if (n > p->olen) + return -ENOSPC; + + ret = next_bits(p, &v, n * 8); + if (ret) + return ret; + + switch (n) { + case 2: + put_unaligned(cpu_to_be16((u16)v), (__be16 *)p->out); + break; + case 4: + put_unaligned(cpu_to_be32((u32)v), (__be32 *)p->out); + break; + case 8: + put_unaligned(cpu_to_be64((u64)v), (__be64 *)p->out); + break; + default: + return -EINVAL; + } + + p->out += n; + p->olen -= n; + + return 0; +} + +static int __do_index(struct sw842_param *p, u8 size, u8 bits, u64 fsize) +{ + u64 index, offset, total = round_down(p->out - p->ostart, 8); + int ret; + + ret = next_bits(p, &index, bits); + if (ret) + return ret; + + offset = index * size; + + /* a ring buffer of fsize is used; correct the offset */ + if (total > fsize) { + /* this is where the current fifo is */ + u64 section = round_down(total, fsize); + /* the current pos in the fifo */ + u64 pos = total - section; + + /* if the offset is past/at the pos, we need to + * go back to the last fifo section + */ + if (offset >= pos) + section -= fsize; + + offset += section; + } + + if (offset + size > total) { + pr_debug("index%x %lx points past end %lx\n", size, + (unsigned long)offset, (unsigned long)total); + return -EINVAL; + } + + pr_debug("index%x to %lx off %lx adjoff %lx tot %lx data %lx\n", + size, (unsigned long)index, (unsigned long)(index * size), + (unsigned long)offset, (unsigned long)total, + (unsigned long)beN_to_cpu(&p->ostart[offset], size)); + + memcpy(p->out, &p->ostart[offset], size); + p->out += size; + p->olen -= size; + + return 0; +} + +static int do_index(struct sw842_param *p, u8 n) +{ + switch (n) { + case 2: + return __do_index(p, 2, I2_BITS, I2_FIFO_SIZE); + case 4: + return __do_index(p, 4, I4_BITS, I4_FIFO_SIZE); + case 8: + return __do_index(p, 8, I8_BITS, I8_FIFO_SIZE); + default: + return -EINVAL; + } +} + +static int do_op(struct sw842_param *p, u8 o) +{ + int i, ret = 
0; + + if (o >= OPS_MAX) + return -EINVAL; + + for (i = 0; i < 4; i++) { + u8 op = decomp_ops[o][i]; + + pr_debug("op is %x\n", op); + + switch (op & OP_ACTION) { + case OP_ACTION_DATA: + ret = do_data(p, op & OP_AMOUNT); + break; + case OP_ACTION_INDEX: + ret = do_index(p, op & OP_AMOUNT); + break; + case OP_ACTION_NOOP: + break; + default: + pr_err("Interal error, invalid op %x\n", op); + return -EINVAL; + } + + if (ret) + return ret; + } + + if (sw842_template_counts) + atomic_inc(&template_count[o]); + + return 0; +} + +/** + * sw842_decompress + * + * Decompress the 842-compressed buffer of length @ilen at @in + * to the output buffer @out, using no more than @olen bytes. + * + * The compressed buffer must be only a single 842-compressed buffer, + * with the standard format described in the comments in 842.h + * Processing will stop when the 842 "END" template is detected, + * not the end of the buffer. + * + * Returns: 0 on success, error on failure. The @olen parameter + * will contain the number of output bytes written on success, or + * 0 on error. + */ +int sw842_decompress(const u8 *in, unsigned int ilen, + u8 *out, unsigned int *olen) +{ + struct sw842_param p; + int ret; + u64 op, rep, tmp, bytes, total; + + p.in = (u8 *)in; + p.bit = 0; + p.ilen = ilen; + p.out = out; + p.ostart = out; + p.olen = *olen; + + total = p.olen; + + *olen = 0; + + do { + ret = next_bits(&p, &op, OP_BITS); + if (ret) + return ret; + + pr_debug("template is %lx\n", (unsigned long)op); + + switch (op) { + case OP_REPEAT: + ret = next_bits(&p, &rep, REPEAT_BITS); + if (ret) + return ret; + + if (p.out == out) /* no previous bytes */ + return -EINVAL; + + /* copy rep + 1 */ + rep++; + + if (rep * 8 > p.olen) + return -ENOSPC; + + while (rep-- > 0) { + memcpy(p.out, p.out - 8, 8); + p.out += 8; + p.olen -= 8; + } + + if (sw842_template_counts) + atomic_inc(&template_repeat_count); + + break; + case OP_ZEROS: + if (8 > p.olen) + return -ENOSPC; + + memset(p.out, 0, 8); + p.out += 8; + p.olen -= 8; + + if (sw842_template_counts) + atomic_inc(&template_zeros_count); + + break; + case OP_SHORT_DATA: + ret = next_bits(&p, &bytes, SHORT_DATA_BITS); + if (ret) + return ret; + + if (!bytes || bytes > SHORT_DATA_BITS_MAX) + return -EINVAL; + + while (bytes-- > 0) { + ret = next_bits(&p, &tmp, 8); + if (ret) + return ret; + *p.out = (u8)tmp; + p.out++; + p.olen--; + } + + if (sw842_template_counts) + atomic_inc(&template_short_data_count); + + break; + case OP_END: + if (sw842_template_counts) + atomic_inc(&template_end_count); + + break; + default: /* use template */ + ret = do_op(&p, op); + if (ret) + return ret; + break; + } + } while (op != OP_END); + + if (unlikely((total - p.olen) > UINT_MAX)) + return -ENOSPC; + + *olen = total - p.olen; + + return 0; +} +EXPORT_SYMBOL_GPL(sw842_decompress); + +static int __init sw842_init(void) +{ + if (sw842_template_counts) + sw842_debugfs_create(); + + return 0; +} +module_init(sw842_init); + +static void __exit sw842_exit(void) +{ + if (sw842_template_counts) + sw842_debugfs_remove(); +} +module_exit(sw842_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Software 842 Decompressor"); +MODULE_AUTHOR("Dan Streetman <ddstreet@ieee.org>"); diff --git a/lib/842/Makefile b/lib/842/Makefile new file mode 100644 index 000000000..5d24c0baf --- /dev/null +++ b/lib/842/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_842_COMPRESS) += 842_compress.o +obj-$(CONFIG_842_DECOMPRESS) += 842_decompress.o diff --git a/lib/Kconfig b/lib/Kconfig index 601965a94..3a2ef67db 100644 --- 
a/lib/Kconfig +++ b/lib/Kconfig @@ -212,6 +212,12 @@ config RANDOM32_SELFTEST # # compression support is select'ed if needed # +config 842_COMPRESS + tristate + +config 842_DECOMPRESS + tristate + config ZLIB_INFLATE tristate @@ -522,4 +528,7 @@ source "lib/fonts/Kconfig" config ARCH_HAS_SG_CHAIN def_bool n +config ARCH_HAS_PMEM_API + bool + endmenu diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 3bddcda18..e2894b23e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -841,9 +841,14 @@ config SCHED_DEBUG that can help debug the scheduler. The runtime overhead of this option is minimal. +config SCHED_INFO + bool + default n + config SCHEDSTATS bool "Collect scheduler statistics" depends on DEBUG_KERNEL && PROC_FS + select SCHED_INFO help If you say Y here, additional code will be inserted into the scheduler and related routines to collect statistics about @@ -1230,9 +1235,10 @@ config TORTURE_TEST config RCU_TORTURE_TEST tristate "torture tests for RCU" - depends on DEBUG_KERNEL && !SCHED_BFS + depends on DEBUG_KERNEL select TORTURE_TEST select SRCU + select TASKS_RCU default n help This option provides a kernel module that runs torture tests @@ -1261,12 +1267,38 @@ config RCU_TORTURE_TEST_RUNNABLE Say N here if you want the RCU torture tests to start only after being manually enabled via /proc. +config RCU_TORTURE_TEST_SLOW_PREINIT + bool "Slow down RCU grace-period pre-initialization to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period pre-initialization (the + propagation of CPU-hotplug changes up the rcu_node combining + tree) for a few jiffies between initializing each pair of + consecutive rcu_node structures. This helps to expose races + involving grace-period pre-initialization, in other words, it + makes your kernel less stable. It can also greatly increase + grace-period latency, especially on systems with large numbers + of CPUs. This is useful when torture-testing RCU, but in + almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. + +config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY + int "How much to slow down RCU grace-period pre-initialization" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_PREINIT + help + This option specifies the number of jiffies to wait between + each rcu_node structure pre-initialization step. + config RCU_TORTURE_TEST_SLOW_INIT bool "Slow down RCU grace-period initialization to expose races" depends on RCU_TORTURE_TEST help - This option makes grace-period initialization block for a - few jiffies between initializing each pair of consecutive + This option delays grace-period initialization for a few + jiffies between initializing each pair of consecutive rcu_node structures. This helps to expose races involving grace-period initialization, in other words, it makes your kernel less stable. It can also greatly increase grace-period @@ -1286,6 +1318,30 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY This option specifies the number of jiffies to wait between each rcu_node structure initialization. +config RCU_TORTURE_TEST_SLOW_CLEANUP + bool "Slow down RCU grace-period cleanup to expose races" + depends on RCU_TORTURE_TEST + help + This option delays grace-period cleanup for a few jiffies + between cleaning up each pair of consecutive rcu_node + structures. This helps to expose races involving grace-period + cleanup, in other words, it makes your kernel less stable. 
+ It can also greatly increase grace-period latency, especially + on systems with large numbers of CPUs. This is useful when + torture-testing RCU, but in almost no other circumstance. + + Say Y here if you want your system to crash and hang more often. + Say N if you want a sane system. + +config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY + int "How much to slow down RCU grace-period cleanup" + range 0 5 + default 3 + depends on RCU_TORTURE_TEST_SLOW_CLEANUP + help + This option specifies the number of jiffies to wait between + each rcu_node structure cleanup operation. + config RCU_CPU_STALL_TIMEOUT int "RCU CPU stall timeout in seconds" depends on RCU_STALL_COMMON @@ -1322,6 +1378,17 @@ config RCU_TRACE Say Y here if you want to enable RCU tracing Say N if you are unsure. +config RCU_EQS_DEBUG + bool "Use this when adding any sort of NO_HZ support to your arch" + depends on DEBUG_KERNEL + help + This option provides consistency checks in RCU's handling of + NO_HZ. These checks have proven quite helpful in detecting + bugs in arch-specific NO_HZ code. + + Say N here if you need ultimate kernel/user switch latencies + Say Y if you are unsure + endmenu # "RCU Debugging" config DEBUG_BLOCK_EXT_DEVT diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan index 777eda7d1..39f24d672 100644 --- a/lib/Kconfig.kasan +++ b/lib/Kconfig.kasan @@ -18,10 +18,6 @@ config KASAN For better error detection enable CONFIG_STACKTRACE, and add slub_debug=U to boot cmdline. -config KASAN_SHADOW_OFFSET - hex - default 0xdffffc0000000000 if X86_64 - choice prompt "Instrumentation type" depends on KASAN diff --git a/lib/Makefile b/lib/Makefile index 3d30b6650..89cb00bec 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -45,6 +45,9 @@ CFLAGS_kobject.o += -DDEBUG CFLAGS_kobject_uevent.o += -DDEBUG endif +obj-$(CONFIG_DEBUG_INFO_REDUCED) += debug_info.o +CFLAGS_debug_info.o += $(call cc-option, -femit-struct-debug-detailed=any) + obj-$(CONFIG_GENERIC_IOMAP) += iomap.o obj-$(CONFIG_GENERIC_PCI_IOMAP) += pci_iomap.o obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o @@ -78,6 +81,8 @@ obj-$(CONFIG_LIBCRC32C) += libcrc32c.o obj-$(CONFIG_CRC8) += crc8.o obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o +obj-$(CONFIG_842_COMPRESS) += 842/ +obj-$(CONFIG_842_DECOMPRESS) += 842/ obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/ obj-$(CONFIG_ZLIB_DEFLATE) += zlib_deflate/ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ diff --git a/lib/bitmap.c b/lib/bitmap.c index 40162f87e..a578a0189 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -462,19 +462,20 @@ EXPORT_SYMBOL(bitmap_parse_user); * Output format is a comma-separated list of decimal numbers and * ranges if list is specified or hex digits grouped into comma-separated * sets of 8 digits/set. Returns the number of characters written to buf. + * + * It is assumed that @buf is a pointer into a PAGE_SIZE area and that + * sufficient storage remains at @buf to accommodate the + * bitmap_print_to_pagebuf() output. */ int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits) { - ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf - 2; + ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf; int n = 0; - if (len > 1) { - n = list ? scnprintf(buf, len, "%*pbl", nmaskbits, maskp) : - scnprintf(buf, len, "%*pb", nmaskbits, maskp); - buf[n++] = '\n'; - buf[n] = '\0'; - } + if (len > 1) + n = list ? 
scnprintf(buf, len, "%*pbl\n", nmaskbits, maskp) : + scnprintf(buf, len, "%*pb\n", nmaskbits, maskp); return n; } EXPORT_SYMBOL(bitmap_print_to_pagebuf); @@ -66,7 +66,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) struct module *mod; const struct bug_entry *bug = NULL; - rcu_read_lock(); + rcu_read_lock_sched(); list_for_each_entry_rcu(mod, &module_bug_list, bug_list) { unsigned i; @@ -77,7 +77,7 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr) } bug = NULL; out: - rcu_read_unlock(); + rcu_read_unlock_sched(); return bug; } @@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, char *secstrings; unsigned int i; + lockdep_assert_held(&module_mutex); + mod->bug_table = NULL; mod->num_bugs = 0; @@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, void module_bug_cleanup(struct module *mod) { + lockdep_assert_held(&module_mutex); list_del_rcu(&mod->bug_list); } diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 4f134d890..f610b2a10 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -191,7 +191,7 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index, /* Update distances based on topology */ for_each_cpu(cpu, update_mask) { if (cpu_rmap_copy_neigh(rmap, cpu, - topology_thread_cpumask(cpu), 1)) + topology_sibling_cpumask(cpu), 1)) continue; if (cpu_rmap_copy_neigh(rmap, cpu, topology_core_cpumask(cpu), 2)) diff --git a/lib/crc-itu-t.c b/lib/crc-itu-t.c index a63472b82..b3219d0ab 100644 --- a/lib/crc-itu-t.c +++ b/lib/crc-itu-t.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/crc-itu-t.h> -/** CRC table for the CRC ITU-T V.41 0x0x1021 (x^16 + x^12 + x^15 + 1) */ +/** CRC table for the CRC ITU-T V.41 0x1021 (x^16 + x^12 + x^15 + 1) */ const u16 crc_itu_t_table[256] = { 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, diff --git a/lib/crc-t10dif.c b/lib/crc-t10dif.c index dfe6ec17c..1ad33e555 100644 --- a/lib/crc-t10dif.c +++ b/lib/crc-t10dif.c @@ -19,7 +19,7 @@ static struct crypto_shash *crct10dif_tfm; static struct static_key crct10dif_fallback __read_mostly; -__u16 crc_t10dif(const unsigned char *buffer, size_t len) +__u16 crc_t10dif_update(__u16 crc, const unsigned char *buffer, size_t len) { struct { struct shash_desc shash; @@ -28,17 +28,23 @@ __u16 crc_t10dif(const unsigned char *buffer, size_t len) int err; if (static_key_false(&crct10dif_fallback)) - return crc_t10dif_generic(0, buffer, len); + return crc_t10dif_generic(crc, buffer, len); desc.shash.tfm = crct10dif_tfm; desc.shash.flags = 0; - *(__u16 *)desc.ctx = 0; + *(__u16 *)desc.ctx = crc; err = crypto_shash_update(&desc.shash, buffer, len); BUG_ON(err); return *(__u16 *)desc.ctx; } +EXPORT_SYMBOL(crc_t10dif_update); + +__u16 crc_t10dif(const unsigned char *buffer, size_t len) +{ + return crc_t10dif_update(0, buffer, len); +} EXPORT_SYMBOL(crc_t10dif); static int __init crc_t10dif_mod_init(void) diff --git a/lib/debug_info.c b/lib/debug_info.c new file mode 100644 index 000000000..2edbe2751 --- /dev/null +++ b/lib/debug_info.c @@ -0,0 +1,27 @@ +/* + * This file exists solely to ensure debug information for some core + * data structures is included in the final image even for + * CONFIG_DEBUG_INFO_REDUCED. Please do not add actual code. However, + * adding appropriate #includes is fine. 
+ */ +#include <stdarg.h> + +#include <linux/cred.h> +#include <linux/crypto.h> +#include <linux/dcache.h> +#include <linux/device.h> +#include <linux/fs.h> +#include <linux/fscache-cache.h> +#include <linux/io.h> +#include <linux/kallsyms.h> +#include <linux/kernel.h> +#include <linux/kobject.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/net.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <net/addrconf.h> +#include <net/sock.h> +#include <net/tcp.h> diff --git a/lib/decompress.c b/lib/decompress.c index 528ff932d..62696dff5 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -59,8 +59,11 @@ decompress_fn __init decompress_method(const unsigned char *inbuf, long len, { const struct compress_format *cf; - if (len < 2) + if (len < 2) { + if (name) + *name = NULL; return NULL; /* Need at least this much... */ + } pr_debug("Compressed data magic: %#.2x %#.2x\n", inbuf[0], inbuf[1]); diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index d8f3d3150..e491e02ef 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -887,7 +887,7 @@ static int ddebug_dyndbg_param_cb(char *param, char *val, /* handle both dyndbg and $module.dyndbg params at boot */ static int ddebug_dyndbg_boot_param_cb(char *param, char *val, - const char *unused) + const char *unused, void *arg) { vpr_info("%s=\"%s\"\n", param, val); return ddebug_dyndbg_param_cb(param, val, NULL, 0); @@ -1028,7 +1028,7 @@ static int __init dynamic_debug_init(void) */ cmdline = kstrdup(saved_command_line, GFP_KERNEL); parse_args("dyndbg params", cmdline, NULL, - 0, 0, 0, &ddebug_dyndbg_boot_param_cb); + 0, 0, 0, NULL, &ddebug_dyndbg_boot_param_cb); kfree(cmdline); return 0; diff --git a/lib/genalloc.c b/lib/genalloc.c index d214866ee..daf0afb6d 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -602,12 +602,12 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, EXPORT_SYMBOL(devm_gen_pool_create); /** - * dev_get_gen_pool - Obtain the gen_pool (if any) for a device + * gen_pool_get - Obtain the gen_pool (if any) for a device * @dev: device to retrieve the gen_pool from * * Returns the gen_pool for the device if one is present, or NULL. */ -struct gen_pool *dev_get_gen_pool(struct device *dev) +struct gen_pool *gen_pool_get(struct device *dev) { struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL, NULL); @@ -616,11 +616,11 @@ struct gen_pool *dev_get_gen_pool(struct device *dev) return NULL; return *p; } -EXPORT_SYMBOL_GPL(dev_get_gen_pool); +EXPORT_SYMBOL_GPL(gen_pool_get); #ifdef CONFIG_OF /** - * of_get_named_gen_pool - find a pool by phandle property + * of_gen_pool_get - find a pool by phandle property * @np: device node * @propname: property name containing phandle(s) * @index: index into the phandle array @@ -629,7 +629,7 @@ EXPORT_SYMBOL_GPL(dev_get_gen_pool); * address of the device tree node pointed at by the phandle property, * or NULL if not found. 
*/ -struct gen_pool *of_get_named_gen_pool(struct device_node *np, +struct gen_pool *of_gen_pool_get(struct device_node *np, const char *propname, int index) { struct platform_device *pdev; @@ -642,7 +642,7 @@ struct gen_pool *of_get_named_gen_pool(struct device_node *np, of_node_put(np_pool); if (!pdev) return NULL; - return dev_get_gen_pool(&pdev->dev); + return gen_pool_get(&pdev->dev); } -EXPORT_SYMBOL_GPL(of_get_named_gen_pool); +EXPORT_SYMBOL_GPL(of_gen_pool_get); #endif /* CONFIG_OF */ diff --git a/lib/hexdump.c b/lib/hexdump.c index 7ea096998..8d74c20d8 100644 --- a/lib/hexdump.c +++ b/lib/hexdump.c @@ -11,6 +11,7 @@ #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/export.h> +#include <asm/unaligned.h> const char hex_asc[] = "0123456789abcdef"; EXPORT_SYMBOL(hex_asc); @@ -139,7 +140,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%16.16llx", j ? " " : "", - (unsigned long long)*(ptr8 + j)); + get_unaligned(ptr8 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; @@ -150,7 +151,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%8.8x", j ? " " : "", - *(ptr4 + j)); + get_unaligned(ptr4 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; @@ -161,7 +162,7 @@ int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize, for (j = 0; j < ngroups; j++) { ret = snprintf(linebuf + lx, linebuflen - lx, "%s%4.4x", j ? " " : "", - *(ptr2 + j)); + get_unaligned(ptr2 + j)); if (ret >= linebuflen - lx) goto overflow1; lx += ret; diff --git a/lib/iommu-common.c b/lib/iommu-common.c index df30632f0..ff19f66d3 100644 --- a/lib/iommu-common.c +++ b/lib/iommu-common.c @@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev, unsigned long align_mask = 0; if (align_order > 0) - align_mask = 0xffffffffffffffffl >> (64 - align_order); + align_mask = ~0ul >> (BITS_PER_LONG - align_order); /* Sanity check */ if (unlikely(npages == 0)) { diff --git a/lib/kobject.c b/lib/kobject.c index 3b841b97f..3e3a5c3cb 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -257,23 +257,20 @@ static int kobject_add_internal(struct kobject *kobj) int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { - const char *old_name = kobj->name; char *s; if (kobj->name && !fmt) return 0; - kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs); - if (!kobj->name) { - kobj->name = old_name; + s = kvasprintf(GFP_KERNEL, fmt, vargs); + if (!s) return -ENOMEM; - } /* ewww... some of these buggers have '/' in the name ... */ - while ((s = strchr(kobj->name, '/'))) - s[0] = '!'; + strreplace(s, '/', '!'); + kfree(kobj->name); + kobj->name = s; - kfree(old_name); return 0; } @@ -340,8 +337,9 @@ error: } EXPORT_SYMBOL(kobject_init); -static int kobject_add_varg(struct kobject *kobj, struct kobject *parent, - const char *fmt, va_list vargs) +static __printf(3, 0) int kobject_add_varg(struct kobject *kobj, + struct kobject *parent, + const char *fmt, va_list vargs) { int retval; @@ -548,6 +546,7 @@ out: kfree(devpath); return error; } +EXPORT_SYMBOL_GPL(kobject_move); /** * kobject_del - unlink kobject from hierarchy. 
diff --git a/lib/list_sort.c b/lib/list_sort.c index b29015102..3fe401067 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -289,5 +289,5 @@ exit: kfree(elts); return err; } -module_init(list_sort_test); +late_initcall(list_sort_test); #endif /* CONFIG_TEST_LIST_SORT */ diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index 26cc6029b..6d940c72b 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c @@ -140,8 +140,12 @@ static int lz4_uncompress(const char *source, char *dest, int osize) /* Error: request to write beyond destination buffer */ if (cpy > oend) goto _output_error; +#if LZ4_ARCH64 + if ((ref + COPYLENGTH) > oend) +#else if ((ref + COPYLENGTH) > oend || (op + COPYLENGTH) > oend) +#endif goto _output_error; LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); while (op < cpy) @@ -266,7 +270,13 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, if (cpy > oend - COPYLENGTH) { if (cpy > oend) goto _output_error; /* write outside of buf */ - +#if LZ4_ARCH64 + if ((ref + COPYLENGTH) > oend) +#else + if ((ref + COPYLENGTH) > oend || + (op + COPYLENGTH) > oend) +#endif + goto _output_error; LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH)); while (op < cpy) *op++ = *ref++; diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c index 4cc644273..bc0a1da8a 100644 --- a/lib/mpi/mpicoder.c +++ b/lib/mpi/mpicoder.c @@ -128,28 +128,36 @@ leave: } EXPORT_SYMBOL_GPL(mpi_read_from_buffer); -/**************** - * Return an allocated buffer with the MPI (msb first). - * NBYTES receives the length of this buffer. Caller must free the - * return string (This function does return a 0 byte buffer with NBYTES - * set to zero if the value of A is zero. If sign is not NULL, it will - * be set to the sign of the A. +/** + * mpi_read_buffer() - read MPI to a bufer provided by user (msb first) + * + * @a: a multi precision integer + * @buf: bufer to which the output will be written to. Needs to be at + * leaset mpi_get_size(a) long. + * @buf_len: size of the buf. + * @nbytes: receives the actual length of the data written. + * @sign: if not NULL, it will be set to the sign of a. + * + * Return: 0 on success or error code in case of error */ -void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) +int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, + int *sign) { - uint8_t *p, *buffer; + uint8_t *p; mpi_limb_t alimb; + unsigned int n = mpi_get_size(a); int i; - unsigned int n; + + if (buf_len < n || !buf) + return -EINVAL; if (sign) *sign = a->sign; - *nbytes = n = a->nlimbs * BYTES_PER_MPI_LIMB; - if (!n) - n++; /* avoid zero length allocation */ - p = buffer = kmalloc(n, GFP_KERNEL); - if (!p) - return NULL; + + if (nbytes) + *nbytes = n; + + p = buf; for (i = a->nlimbs - 1; i >= 0; i--) { alimb = a->d[i]; @@ -171,15 +179,56 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) #error please implement for this limb size. #endif } + return 0; +} +EXPORT_SYMBOL_GPL(mpi_read_buffer); + +/* + * mpi_get_buffer() - Returns an allocated buffer with the MPI (msb first). + * Caller must free the return string. + * This function does return a 0 byte buffer with nbytes set to zero if the + * value of A is zero. + * + * @a: a multi precision integer. + * @nbytes: receives the length of this buffer. + * @sign: if not NULL, it will be set to the sign of the a. 
+ * + * Return: Pointer to MPI buffer or NULL on error + */ +void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign) +{ + uint8_t *buf, *p; + unsigned int n; + int ret; + + if (!nbytes) + return NULL; + + n = mpi_get_size(a); + + if (!n) + n++; + + buf = kmalloc(n, GFP_KERNEL); + + if (!buf) + return NULL; + + ret = mpi_read_buffer(a, buf, n, nbytes, sign); + + if (ret) { + kfree(buf); + return NULL; + } /* this is sub-optimal but we need to do the shift operation * because the caller has to free the returned buffer */ - for (p = buffer; !*p && *nbytes; p++, --*nbytes) + for (p = buf; !*p && *nbytes; p++, --*nbytes) ; - if (p != buffer) - memmove(buffer, p, *nbytes); + if (p != buf) + memmove(buf, p, *nbytes); - return buffer; + return buf; } EXPORT_SYMBOL_GPL(mpi_get_buffer); diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c index bf076d281..314f4dfa6 100644 --- a/lib/mpi/mpiutil.c +++ b/lib/mpi/mpiutil.c @@ -69,7 +69,7 @@ void mpi_free_limb_space(mpi_ptr_t a) if (!a) return; - kfree(a); + kzfree(a); } void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs) @@ -95,7 +95,7 @@ int mpi_resize(MPI a, unsigned nlimbs) if (!p) return -ENOMEM; memcpy(p, a->d, a->alloced * sizeof(mpi_limb_t)); - kfree(a->d); + kzfree(a->d); a->d = p; } else { a->d = kzalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL); @@ -112,7 +112,7 @@ void mpi_free(MPI a) return; if (a->flags & 4) - kfree(a->d); + kzfree(a->d); else mpi_free_limb_space(a->d); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3d2aa27b8..f9ebe1c82 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -33,7 +33,7 @@ #include <linux/string.h> #include <linux/bitops.h> #include <linux/rcupdate.h> -#include <linux/preempt_mask.h> /* in_interrupt() */ +#include <linux/preempt.h> /* in_interrupt() */ /* @@ -65,7 +65,8 @@ static struct kmem_cache *radix_tree_node_cachep; */ struct radix_tree_preload { int nr; - struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE]; + /* nodes->private_data points to next preallocated node */ + struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; @@ -197,8 +198,9 @@ radix_tree_node_alloc(struct radix_tree_root *root) */ rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr) { - ret = rtp->nodes[rtp->nr - 1]; - rtp->nodes[rtp->nr - 1] = NULL; + ret = rtp->nodes; + rtp->nodes = ret->private_data; + ret->private_data = NULL; rtp->nr--; } /* @@ -257,17 +259,20 @@ static int __radix_tree_preload(gfp_t gfp_mask) preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - while (rtp->nr < ARRAY_SIZE(rtp->nodes)) { + while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { preempt_enable(); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; preempt_disable(); rtp = this_cpu_ptr(&radix_tree_preloads); - if (rtp->nr < ARRAY_SIZE(rtp->nodes)) - rtp->nodes[rtp->nr++] = node; - else + if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) { + node->private_data = rtp->nodes; + rtp->nodes = node; + rtp->nr++; + } else { kmem_cache_free(radix_tree_node_cachep, node); + } } ret = 0; out: @@ -1463,15 +1468,16 @@ static int radix_tree_callback(struct notifier_block *nfb, { int cpu = (long)hcpu; struct radix_tree_preload *rtp; + struct radix_tree_node *node; /* Free per-cpu pool of perloaded nodes */ if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { rtp = &per_cpu(radix_tree_preloads, cpu); while (rtp->nr) { - kmem_cache_free(radix_tree_node_cachep, - rtp->nodes[rtp->nr-1]); - rtp->nodes[rtp->nr-1] = NULL; - rtp->nr--; + node = rtp->nodes; + 
rtp->nodes = node->private_data; + kmem_cache_free(radix_tree_node_cachep, node); + rtp->nr--; } } return NOTIFY_OK; diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile index c7dab0645..3b10a48fa 100644 --- a/lib/raid6/Makefile +++ b/lib/raid6/Makefile @@ -15,7 +15,7 @@ quiet_cmd_unroll = UNROLL $@ < $< > $@ || ( rm -f $@ && exit 1 ) ifeq ($(CONFIG_ALTIVEC),y) -altivec_flags := -maltivec -mabi=altivec +altivec_flags := -maltivec $(call cc-option,-mabi=altivec) endif # The GCC option -ffreestanding is required in order to compile code containing diff --git a/lib/raid6/x86.h b/lib/raid6/x86.h index b7595484a..8fe9d9662 100644 --- a/lib/raid6/x86.h +++ b/lib/raid6/x86.h @@ -23,7 +23,7 @@ #ifdef __KERNEL__ /* Real code */ -#include <asm/i387.h> +#include <asm/fpu/api.h> #else /* Dummy code for user space testing */ diff --git a/lib/rbtree.c b/lib/rbtree.c index c16c81a3d..1356454e3 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c @@ -44,6 +44,30 @@ * parentheses and have some accompanying text comment. */ +/* + * Notes on lockless lookups: + * + * All stores to the tree structure (rb_left and rb_right) must be done using + * WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the + * tree structure as seen in program order. + * + * These two requirements will allow lockless iteration of the tree -- not + * correct iteration mind you, tree rotations are not atomic so a lookup might + * miss entire subtrees. + * + * But they do guarantee that any such traversal will only see valid elements + * and that it will indeed complete -- does not get stuck in a loop. + * + * It also guarantees that if the lookup returns an element it is the 'correct' + * one. But not returning an element does _NOT_ mean it's not present. + * + * NOTE: + * + * Stores to __rb_parent_color are not important for simple lookups so those + * are left undone as of now. Nor did I check for loops involving parent + * pointers. + */ + static inline void rb_set_black(struct rb_node *rb) { rb->__rb_parent_color |= RB_BLACK; @@ -129,8 +153,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root, * This still leaves us in violation of 4), the * continuation into Case 3 will fix that. 
*/ - parent->rb_right = tmp = node->rb_left; - node->rb_left = parent; + tmp = node->rb_left; + WRITE_ONCE(parent->rb_right, tmp); + WRITE_ONCE(node->rb_left, parent); if (tmp) rb_set_parent_color(tmp, parent, RB_BLACK); @@ -149,8 +174,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root, * / \ * n U */ - gparent->rb_left = tmp; /* == parent->rb_right */ - parent->rb_right = gparent; + WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */ + WRITE_ONCE(parent->rb_right, gparent); if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); @@ -171,8 +196,9 @@ __rb_insert(struct rb_node *node, struct rb_root *root, tmp = parent->rb_left; if (node == tmp) { /* Case 2 - right rotate at parent */ - parent->rb_left = tmp = node->rb_right; - node->rb_right = parent; + tmp = node->rb_right; + WRITE_ONCE(parent->rb_left, tmp); + WRITE_ONCE(node->rb_right, parent); if (tmp) rb_set_parent_color(tmp, parent, RB_BLACK); @@ -183,8 +209,8 @@ __rb_insert(struct rb_node *node, struct rb_root *root, } /* Case 3 - left rotate at gparent */ - gparent->rb_right = tmp; /* == parent->rb_left */ - parent->rb_left = gparent; + WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */ + WRITE_ONCE(parent->rb_left, gparent); if (tmp) rb_set_parent_color(tmp, gparent, RB_BLACK); __rb_rotate_set_parents(gparent, parent, root, RB_RED); @@ -224,8 +250,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * / \ / \ * Sl Sr N Sl */ - parent->rb_right = tmp1 = sibling->rb_left; - sibling->rb_left = parent; + tmp1 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp1); + WRITE_ONCE(sibling->rb_left, parent); rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); @@ -275,9 +302,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * \ * Sr */ - sibling->rb_left = tmp1 = tmp2->rb_right; - tmp2->rb_right = sibling; - parent->rb_right = tmp2; + tmp1 = tmp2->rb_right; + WRITE_ONCE(sibling->rb_left, tmp1); + WRITE_ONCE(tmp2->rb_right, sibling); + WRITE_ONCE(parent->rb_right, tmp2); if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); @@ -297,8 +325,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, * / \ / \ * (sl) sr N (sl) */ - parent->rb_right = tmp2 = sibling->rb_left; - sibling->rb_left = parent; + tmp2 = sibling->rb_left; + WRITE_ONCE(parent->rb_right, tmp2); + WRITE_ONCE(sibling->rb_left, parent); rb_set_parent_color(tmp1, sibling, RB_BLACK); if (tmp2) rb_set_parent(tmp2, parent); @@ -310,8 +339,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, sibling = parent->rb_left; if (rb_is_red(sibling)) { /* Case 1 - right rotate at parent */ - parent->rb_left = tmp1 = sibling->rb_right; - sibling->rb_right = parent; + tmp1 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp1); + WRITE_ONCE(sibling->rb_right, parent); rb_set_parent_color(tmp1, parent, RB_BLACK); __rb_rotate_set_parents(parent, sibling, root, RB_RED); @@ -336,9 +366,10 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, break; } /* Case 3 - right rotate at sibling */ - sibling->rb_right = tmp1 = tmp2->rb_left; - tmp2->rb_left = sibling; - parent->rb_left = tmp2; + tmp1 = tmp2->rb_left; + WRITE_ONCE(sibling->rb_right, tmp1); + WRITE_ONCE(tmp2->rb_left, sibling); + WRITE_ONCE(parent->rb_left, tmp2); if (tmp1) rb_set_parent_color(tmp1, sibling, RB_BLACK); @@ -347,8 +378,9 @@ ____rb_erase_color(struct rb_node *parent, struct rb_root *root, sibling = tmp2; } /* Case 
4 - left rotate at parent + color flips */ - parent->rb_left = tmp2 = sibling->rb_right; - sibling->rb_right = parent; + tmp2 = sibling->rb_right; + WRITE_ONCE(parent->rb_left, tmp2); + WRITE_ONCE(sibling->rb_right, parent); rb_set_parent_color(tmp1, sibling, RB_BLACK); if (tmp2) rb_set_parent(tmp2, parent); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 8609378e6..cc0c69710 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -585,7 +585,6 @@ void *rhashtable_walk_next(struct rhashtable_iter *iter) struct bucket_table *tbl = iter->walker->tbl; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; - void *obj = NULL; if (p) { p = rht_dereference_bucket_rcu(p->next, tbl, iter->slot); @@ -605,13 +604,14 @@ next: if (!rht_is_a_nulls(p)) { iter->skip++; iter->p = p; - obj = rht_obj(ht, p); - goto out; + return rht_obj(ht, p); } iter->skip = 0; } + iter->p = NULL; + /* Ensure we see any new tables. */ smp_rmb(); @@ -622,11 +622,7 @@ next: return ERR_PTR(-EAGAIN); } - iter->p = NULL; - -out: - - return obj; + return NULL; } EXPORT_SYMBOL_GPL(rhashtable_walk_next); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index c9f2e8c6c..d105a9f56 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -56,6 +56,38 @@ int sg_nents(struct scatterlist *sg) } EXPORT_SYMBOL(sg_nents); +/** + * sg_nents_for_len - return total count of entries in scatterlist + * needed to satisfy the supplied length + * @sg: The scatterlist + * @len: The total required length + * + * Description: + * Determines the number of entries in sg that are required to meet + * the supplied length, taking into acount chaining as well + * + * Returns: + * the number of sg entries needed, negative error on failure + * + **/ +int sg_nents_for_len(struct scatterlist *sg, u64 len) +{ + int nents; + u64 total; + + if (!len) + return 0; + + for (nents = 0, total = 0; sg; sg = sg_next(sg)) { + nents++; + total += sg->length; + if (total >= len) + return nents; + } + + return -EINVAL; +} +EXPORT_SYMBOL(sg_nents_for_len); /** * sg_last - return the last scatterlist entry in a list @@ -618,9 +650,8 @@ EXPORT_SYMBOL(sg_miter_stop); * Returns the number of copied bytes. * **/ -static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip, - bool to_buffer) +size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, + size_t buflen, off_t skip, bool to_buffer) { unsigned int offset = 0; struct sg_mapping_iter miter; @@ -657,6 +688,7 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, local_irq_restore(flags); return offset; } +EXPORT_SYMBOL(sg_copy_buffer); /** * sg_copy_from_buffer - Copy from a linear buffer to an SG list @@ -669,9 +701,9 @@ static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, * **/ size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen) + const void *buf, size_t buflen) { - return sg_copy_buffer(sgl, nents, buf, buflen, 0, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false); } EXPORT_SYMBOL(sg_copy_from_buffer); @@ -697,16 +729,16 @@ EXPORT_SYMBOL(sg_copy_to_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy from - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. 
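The sg_nents_for_len() helper added above, together with the now-exported sg_copy_buffer() and the const-qualified copy wrappers, is the kind of interface a driver uses to size and fill only the part of an SG table that a transfer actually touches. A small sketch, assuming a hypothetical caller; fill_sg_example(), its parameters and its error choices are illustrative and not part of the patch.

#include <linux/scatterlist.h>
#include <linux/errno.h>

static int fill_sg_example(struct scatterlist *sgl, unsigned int nents,
			   const void *payload, size_t len, off_t skip)
{
	int needed;

	/* How many entries are needed to cover skip + len bytes? */
	needed = sg_nents_for_len(sgl, skip + len);
	if (needed < 0)
		return needed;	/* the table is shorter than the transfer */

	/* Copy the payload in, starting "skip" bytes into the table. */
	if (sg_pcopy_from_buffer(sgl, nents, payload, len, skip) != len)
		return -EINVAL;

	return needed;		/* e.g. the count a dma_map_sg() call would take */
}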
* **/ size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents, - void *buf, size_t buflen, off_t skip) + const void *buf, size_t buflen, off_t skip) { - return sg_copy_buffer(sgl, nents, buf, buflen, skip, false); + return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false); } EXPORT_SYMBOL(sg_pcopy_from_buffer); @@ -715,8 +747,8 @@ EXPORT_SYMBOL(sg_pcopy_from_buffer); * @sgl: The SG list * @nents: Number of SG entries * @buf: Where to copy to - * @skip: Number of bytes to skip before copying * @buflen: The number of bytes to copy + * @skip: Number of bytes to skip before copying * * Returns the number of copied bytes. * diff --git a/lib/sort.c b/lib/sort.c index 43c9fe73a..fc20df42a 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -8,6 +8,12 @@ #include <linux/export.h> #include <linux/sort.h> +static int alignment_ok(const void *base, int align) +{ + return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || + ((unsigned long)base & (align - 1)) == 0; +} + static void u32_swap(void *a, void *b, int size) { u32 t = *(u32 *)a; @@ -15,6 +21,13 @@ static void u32_swap(void *a, void *b, int size) *(u32 *)b = t; } +static void u64_swap(void *a, void *b, int size) +{ + u64 t = *(u64 *)a; + *(u64 *)a = *(u64 *)b; + *(u64 *)b = t; +} + static void generic_swap(void *a, void *b, int size) { char t; @@ -50,8 +63,14 @@ void sort(void *base, size_t num, size_t size, /* pre-scale counters for performance */ int i = (num/2 - 1) * size, n = num * size, c, r; - if (!swap_func) - swap_func = (size == 4 ? u32_swap : generic_swap); + if (!swap_func) { + if (size == 4 && alignment_ok(base, 4)) + swap_func = u32_swap; + else if (size == 8 && alignment_ok(base, 8)) + swap_func = u64_swap; + else + swap_func = generic_swap; + } /* heapify */ for ( ; i >= 0; i -= size) { diff --git a/lib/string.c b/lib/string.c index bb3d4b699..13d1e84dd 100644 --- a/lib/string.c +++ b/lib/string.c @@ -849,3 +849,20 @@ void *memchr_inv(const void *start, int c, size_t bytes) return check_bytes8(start, value, bytes % 8); } EXPORT_SYMBOL(memchr_inv); + +/** + * strreplace - Replace all occurrences of character in string. + * @s: The string to operate on. + * @old: The character being replaced. + * @new: The character @old is replaced with. + * + * Returns pointer to the nul byte at the end of @s. + */ +char *strreplace(char *s, char old, char new) +{ + for (; *s; ++s) + if (*s == old) + *s = new; + return s; +} +EXPORT_SYMBOL(strreplace); diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c index fe9a32591..3a5f2b366 100644 --- a/lib/strnlen_user.c +++ b/lib/strnlen_user.c @@ -85,7 +85,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count, * @str: The string to measure. * @count: Maximum count (including NUL character) * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Get the size of a NUL-terminated string in user space. * @@ -121,7 +122,8 @@ EXPORT_SYMBOL(strnlen_user); * strlen_user: - Get the size of a user string INCLUDING final NUL. * @str: The string to measure. * - * Context: User context only. This function may sleep. + * Context: User context only. This function may sleep if pagefaults are + * enabled. * * Get the size of a NUL-terminated string in user space. 
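For the lib/sort.c hunk above: when no swap_func is supplied, sort() now also special-cases 8-byte elements, provided the base pointer is 8-byte aligned or the architecture handles unaligned access efficiently. A minimal sketch of a caller that would take the new u64_swap() path; cmp_u64() and sort_keys_example() are made up for illustration.

#include <linux/sort.h>
#include <linux/types.h>

/* Hypothetical comparison callback for an array of u64 keys. */
static int cmp_u64(const void *a, const void *b)
{
	u64 x = *(const u64 *)a;
	u64 y = *(const u64 *)b;

	if (x < y)
		return -1;
	return x > y ? 1 : 0;
}

static void sort_keys_example(u64 *keys, size_t n)
{
	/* A kmalloc()'d or statically allocated u64 array is normally
	 * 8-byte aligned, so alignment_ok() passes and u64_swap() is
	 * chosen; an unaligned buffer falls back to generic_swap(). */
	sort(keys, n, sizeof(*keys), cmp_u64, NULL);
}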
* diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 3c365ab6c..76f29ecba 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -29,10 +29,10 @@ #include <linux/ctype.h> #include <linux/highmem.h> #include <linux/gfp.h> +#include <linux/scatterlist.h> #include <asm/io.h> #include <asm/dma.h> -#include <asm/scatterlist.h> #include <linux/init.h> #include <linux/bootmem.h> @@ -656,7 +656,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE); if (paddr == SWIOTLB_MAP_ERROR) - return NULL; + goto err_warn; ret = phys_to_virt(paddr); dev_addr = phys_to_dma(hwdev, paddr); @@ -670,7 +670,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ swiotlb_tbl_unmap_single(hwdev, paddr, size, DMA_TO_DEVICE); - return NULL; + goto err_warn; } } @@ -678,6 +678,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size, memset(ret, 0, size); return ret; + +err_warn: + pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n", + dev_name(hwdev), size); + dump_stack(); + + return NULL; } EXPORT_SYMBOL(swiotlb_alloc_coherent); diff --git a/lib/test-hexdump.c b/lib/test-hexdump.c index c227cc43e..5241df36e 100644 --- a/lib/test-hexdump.c +++ b/lib/test-hexdump.c @@ -25,19 +25,19 @@ static const char * const test_data_1_le[] __initconst = { "4c", "d1", "19", "99", "43", "b1", "af", "0c", }; -static const char *test_data_2_le[] __initdata = { +static const char * const test_data_2_le[] __initconst = { "32be", "7bdb", "180a", "b293", "ba70", "24c4", "837d", "9b34", "9ca6", "ad31", "0f9c", "e9ac", "d14c", "9919", "b143", "0caf", }; -static const char *test_data_4_le[] __initdata = { +static const char * const test_data_4_le[] __initconst = { "7bdb32be", "b293180a", "24c4ba70", "9b34837d", "ad319ca6", "e9ac0f9c", "9919d14c", "0cafb143", }; -static const char *test_data_8_le[] __initdata = { +static const char * const test_data_8_le[] __initconst = { "b293180a7bdb32be", "9b34837d24c4ba70", "e9ac0f9cad319ca6", "0cafb1439919d14c", }; diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 80d78c51f..7f58c735d 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -21,6 +21,7 @@ #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_vlan.h> +#include <linux/random.h> /* General test specific settings */ #define MAX_SUBTESTS 3 @@ -67,6 +68,10 @@ struct bpf_test { union { struct sock_filter insns[MAX_INSNS]; struct bpf_insn insns_int[MAX_INSNS]; + struct { + void *insns; + unsigned int len; + } ptr; } u; __u8 aux; __u8 data[MAX_DATA]; @@ -74,8 +79,282 @@ struct bpf_test { int data_size; __u32 result; } test[MAX_SUBTESTS]; + int (*fill_helper)(struct bpf_test *self); }; +/* Large test cases need separate allocation and fill handler. 
*/ + +static int bpf_fill_maxinsns1(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + __u32 k = ~0; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++, k--) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, k); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns2(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns3(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + struct rnd_state rnd; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + prandom_seed_state(&rnd, 3141592653589793238ULL); + + for (i = 0; i < len - 1; i++) { + __u32 k = prandom_u32_state(&rnd); + + insn[i] = __BPF_STMT(BPF_ALU | BPF_ADD | BPF_K, k); + } + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns4(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS + 1; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns5(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[0] = __BPF_JUMP(BPF_JMP | BPF_JA, len - 2, 0, 0); + + for (i = 1; i < len - 1; i++) + insn[i] = __BPF_STMT(BPF_RET | BPF_K, 0xfefefefe); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xabababab); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns6(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len - 1; i++) + insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_VLAN_TAG_PRESENT); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns7(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < len - 4; i++) + insn[i] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_CPU); + + insn[len - 4] = __BPF_STMT(BPF_MISC | BPF_TAX, 0); + insn[len - 3] = __BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + + SKF_AD_CPU); + insn[len - 2] = __BPF_STMT(BPF_ALU | BPF_SUB | BPF_X, 0); + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns8(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct sock_filter *insn; + int i, jmp_off = len - 3; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return 
-ENOMEM; + + insn[0] = __BPF_STMT(BPF_LD | BPF_IMM, 0xffffffff); + + for (i = 1; i < len - 1; i++) + insn[i] = __BPF_JUMP(BPF_JMP | BPF_JGT, 0xffffffff, jmp_off--, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_A, 0); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns9(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS; + struct bpf_insn *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + insn[0] = BPF_JMP_IMM(BPF_JA, 0, 0, len - 2); + insn[1] = BPF_ALU32_IMM(BPF_MOV, R0, 0xcbababab); + insn[2] = BPF_EXIT_INSN(); + + for (i = 3; i < len - 2; i++) + insn[i] = BPF_ALU32_IMM(BPF_MOV, R0, 0xfefefefe); + + insn[len - 2] = BPF_EXIT_INSN(); + insn[len - 1] = BPF_JMP_IMM(BPF_JA, 0, 0, -(len - 1)); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns10(struct bpf_test *self) +{ + unsigned int len = BPF_MAXINSNS, hlen = len - 2; + struct bpf_insn *insn; + int i; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + for (i = 0; i < hlen / 2; i++) + insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 2 - 2 * i); + for (i = hlen - 1; i > hlen / 2; i--) + insn[i] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen - 1 - 2 * i); + + insn[hlen / 2] = BPF_JMP_IMM(BPF_JA, 0, 0, hlen / 2 - 1); + insn[hlen] = BPF_ALU32_IMM(BPF_MOV, R0, 0xabababac); + insn[hlen + 1] = BPF_EXIT_INSN(); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int __bpf_fill_ja(struct bpf_test *self, unsigned int len, + unsigned int plen) +{ + struct sock_filter *insn; + unsigned int rlen; + int i, j; + + insn = kmalloc_array(len, sizeof(*insn), GFP_KERNEL); + if (!insn) + return -ENOMEM; + + rlen = (len % plen) - 1; + + for (i = 0; i + plen < len; i += plen) + for (j = 0; j < plen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, + plen - 1 - j, 0, 0); + for (j = 0; j < rlen; j++) + insn[i + j] = __BPF_JUMP(BPF_JMP | BPF_JA, rlen - 1 - j, + 0, 0); + + insn[len - 1] = __BPF_STMT(BPF_RET | BPF_K, 0xababcbac); + + self->u.ptr.insns = insn; + self->u.ptr.len = len; + + return 0; +} + +static int bpf_fill_maxinsns11(struct bpf_test *self) +{ + /* Hits 70 passes on x86_64, so cannot get JITed there. */ + return __bpf_fill_ja(self, BPF_MAXINSNS, 68); +} + +static int bpf_fill_ja(struct bpf_test *self) +{ + /* Hits exactly 11 passes on x86_64 JIT. 
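A worked trace of the jump-chain helper, assuming __bpf_fill_ja() behaves exactly as written above:

/*
 * __bpf_fill_ja(self, 12, 9), as used by bpf_fill_ja() below, produces:
 *
 *   insn[0..8]   BPF_JMP|BPF_JA with jump offsets 8, 7, ..., 0
 *   rlen = (12 % 9) - 1 = 2
 *   insn[9..10]  BPF_JMP|BPF_JA with jump offsets 1, 0
 *   insn[11]     BPF_RET|BPF_K, 0xababcbac
 *
 * Long chains of jumps make the JITed size of each instruction depend on
 * distances that keep shrinking while the image converges, which is
 * presumably why these comments count JIT passes: 11 here, and around 70
 * for the maxinsns11 case, which therefore does not get JITed on x86_64.
 */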
*/ + return __bpf_fill_ja(self, 12, 9); +} + static struct bpf_test tests[] = { { "TAX", @@ -1755,7 +2034,8 @@ static struct bpf_test tests[] = { BPF_EXIT_INSN(), BPF_JMP_IMM(BPF_JEQ, R3, 0x1234, 1), BPF_EXIT_INSN(), - BPF_ALU64_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R0, 0x1ffffffffLL), + BPF_ALU64_IMM(BPF_RSH, R0, 32), /* R0 = 1 */ BPF_EXIT_INSN(), }, INTERNAL, @@ -1805,6 +2085,2313 @@ static struct bpf_test tests[] = { 0x10, 0xbf, 0x48, 0xd6, 0x43, 0xd6}, { { 38, 256 } } }, + /* BPF_ALU | BPF_MOV | BPF_X */ + { + "ALU_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_MOV_X: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_X: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU64_REG(BPF_MOV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + /* BPF_ALU | BPF_MOV | BPF_K */ + { + "ALU_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_MOV_K: dst = 4294967295", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_MOV_K: 0x0000ffffffff0000 = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x00000000ffffffffLL), + BPF_ALU32_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: dst = 2", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOV_K: dst = 2147483647", + .u.insns_int = { + BPF_ALU64_IMM(BPF_MOV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_OR_K: dst = 0x0", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0), + BPF_ALU64_IMM(BPF_MOV, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MOV_K: dst = -1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MOV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_ADD | BPF_X */ + { + "ALU_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU64_ADD_X: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + 
BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_X: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + /* BPF_ALU | BPF_ADD | BPF_K */ + { + "ALU_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_ADD_K: 1 + 4294967294 = 4294967295", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_ADD, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 4294967295U } }, + }, + { + "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 1 + 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 3 + 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_ADD, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_ADD_K: 1 + 2147483646 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_ADD, R0, 2147483646), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_ADD_K: 2147483646 + -2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_ADD, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 1 + 0 = 1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x1), + BPF_ALU64_IMM(BPF_ADD, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_X */ + { + "ALU_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + BPF_ALU32_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_X: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_X: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967294U), + 
BPF_ALU64_REG(BPF_SUB, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_SUB | BPF_K */ + { + "ALU_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_SUB_K: 4294967295 - 4294967294 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_SUB, R0, 4294967294U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_SUB_K: 3 - 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_SUB_K: 3 - 0 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_SUB, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_SUB_K: 4294967294 - 4294967295 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU64_IMM(BPF_SUB, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + { + "ALU64_ADD_K: 2147483646 - 2147483647 = -1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483646), + BPF_ALU64_IMM(BPF_SUB, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -1 } }, + }, + /* BPF_ALU | BPF_MUL | BPF_X */ + { + "ALU_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU_MUL_X: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 0x7FFFFFF8), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_X: -1 * -1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, -1), + BPF_ALU32_IMM(BPF_MOV, R1, -1), + BPF_ALU32_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MUL_X: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 3), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_X: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_MUL, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + /* BPF_ALU | BPF_MUL | BPF_K */ + { + "ALU_MUL_K: 2 * 3 = 6", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_MUL_K: 2 * 0x7FFFFFF8 = 0xFFFFFFF0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MUL, R0, 0x7FFFFFF8), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xFFFFFFF0 } }, + }, + { + "ALU_MUL_K: 1 * (-1) = 0x00000000ffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0x00000000ffffffff), + BPF_ALU32_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_MUL_K: 2 * 3 = 6", + .u.insns_int = { + 
BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_MUL, R0, 3), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 6 } }, + }, + { + "ALU64_MUL_K: 3 * 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MUL, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_MUL_K: 1 * 2147483647 = 2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * -2147483647 = -2147483647", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_MUL, R0, -2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -2147483647 } }, + }, + { + "ALU64_MUL_K: 1 * (-1) = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x1), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_MUL, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_DIV | BPF_X */ + { + "ALU_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_X: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967295U), + BPF_ALU32_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_X: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483647), + BPF_ALU64_REG(BPF_DIV, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_X: 0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R4, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_REG(BPF_DIV, R2, R4), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_DIV | BPF_K */ + { + "ALU_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU32_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 3 / 1 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_DIV_K: 4294967295 / 4294967295 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_DIV, R0, 4294967295U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_DIV_K: 0xffffffffffffffff / (-1) = 0x1", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x1UL), + BPF_ALU32_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_DIV_K: 6 / 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 6), + BPF_ALU64_IMM(BPF_DIV, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 3 / 1 = 3", + .u.insns_int = { + 
BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_DIV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_DIV_K: 2147483647 / 2147483647 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_DIV, R0, 2147483647), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_DIV_K: 0xffffffffffffffff / (-1) = 0x0000000000000001", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0x0000000000000001LL), + BPF_ALU64_IMM(BPF_DIV, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_X */ + { + "ALU_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_X: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOV, R1, 4294967293U), + BPF_ALU32_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_X: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_X: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU32_IMM(BPF_MOV, R1, 2147483645), + BPF_ALU64_REG(BPF_MOD, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_MOD | BPF_K */ + { + "ALU_MOD_K: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU_MOD_K: 4294967295 % 4294967293 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967295U), + BPF_ALU32_IMM(BPF_MOD, R0, 4294967293U), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_MOD_K: 3 % 2 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_MOD_K: 3 % 1 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_MOD, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0 } }, + }, + { + "ALU64_MOD_K: 2147483647 % 2147483645 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 2147483647), + BPF_ALU64_IMM(BPF_MOD, R0, 2147483645), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + /* BPF_ALU | BPF_AND | BPF_X */ + { + "ALU_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_X: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_X: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 
0xffffffff), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_AND, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_AND | BPF_K */ + { + "ALU_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU32_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU32_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_K: 3 & 2 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_AND, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_AND_K: 0xffffffff & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xffffffff), + BPF_ALU64_IMM(BPF_AND, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & 0x0 = 0x0000ffff00000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000000000000000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0x0000ffffffff0000 & -1 = 0x0000ffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_AND_K: 0xffffffffffffffff & -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffffffffffffffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_AND, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_OR | BPF_X */ + { + "ALU_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_X: 0x0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_X: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 2), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_X: 0 | 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_OR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_ALU | BPF_OR | BPF_K */ + { + "ALU_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_OR, R0, 2), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU32_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_K: 1 | 2 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_OR, R0, 2), + 
BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_OR_K: 0 & 0xffffffff = 0xffffffff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_ALU64_IMM(BPF_OR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | 0x0 = 0x0000ffff00000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_OR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x0000ffffffff0000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_OR_K: 0x000000000000000 | -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_OR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_XOR | BPF_X */ + { + "ALU_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_X: 0x1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU32_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_X: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_MOV, R1, 6), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_XOR_X: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 0xffffffff), + BPF_ALU64_REG(BPF_XOR, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + /* BPF_ALU | BPF_XOR | BPF_K */ + { + "ALU_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU32_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU_XOR_K: 1 ^ 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_K: 5 ^ 6 = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, 5), + BPF_ALU64_IMM(BPF_XOR, R0, 6), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_XOR_K: 1 & 0xffffffff = 0xfffffffe", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_XOR, R0, 0xffffffff), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xfffffffe } }, + }, + { + "ALU64_XOR_K: 0x0000ffffffff0000 ^ 0x0 = 0x0000ffffffff0000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0x0000ffffffff0000LL), + BPF_ALU64_IMM(BPF_XOR, R2, 0x0), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: 0x0000ffffffff0000 ^ -1 = 0xffff00000000ffff", + 
.u.insns_int = { + BPF_LD_IMM64(R2, 0x0000ffffffff0000LL), + BPF_LD_IMM64(R3, 0xffff00000000ffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_XOR_K: 0x000000000000000 ^ -1 = 0xffffffffffffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0000000000000000LL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ALU64_IMM(BPF_XOR, R2, 0xffffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + /* BPF_ALU | BPF_LSH | BPF_X */ + { + "ALU_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU64_LSH_X: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_LSH_X: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_LSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + /* BPF_ALU | BPF_LSH | BPF_K */ + { + "ALU_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU32_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + { + "ALU64_LSH_K: 1 << 1 = 2", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 2 } }, + }, + { + "ALU64_LSH_K: 1 << 31 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R0, 1), + BPF_ALU64_IMM(BPF_LSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x80000000 } }, + }, + /* BPF_ALU | BPF_RSH | BPF_X */ + { + "ALU_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU32_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_MOV, R1, 1), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_X: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU32_IMM(BPF_MOV, R1, 31), + BPF_ALU64_REG(BPF_RSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_RSH | BPF_K */ + { + "ALU_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU32_IMM(BPF_RSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU_RSH_K: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + 
BPF_ALU32_IMM(BPF_RSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_K: 2 >> 1 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_ALU64_IMM(BPF_RSH, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "ALU64_RSH_K: 0x80000000 >> 31 = 1", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x80000000), + BPF_ALU64_IMM(BPF_RSH, R0, 31), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_ALU | BPF_ARSH | BPF_X */ + { + "ALU_ARSH_X: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xff00ff0000000000LL), + BPF_ALU32_IMM(BPF_MOV, R1, 40), + BPF_ALU64_REG(BPF_ARSH, R0, R1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff00ff } }, + }, + /* BPF_ALU | BPF_ARSH | BPF_K */ + { + "ALU_ARSH_K: 0xff00ff0000000000 >> 40 = 0xffffffffffff00ff", + .u.insns_int = { + BPF_LD_IMM64(R0, 0xff00ff0000000000LL), + BPF_ALU64_IMM(BPF_ARSH, R0, 40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff00ff } }, + }, + /* BPF_ALU | BPF_NEG */ + { + "ALU_NEG: -(3) = -3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -3 } }, + }, + { + "ALU_NEG: -(-3) = 3", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, -3), + BPF_ALU32_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + { + "ALU64_NEG: -(3) = -3", + .u.insns_int = { + BPF_LD_IMM64(R0, 3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, -3 } }, + }, + { + "ALU64_NEG: -(-3) = 3", + .u.insns_int = { + BPF_LD_IMM64(R0, -3), + BPF_ALU64_IMM(BPF_NEG, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 3 } }, + }, + /* BPF_ALU | BPF_END | BPF_FROM_BE */ + { + "ALU_END_FROM_BE 16: 0x0123456789abcdef -> 0xcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 16), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_be16(0xcdef) } }, + }, + { + "ALU_END_FROM_BE 32: 0x0123456789abcdef -> 0x89abcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_be32(0x89abcdef) } }, + }, + { + "ALU_END_FROM_BE 64: 0x0123456789abcdef -> 0x89abcdef", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_BE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, (u32) cpu_to_be64(0x0123456789abcdefLL) } }, + }, + /* BPF_ALU | BPF_END | BPF_FROM_LE */ + { + "ALU_END_FROM_LE 16: 0x0123456789abcdef -> 0xefcd", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 16), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_le16(0xcdef) } }, + }, + { + "ALU_END_FROM_LE 32: 0x0123456789abcdef -> 0xefcdab89", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 32), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, cpu_to_le32(0x89abcdef) } }, + }, + { + "ALU_END_FROM_LE 64: 0x0123456789abcdef -> 0x67452301", + .u.insns_int = { + BPF_LD_IMM64(R0, 0x0123456789abcdefLL), + BPF_ENDIAN(BPF_FROM_LE, R0, 64), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, (u32) cpu_to_le64(0x0123456789abcdefLL) } }, + }, + /* BPF_ST(X) | BPF_MEM | BPF_B/H/W/DW */ + { + "ST_MEM_B: Store/Load byte: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_B, R10, -40, 0xff), + BPF_LDX_MEM(BPF_B, R0, R10, -40), + BPF_EXIT_INSN(), 
+ }, + INTERNAL, + { }, + { { 0, 0xff } }, + }, + { + "ST_MEM_B: Store/Load byte: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7f), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7f } }, + }, + { + "STX_MEM_B: Store/Load byte: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffLL), + BPF_STX_MEM(BPF_B, R10, R1, -40), + BPF_LDX_MEM(BPF_B, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xff } }, + }, + { + "ST_MEM_H: Store/Load half word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0xffff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff } }, + }, + { + "ST_MEM_H: Store/Load half word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_H, R10, -40, 0x7fff), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fff } }, + }, + { + "STX_MEM_H: Store/Load half word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffLL), + BPF_STX_MEM(BPF_H, R10, R1, -40), + BPF_LDX_MEM(BPF_H, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffff } }, + }, + { + "ST_MEM_W: Store/Load word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_W: Store/Load word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_W, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fffffff } }, + }, + { + "STX_MEM_W: Store/Load word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffLL), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_DW: Store/Load double word: max negative", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + { + "ST_MEM_DW: Store/Load double word: max negative 2", + .u.insns_int = { + BPF_LD_IMM64(R2, 0xffff00000000ffffLL), + BPF_LD_IMM64(R3, 0xffffffffffffffffLL), + BPF_ST_MEM(BPF_DW, R10, -40, 0xffffffff), + BPF_LDX_MEM(BPF_DW, R2, R10, -40), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ST_MEM_DW: Store/Load double word: max positive", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_ST_MEM(BPF_DW, R10, -40, 0x7fffffff), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x7fffffff } }, + }, + { + "STX_MEM_DW: Store/Load double word: max negative", + .u.insns_int = { + BPF_LD_IMM64(R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_STX_MEM(BPF_W, R10, R1, -40), + BPF_LDX_MEM(BPF_W, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0xffffffff } }, + }, + /* BPF_STX | BPF_XADD | BPF_W/DW */ + { + "STX_XADD_W: Test: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_W, R10, -40, 0x10), + BPF_STX_XADD(BPF_W, R10, R0, -40), + BPF_LDX_MEM(BPF_W, R0, 
R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x22 } }, + }, + { + "STX_XADD_DW: Test: 0x12 + 0x10 = 0x22", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x12), + BPF_ST_MEM(BPF_DW, R10, -40, 0x10), + BPF_STX_XADD(BPF_DW, R10, R0, -40), + BPF_LDX_MEM(BPF_DW, R0, R10, -40), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x22 } }, + }, + /* BPF_JMP | BPF_EXIT */ + { + "JMP_EXIT", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0x4711), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0x4712), + }, + INTERNAL, + { }, + { { 0, 0x4711 } }, + }, + /* BPF_JMP | BPF_JA */ + { + "JMP_JA: Unconditional jump: if (true) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGT | BPF_K */ + { + "JMP_JSGT_K: Signed jump: if (-1 > -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGT_K: Signed jump: if (-1 > -1) return 0", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGT, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGE | BPF_K */ + { + "JMP_JSGE_K: Signed jump: if (-1 >= -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGE_K: Signed jump: if (-1 >= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 0xffffffffffffffffLL), + BPF_JMP_IMM(BPF_JSGE, R1, -1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_K */ + { + "JMP_JGT_K: if (3 > 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGT, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGE | BPF_K */ + { + "JMP_JGE_K: if (3 >= 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_K jump backwards */ + { + "JMP_JGT_K: if (3 > 2) return 1 (jump backwards)", + .u.insns_int = { + BPF_JMP_IMM(BPF_JA, 0, 0, 2), /* goto start */ + BPF_ALU32_IMM(BPF_MOV, R0, 1), /* out: */ + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), /* start: */ + BPF_LD_IMM64(R1, 3), /* note: this takes 2 insns */ + BPF_JMP_IMM(BPF_JGT, R1, 2, -6), /* goto out */ + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JGE_K: if (3 >= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JGE, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JNE | BPF_K */ + { + "JMP_JNE_K: if (3 != 2) return 1", + .u.insns_int = { + 
BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JEQ | BPF_K */ + { + "JMP_JEQ_K: if (3 == 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JEQ, R1, 3, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSET | BPF_K */ + { + "JMP_JSET_K: if (0x3 & 0x2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSET_K: if (0x3 & 0xffffffff) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGT | BPF_X */ + { + "JMP_JSGT_X: Signed jump: if (-1 > -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGT_X: Signed jump: if (-1 > -1) return 0", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSGE | BPF_X */ + { + "JMP_JSGE_X: Signed jump: if (-1 >= -2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -2), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSGE_X: Signed jump: if (-1 >= -1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, -1), + BPF_JMP_REG(BPF_JSGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGT | BPF_X */ + { + "JMP_JGT_X: if (3 > 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JGE | BPF_X */ + { + "JMP_JGE_X: if (3 >= 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JGE_X: if (3 >= 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JGE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JNE | BPF_X */ + { + "JMP_JNE_X: if (3 != 2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + 
BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JEQ | BPF_X */ + { + "JMP_JEQ_X: if (3 == 3) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 3), + BPF_JMP_REG(BPF_JEQ, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + /* BPF_JMP | BPF_JSET | BPF_X */ + { + "JMP_JSET_X: if (0x3 & 0x2) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 2), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JSET_X: if (0x3 & 0xffffffff) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, 3), + BPF_LD_IMM64(R2, 0xffffffff), + BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { + "JMP_JA: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_ja, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Maximum possible literals", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns1, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Single literal", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xfefefefe } }, + .fill_helper = bpf_fill_maxinsns2, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Run/add until end", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0x947bf368 } }, + .fill_helper = bpf_fill_maxinsns3, + }, + { + "BPF_MAXINSNS: Too many instructions", + { }, + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL, + { }, + { }, + .fill_helper = bpf_fill_maxinsns4, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xabababab } }, + .fill_helper = bpf_fill_maxinsns5, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Ctx heavy transformations", + { }, + CLASSIC, + { }, + { + { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, + { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } + }, + .fill_helper = bpf_fill_maxinsns6, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Call heavy transformations", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 1, 0 }, { 10, 0 } }, + .fill_helper = bpf_fill_maxinsns7, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Jump heavy test", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xffffffff } }, + .fill_helper = bpf_fill_maxinsns8, + }, + { /* Mainly checking JIT here. */ + "BPF_MAXINSNS: Very long jump backwards", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xcbababab } }, + .fill_helper = bpf_fill_maxinsns9, + }, + { /* Mainly checking JIT here. 
*/ + "BPF_MAXINSNS: Edge hopping nuthouse", + { }, + INTERNAL | FLAG_NO_DATA, + { }, + { { 0, 0xabababac } }, + .fill_helper = bpf_fill_maxinsns10, + }, + { + "BPF_MAXINSNS: Jump, gap, jump, ...", + { }, + CLASSIC | FLAG_NO_DATA, + { }, + { { 0, 0xababcbac } }, + .fill_helper = bpf_fill_maxinsns11, + }, }; static struct net_device dev; @@ -1858,10 +4445,15 @@ static void release_test_data(const struct bpf_test *test, void *data) kfree_skb(data); } -static int probe_filter_length(struct sock_filter *fp) +static int filter_length(int which) { - int len = 0; + struct sock_filter *fp; + int len; + if (tests[which].fill_helper) + return tests[which].u.ptr.len; + + fp = tests[which].u.insns; for (len = MAX_INSNS - 1; len > 0; --len) if (fp[len].code != 0 || fp[len].k != 0) break; @@ -1869,16 +4461,25 @@ static int probe_filter_length(struct sock_filter *fp) return len + 1; } +static void *filter_pointer(int which) +{ + if (tests[which].fill_helper) + return tests[which].u.ptr.insns; + else + return tests[which].u.insns; +} + static struct bpf_prog *generate_filter(int which, int *err) { - struct bpf_prog *fp; - struct sock_fprog_kern fprog; - unsigned int flen = probe_filter_length(tests[which].u.insns); __u8 test_type = tests[which].aux & TEST_TYPE_MASK; + unsigned int flen = filter_length(which); + void *fptr = filter_pointer(which); + struct sock_fprog_kern fprog; + struct bpf_prog *fp; switch (test_type) { case CLASSIC: - fprog.filter = tests[which].u.insns; + fprog.filter = fptr; fprog.len = flen; *err = bpf_prog_create(&fp, &fprog); @@ -1914,8 +4515,7 @@ static struct bpf_prog *generate_filter(int which, int *err) } fp->len = flen; - memcpy(fp->insnsi, tests[which].u.insns_int, - fp->len * sizeof(struct bpf_insn)); + memcpy(fp->insnsi, fptr, fp->len * sizeof(struct bpf_insn)); bpf_prog_select_runtime(fp); break; @@ -1987,9 +4587,33 @@ static int run_one(const struct bpf_prog *fp, struct bpf_test *test) return err_cnt; } +static __init int prepare_bpf_tests(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (tests[i].fill_helper && + tests[i].fill_helper(&tests[i]) < 0) + return -ENOMEM; + } + + return 0; +} + +static __init void destroy_bpf_tests(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tests); i++) { + if (tests[i].fill_helper) + kfree(tests[i].u.ptr.insns); + } +} + static __init int test_bpf(void) { int i, err_cnt = 0, pass_cnt = 0; + int jit_cnt = 0, run_cnt = 0; for (i = 0; i < ARRAY_SIZE(tests); i++) { struct bpf_prog *fp; @@ -2006,6 +4630,13 @@ static __init int test_bpf(void) return err; } + + pr_cont("jited:%u ", fp->jited); + + run_cnt++; + if (fp->jited) + jit_cnt++; + err = run_one(fp, &tests[i]); release_filter(fp, i); @@ -2018,13 +4649,24 @@ static __init int test_bpf(void) } } - pr_info("Summary: %d PASSED, %d FAILED\n", pass_cnt, err_cnt); + pr_info("Summary: %d PASSED, %d FAILED, [%d/%d JIT'ed]\n", + pass_cnt, err_cnt, jit_cnt, run_cnt); + return err_cnt ? 
-EINVAL : 0; } static int __init test_bpf_init(void) { - return test_bpf(); + int ret; + + ret = prepare_bpf_tests(); + if (ret < 0) + return ret; + + ret = test_bpf(); + + destroy_bpf_tests(); + return ret; } static void __exit test_bpf_exit(void) diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index b2957540d..c90777eae 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -1,14 +1,9 @@ /* * Resizable, Scalable, Concurrent Hash Table * - * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2014-2015 Thomas Graf <tgraf@suug.ch> * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> * - * Based on the following paper: - * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf - * - * Code partially derived from nft_hash - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. @@ -26,20 +21,37 @@ #include <linux/rhashtable.h> #include <linux/slab.h> +#define MAX_ENTRIES 1000000 +#define TEST_INSERT_FAIL INT_MAX + +static int entries = 50000; +module_param(entries, int, 0); +MODULE_PARM_DESC(entries, "Number of entries to add (default: 50000)"); + +static int runs = 4; +module_param(runs, int, 0); +MODULE_PARM_DESC(runs, "Number of test runs per variant (default: 4)"); + +static int max_size = 65536; +module_param(max_size, int, 0); +MODULE_PARM_DESC(max_size, "Maximum table size (default: 65536)"); -#define TEST_HT_SIZE 8 -#define TEST_ENTRIES 2048 -#define TEST_PTR ((void *) 0xdeadbeef) -#define TEST_NEXPANDS 4 +static bool shrinking = false; +module_param(shrinking, bool, 0); +MODULE_PARM_DESC(shrinking, "Enable automatic shrinking (default: off)"); + +static int size = 8; +module_param(size, int, 0); +MODULE_PARM_DESC(size, "Initial size hint of table (default: 8)"); struct test_obj { - void *ptr; int value; struct rhash_head node; }; -static const struct rhashtable_params test_rht_params = { - .nelem_hint = TEST_HT_SIZE, +static struct test_obj array[MAX_ENTRIES]; + +static struct rhashtable_params test_rht_params = { .head_offset = offsetof(struct test_obj, node), .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), @@ -51,11 +63,14 @@ static int __init test_rht_lookup(struct rhashtable *ht) { unsigned int i; - for (i = 0; i < TEST_ENTRIES * 2; i++) { + for (i = 0; i < entries * 2; i++) { struct test_obj *obj; bool expected = !(i % 2); u32 key = i; + if (array[i / 2].value == TEST_INSERT_FAIL) + expected = false; + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); if (expected && !obj) { @@ -66,9 +81,9 @@ static int __init test_rht_lookup(struct rhashtable *ht) key); return -EEXIST; } else if (expected && obj) { - if (obj->ptr != TEST_PTR || obj->value != i) { - pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n", - obj->ptr, TEST_PTR, obj->value, i); + if (obj->value != i) { + pr_warn("Test failed: Lookup value mismatch %u!=%u\n", + obj->value, i); return -EINVAL; } } @@ -77,129 +92,147 @@ static int __init test_rht_lookup(struct rhashtable *ht) return 0; } -static void test_bucket_stats(struct rhashtable *ht, bool quiet) +static void test_bucket_stats(struct rhashtable *ht) { - unsigned int cnt, rcu_cnt, i, total = 0; + unsigned int err, total = 0, chain_len = 0; + struct rhashtable_iter hti; struct rhash_head *pos; - struct test_obj *obj; - struct bucket_table *tbl; - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) { - rcu_cnt = cnt = 0; +
err = rhashtable_walk_init(ht, &hti); + if (err) { + pr_warn("Test failed: allocation error"); + return; + } - if (!quiet) - pr_info(" [%#4x/%u]", i, tbl->size); + err = rhashtable_walk_start(&hti); + if (err && err != -EAGAIN) { + pr_warn("Test failed: iterator failed: %d\n", err); + return; + } - rht_for_each_entry_rcu(obj, pos, tbl, i, node) { - cnt++; - total++; - if (!quiet) - pr_cont(" [%p],", obj); + while ((pos = rhashtable_walk_next(&hti))) { + if (PTR_ERR(pos) == -EAGAIN) { + pr_info("Info: encountered resize\n"); + chain_len++; + continue; + } else if (IS_ERR(pos)) { + pr_warn("Test failed: rhashtable_walk_next() error: %ld\n", + PTR_ERR(pos)); + break; } - rht_for_each_entry_rcu(obj, pos, tbl, i, node) - rcu_cnt++; - - if (rcu_cnt != cnt) - pr_warn("Test failed: Chain count mismach %d != %d", - cnt, rcu_cnt); - - if (!quiet) - pr_cont("\n [%#x] first element: %p, chain length: %u\n", - i, tbl->buckets[i], cnt); + total++; } - pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d\n", - total, atomic_read(&ht->nelems), TEST_ENTRIES); + rhashtable_walk_stop(&hti); + rhashtable_walk_exit(&hti); + + pr_info(" Traversal complete: counted=%u, nelems=%u, entries=%d, table-jumps=%u\n", + total, atomic_read(&ht->nelems), entries, chain_len); - if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES) + if (total != atomic_read(&ht->nelems) || total != entries) pr_warn("Test failed: Total count mismatch ^^^"); } -static int __init test_rhashtable(struct rhashtable *ht) +static s64 __init test_rhashtable(struct rhashtable *ht) { - struct bucket_table *tbl; struct test_obj *obj; - struct rhash_head *pos, *next; int err; - unsigned int i; + unsigned int i, insert_fails = 0; + s64 start, end; /* * Insertion Test: - * Insert TEST_ENTRIES into table with all keys even numbers + * Insert entries into table with all keys even numbers */ - pr_info(" Adding %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { - struct test_obj *obj; - - obj = kzalloc(sizeof(*obj), GFP_KERNEL); - if (!obj) { - err = -ENOMEM; - goto error; - } + pr_info(" Adding %d keys\n", entries); + start = ktime_get_ns(); + for (i = 0; i < entries; i++) { + struct test_obj *obj = &array[i]; - obj->ptr = TEST_PTR; obj->value = i * 2; err = rhashtable_insert_fast(ht, &obj->node, test_rht_params); - if (err) { - kfree(obj); - goto error; + if (err == -ENOMEM || err == -EBUSY) { + /* Mark failed inserts but continue */ + obj->value = TEST_INSERT_FAIL; + insert_fails++; + } else if (err) { + return err; } } + if (insert_fails) + pr_info(" %u insertions failed due to memory pressure\n", + insert_fails); + + test_bucket_stats(ht); rcu_read_lock(); - test_bucket_stats(ht, true); test_rht_lookup(ht); rcu_read_unlock(); - rcu_read_lock(); - test_bucket_stats(ht, true); - rcu_read_unlock(); + test_bucket_stats(ht); - pr_info(" Deleting %d keys\n", TEST_ENTRIES); - for (i = 0; i < TEST_ENTRIES; i++) { + pr_info(" Deleting %d keys\n", entries); + for (i = 0; i < entries; i++) { u32 key = i * 2; - obj = rhashtable_lookup_fast(ht, &key, test_rht_params); - BUG_ON(!obj); + if (array[i].value != TEST_INSERT_FAIL) { + obj = rhashtable_lookup_fast(ht, &key, test_rht_params); + BUG_ON(!obj); - rhashtable_remove_fast(ht, &obj->node, test_rht_params); - kfree(obj); + rhashtable_remove_fast(ht, &obj->node, test_rht_params); + } } - return 0; - -error: - tbl = rht_dereference_rcu(ht->tbl, ht); - for (i = 0; i < tbl->size; i++) - rht_for_each_entry_safe(obj, pos, next, tbl, i, node) - kfree(obj); + end = ktime_get_ns(); + 
pr_info(" Duration of test: %lld ns\n", end - start); - return err; + return end - start; } static struct rhashtable ht; static int __init test_rht_init(void) { - int err; + int i, err; + u64 total_time = 0; - pr_info("Running resizable hashtable tests...\n"); + entries = min(entries, MAX_ENTRIES); - err = rhashtable_init(&ht, &test_rht_params); - if (err < 0) { - pr_warn("Test failed: Unable to initialize hashtable: %d\n", - err); - return err; - } + test_rht_params.automatic_shrinking = shrinking; + test_rht_params.max_size = max_size; + test_rht_params.nelem_hint = size; - err = test_rhashtable(&ht); + pr_info("Running rhashtable test nelem=%d, max_size=%d, shrinking=%d\n", + size, max_size, shrinking); - rhashtable_destroy(&ht); + for (i = 0; i < runs; i++) { + s64 time; - return err; + pr_info("Test %02d:\n", i); + memset(&array, 0, sizeof(array)); + err = rhashtable_init(&ht, &test_rht_params); + if (err < 0) { + pr_warn("Test failed: Unable to initialize hashtable: %d\n", + err); + continue; + } + + time = test_rhashtable(&ht); + rhashtable_destroy(&ht); + if (time < 0) { + pr_warn("Test failed: return code %lld\n", time); + return -EINVAL; + } + + total_time += time; + } + + do_div(total_time, runs); + pr_info("Average test time: %llu\n", total_time); + + return 0; } static void __exit test_rht_exit(void) diff --git a/lib/timerqueue.c b/lib/timerqueue.c index a382e4a32..782ae8ca2 100644 --- a/lib/timerqueue.c +++ b/lib/timerqueue.c @@ -36,7 +36,7 @@ * Adds the timer node to the timerqueue, sorted by the * node's expires value. */ -void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) +bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) { struct rb_node **p = &head->head.rb_node; struct rb_node *parent = NULL; @@ -56,8 +56,11 @@ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &head->head); - if (!head->next || node->expires.tv64 < head->next->expires.tv64) + if (!head->next || node->expires.tv64 < head->next->expires.tv64) { head->next = node; + return true; + } + return false; } EXPORT_SYMBOL_GPL(timerqueue_add); @@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(timerqueue_add); * * Removes the timer node from the timerqueue. */ -void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) +bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) { WARN_ON_ONCE(RB_EMPTY_NODE(&node->node)); @@ -82,6 +85,7 @@ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node) } rb_erase(&node->node, &head->head); RB_CLEAR_NODE(&node->node); + return head->next != NULL; } EXPORT_SYMBOL_GPL(timerqueue_del); |