From e16a4aafb3682fdd8126d8fa9705196638336ce0 Mon Sep 17 00:00:00 2001 From: Ramesh Adhikari Date: Mon, 27 Apr 2026 20:47:42 +0530 Subject: [PATCH] badblocks: fix infinite loop due to incorrect rounding and overflow The roundup() and rounddown() macros return the rounded value but do not modify the input in place. In _badblocks_set(), _badblocks_clear(), and badblocks_check(), the return values were being discarded, causing s and target/next to remain unrounded. This resulted in sectors being calculated from unrounded values, which could make sectors far too large (or zero), causing infinite loops in the re_insert/re_clear/re_check loops. Additionally, add integer overflow checks (s > ULLONG_MAX - sectors) before the s + sectors calculation in all three functions to prevent overflow-related issues. Also add an early return when sectors reaches zero after rounding in badblocks_check(). Root cause: When s and sectors have specific values (e.g., from syzkaller fuzzing via the nvdimm ioctl), the unrounded values cause sectors to be calculated incorrectly. In _badblocks_clear(), this could result in needing 2^46 iterations to process 2^55 sectors, triggering RCU stall warnings and effectively hanging the kernel. Fix by properly capturing the return values from roundup() and rounddown(), adding overflow checks before the sector arithmetic, and handling the zero-sectors case in badblocks_check(). 
Signed-off-by: Ramesh Adhikari --- block/badblocks.c | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/block/badblocks.c b/block/badblocks.c index ece64e76fe8ff..a5ffae65a05a0 100644 --- a/block/badblocks.c +++ b/block/badblocks.c @@ -855,13 +855,21 @@ static bool _badblocks_set(struct badblocks *bb, sector_t s, sector_t sectors, if (bb->shift) { /* round the start down, and the end up */ + if (s > ULLONG_MAX - sectors) + return false; sector_t next = s + sectors; - rounddown(s, 1 << bb->shift); - roundup(next, 1 << bb->shift); - sectors = next - s; + s = rounddown(s, 1 << bb->shift); + next = roundup(next, 1 << bb->shift); + if (next < s) + sectors = 0; + else + sectors = next - s; } + if (sectors == 0) + return false; + write_seqlock_irqsave(&bb->lock, flags); bad.ack = acknowledged; @@ -1070,12 +1078,20 @@ static bool _badblocks_clear(struct badblocks *bb, sector_t s, sector_t sectors) * However it is better the think a block is bad when it * isn't than to think a block is not bad when it is. */ + if (s > ULLONG_MAX - sectors) + return false; target = s + sectors; - roundup(s, 1 << bb->shift); - rounddown(target, 1 << bb->shift); - sectors = target - s; + s = roundup(s, 1 << bb->shift); + target = rounddown(target, 1 << bb->shift); + if (target < s) + sectors = 0; + else + sectors = target - s; } + if (sectors == 0) + return false; + write_seqlock_irq(&bb->lock); bad.ack = true; @@ -1305,11 +1321,20 @@ int badblocks_check(struct badblocks *bb, sector_t s, sector_t sectors, if (bb->shift > 0) { /* round the start down, and the end up */ + if (s > ULLONG_MAX - sectors) { + return -EINVAL; + } sector_t target = s + sectors; - rounddown(s, 1 << bb->shift); - roundup(target, 1 << bb->shift); - sectors = target - s; + s = rounddown(s, 1 << bb->shift); + target = roundup(target, 1 << bb->shift); + if (target < s) + sectors = 0; + else + sectors = target - s; + + if (sectors == 0) + return 0; } retry: