md/md-bitmap: merge md_bitmap_close_sync() into bitmap_operations
So that the implementation won't be exposed, and it will become possible to introduce a new bitmap by replacing bitmap_operations. Also change the parameter from bitmap to mddev, to avoid accessing the bitmap outside md-bitmap.c as much as possible.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Link: https://lore.kernel.org/r/20240826074452.1490072-29-yukuai1@huaweicloud.com
Signed-off-by: Song Liu <song@kernel.org>
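The commit converts callers from the exported helper md_bitmap_close_sync() to the close_sync hook in mddev->bitmap_ops. As rough orientation, here is a minimal userspace sketch in plain C (not kernel code; the structures are heavily simplified, and default_close_sync / default_bitmap_ops are invented names for the example) of the ops-table pattern the series moves toward: a personality holds only an mddev, reaches the bitmap through function pointers, and struct bitmap stays private to the implementation, so an alternative bitmap could be plugged in by supplying its own bitmap_operations.

#include <stdio.h>

struct mddev;

/* Table of bitmap hooks; only close_sync is shown. */
struct bitmap_operations {
        void (*close_sync)(struct mddev *mddev);
};

struct mddev {
        const struct bitmap_operations *bitmap_ops;
        void *bitmap;   /* opaque outside the bitmap implementation */
};

/* Default provider: keeps its bitmap details to itself. */
static void default_close_sync(struct mddev *mddev)
{
        printf("close_sync: bitmap=%p\n", mddev->bitmap);
}

static const struct bitmap_operations default_bitmap_ops = {
        .close_sync = default_close_sync,
};

int main(void)
{
        struct mddev mddev = { .bitmap_ops = &default_bitmap_ops, .bitmap = NULL };

        /* What raid1/raid10/raid5 now do at the end of a resync: */
        mddev.bitmap_ops->close_sync(&mddev);
        return 0;
}

The diff below applies exactly this shape of change: bitmap_close_sync() becomes a static hook wired into bitmap_ops, and the raid personalities call it through mddev->bitmap_ops->close_sync(mddev).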
drivers/md/md-bitmap.c

@@ -1671,7 +1671,7 @@ static void bitmap_end_sync(struct mddev *mddev, sector_t offset,
         __bitmap_end_sync(mddev->bitmap, offset, blocks, true);
 }
 
-void md_bitmap_close_sync(struct bitmap *bitmap)
+static void bitmap_close_sync(struct mddev *mddev)
 {
         /* Sync has finished, and any bitmap chunks that weren't synced
          * properly have been aborted. It remains to us to clear the
@@ -1679,14 +1679,16 @@ void md_bitmap_close_sync(struct bitmap *bitmap)
          */
         sector_t sector = 0;
         sector_t blocks;
+        struct bitmap *bitmap = mddev->bitmap;
+
         if (!bitmap)
                 return;
+
         while (sector < bitmap->mddev->resync_max_sectors) {
                 __bitmap_end_sync(bitmap, sector, &blocks, false);
                 sector += blocks;
         }
 }
-EXPORT_SYMBOL(md_bitmap_close_sync);
 
 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
 {
@@ -2017,7 +2019,7 @@ static int bitmap_load(struct mddev *mddev)
                 bitmap_start_sync(mddev, sector, &blocks, false);
                 sector += blocks;
         }
-        md_bitmap_close_sync(bitmap);
+        bitmap_close_sync(mddev);
 
         if (mddev->degraded == 0
             || bitmap->events_cleared == mddev->events)
@@ -2745,6 +2747,7 @@ static struct bitmap_operations bitmap_ops = {
         .endwrite = bitmap_endwrite,
         .start_sync = bitmap_start_sync,
         .end_sync = bitmap_end_sync,
+        .close_sync = bitmap_close_sync,
 
         .update_sb = bitmap_update_sb,
         .get_stats = bitmap_get_stats,
drivers/md/md-bitmap.h

@@ -262,6 +262,7 @@ struct bitmap_operations {
         bool (*start_sync)(struct mddev *mddev, sector_t offset,
                            sector_t *blocks, bool degraded);
         void (*end_sync)(struct mddev *mddev, sector_t offset, sector_t *blocks);
+        void (*close_sync)(struct mddev *mddev);
 
         void (*update_sb)(struct bitmap *bitmap);
         int (*get_stats)(struct bitmap *bitmap, struct md_bitmap_stats *stats);
@@ -271,7 +272,6 @@ struct bitmap_operations {
 void mddev_set_bitmap_ops(struct mddev *mddev);
 
 /* these are exported */
-void md_bitmap_close_sync(struct bitmap *bitmap);
 void md_bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force);
 void md_bitmap_sync_with_cluster(struct mddev *mddev,
                                  sector_t old_lo, sector_t old_hi,
drivers/md/raid1.c

@@ -2777,7 +2777,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
                 else /* completed sync */
                         conf->fullsync = 0;
 
-                md_bitmap_close_sync(mddev->bitmap);
+                mddev->bitmap_ops->close_sync(mddev);
                 close_sync(conf);
 
                 if (mddev_is_clustered(mddev)) {
drivers/md/raid10.c

@@ -3222,7 +3222,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                         }
                         conf->fullsync = 0;
                 }
-                md_bitmap_close_sync(mddev->bitmap);
+                mddev->bitmap_ops->close_sync(mddev);
                 close_sync(conf);
                 *skipped = 1;
                 return sectors_skipped;
drivers/md/raid5.c

@@ -6501,7 +6501,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n
                                             &sync_blocks);
                 else /* completed sync */
                         conf->fullsync = 0;
-                md_bitmap_close_sync(mddev->bitmap);
+                mddev->bitmap_ops->close_sync(mddev);
 
                 return 0;
         }