drm/amdgpu: Reset the clear flag in buddy during resume

- Add a handler in the DRM buddy manager that resets the cleared
  flag for all blocks in the freelist.

- This is necessary because, on resume, VRAM is overwritten with
  BIOS data, while the VRAM backend manager still believes all of
  its blocks are cleared; a clear-page allocation would then skip
  the wipe and hand out stale data (see the sketch after this list).

v2:
  - Add lock before accessing drm_buddy_clear_reset_blocks(). (Matthew Auld)
  - Force-merge the two dirty blocks. (Matthew Auld)
  - Add a new unit test case for this issue. (Matthew Auld)
  - Allow the function to flip the clear state in either direction.
    (Matthew Brost)

v3 (Matthew Auld):
  - Do the merge step first to avoid needing an extra reset flag:
    buddies that stayed unmerged only because their clear state
    differed collapse back into their parent first, so the freelist
    walk then flips each resulting block exactly once.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Cc: stable@vger.kernel.org
Fixes: a68c7eaa7a ("drm/amdgpu: Enable clear page functionality")
Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/3812
Signed-off-by: Christian König <christian.koenig@amd.com>
Link: https://lore.kernel.org/r/20250716075125.240637-2-Arunpravin.PaneerSelvam@amd.com
commit 95a16160ca (parent cb345f954e)
Author:    Arunpravin Paneer Selvam
Date:      2025-07-16 13:21:24 +05:30
Committer: Christian König

5 files changed, 65 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -5193,6 +5193,8 @@ exit:
 		dev->dev->power.disable_depth--;
 #endif
 	}
+
+	amdgpu_vram_mgr_clear_reset_blocks(adev);
 	adev->in_suspend = false;
 
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h

@@ -154,6 +154,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
 				  uint64_t start, uint64_t size);
 int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
 				      uint64_t start);
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev);
 bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
 			    struct ttm_resource *res);

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c

@@ -782,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
 	return atomic64_read(&mgr->vis_usage);
 }
 
+/**
+ * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks
+ *
+ * @adev: amdgpu device pointer
+ *
+ * Reset the cleared drm buddy blocks.
+ */
+void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev)
+{
+	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
+	struct drm_buddy *mm = &mgr->mm;
+
+	mutex_lock(&mgr->lock);
+	drm_buddy_reset_clear(mm, false);
+	mutex_unlock(&mgr->lock);
+}
+
 /**
  * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection
  *

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c

@@ -404,6 +404,49 @@ drm_get_buddy(struct drm_buddy_block *block)
 }
 EXPORT_SYMBOL(drm_get_buddy);
 
+/**
+ * drm_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: DRM buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freelist.
+ */
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear)
+{
+	u64 root_size, size, start;
+	unsigned int order;
+	int i;
+
+	size = mm->size;
+	for (i = 0; i < mm->n_roots; ++i) {
+		order = ilog2(size) - ilog2(mm->chunk_size);
+		start = drm_buddy_block_offset(mm->roots[i]);
+		__force_merge(mm, start, start + size, order);
+
+		root_size = mm->chunk_size << order;
+		size -= root_size;
+	}
+
+	for (i = 0; i <= mm->max_order; ++i) {
+		struct drm_buddy_block *block;
+
+		list_for_each_entry_reverse(block, &mm->free_list[i], link) {
+			if (is_clear != drm_buddy_block_is_clear(block)) {
+				if (is_clear) {
+					mark_cleared(block);
+					mm->clear_avail += drm_buddy_block_size(mm, block);
+				} else {
+					clear_reset(block);
+					mm->clear_avail -= drm_buddy_block_size(mm, block);
+				}
+			}
+		}
+	}
+}
+EXPORT_SYMBOL(drm_buddy_reset_clear);
+
 /**
  * drm_buddy_free_block - free a block
  *
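
The v2 changelog mentions a dedicated unit test; it is not part of
this diff (it landed as a separate patch in the same series). A
minimal KUnit-style sketch of what such a test can look like, also
exercising the "flip either way" behaviour Matthew Brost asked for
-- the test name, sizes and assertions here are illustrative, not
the actual test:

	static void drm_test_buddy_reset_clear(struct kunit *test)
	{
		u64 size = SZ_4M, min_size = SZ_4K;
		struct drm_buddy mm;
		LIST_HEAD(allocated);

		KUNIT_ASSERT_FALSE(test, drm_buddy_init(&mm, size, min_size));

		/* Allocate everything with the clear flag, then free it back
		 * as cleared: every freelist block now has the clear bit set. */
		KUNIT_ASSERT_FALSE(test,
				   drm_buddy_alloc_blocks(&mm, 0, size, size, min_size,
							  &allocated,
							  DRM_BUDDY_CLEAR_ALLOCATION));
		drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED);
		KUNIT_EXPECT_EQ(test, mm.clear_avail, size);

		/* Simulate resume: everything must be considered dirty again. */
		drm_buddy_reset_clear(&mm, false);
		KUNIT_EXPECT_EQ(test, mm.clear_avail, 0ULL);

		/* And the other direction, e.g. after a full-device wipe. */
		drm_buddy_reset_clear(&mm, true);
		KUNIT_EXPECT_EQ(test, mm.clear_avail, size);

		drm_buddy_fini(&mm);
	}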

diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h

@@ -160,6 +160,8 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 			 u64 new_size,
 			 struct list_head *blocks);
 
+void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear);
+
 void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *block);
 void drm_buddy_free_list(struct drm_buddy *mm,