accel/ivpu: replace use of system_wq with system_percpu_wq
Currently, if a user enqueues a work item with schedule_delayed_work(), the workqueue used is "system_wq" (the per-cpu wq), while queue_delayed_work() uses WORK_CPU_UNBOUND (used when no CPU is specified). The same applies to schedule_work(), which uses system_wq, and queue_work(), which again relies on WORK_CPU_UNBOUND. This lack of consistency cannot be addressed without refactoring the API.

system_wq should be the per-cpu workqueue, yet nothing in its name makes that clear, so replace system_wq with system_percpu_wq. The old wq (system_wq) will be kept for a few release cycles.

Suggested-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Marco Crivellari <marco.crivellari@suse.com>
Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Link: https://patch.msgid.link/20251029165642.364488-3-marco.crivellari@suse.com
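As a rough illustration of the conversion pattern described above (a minimal sketch only; my_dev, my_irq_work, and my_timeout_work are hypothetical names, not part of this patch), a driver that previously queued work on system_wq now names system_percpu_wq explicitly, with no intended behavioural change since both refer to the per-cpu workqueue:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Hypothetical device structure, used only for this sketch. */
struct my_dev {
	struct work_struct my_irq_work;
	struct delayed_work my_timeout_work;
};

static void my_queue_irq_work(struct my_dev *dev)
{
	/* Before: queue_work(system_wq, &dev->my_irq_work); */
	queue_work(system_percpu_wq, &dev->my_irq_work);
}

static void my_arm_timeout(struct my_dev *dev, unsigned long timeout_ms)
{
	/* Before: queue_delayed_work(system_wq, &dev->my_timeout_work, ...); */
	queue_delayed_work(system_percpu_wq, &dev->my_timeout_work,
			   msecs_to_jiffies(timeout_ms));
}

Only the workqueue identifier changes at each call site; the work items, their handlers, and their queueing semantics stay the same.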
commit 0f5b2982c3
parent 7b1a70c33d
committed by Karol Wachowski
@@ -673,7 +673,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq)
 
 	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) {
 		ivpu_dbg(vdev, IRQ, "Survivability IRQ\n");
-		queue_work(system_wq, &vdev->irq_dct_work);
+		queue_work(system_percpu_wq, &vdev->irq_dct_work);
 	}
 
 	if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) {
@@ -459,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev)
 		}
 	}
 
-	queue_work(system_wq, &vdev->irq_ipc_work);
+	queue_work(system_percpu_wq, &vdev->irq_ipc_work);
 }
 
 void ivpu_ipc_irq_work_fn(struct work_struct *work)
@@ -591,7 +591,7 @@ bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_
		 * status and ensure both are handled in the same way
		 */
		job->file_priv->has_mmu_faults = true;
-		queue_work(system_wq, &vdev->context_abort_work);
+		queue_work(system_percpu_wq, &vdev->context_abort_work);
		return true;
	}
	default:
@@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 		}
 	}
 
-	queue_work(system_wq, &vdev->context_abort_work);
+	queue_work(system_percpu_wq, &vdev->context_abort_work);
 }
 
 void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
@@ -226,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
 	unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
 
 	/* No-op if already queued */
-	queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms));
+	queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work,
+			   msecs_to_jiffies(timeout_ms));
 }
 
 void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev)