drm/radeon: force fence completion only on problematic rings (v2)

Instead of resetting all fence numbers, only reset the number
of the problematic ring. Split out from a patch by
Maarten Lankhorst <maarten.lankhorst@canonical.com>

v2 (agd5f): rebase build fix

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Author: Christian König, 2014-08-27 15:21:56 +02:00 (committed by Alex Deucher)
commit eb98c70990, parent f0d970b4fd
4 changed files with 8 additions and 15 deletions
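
To orient readers before the diff, here is a minimal sketch of the behavioral change (not part of the patch; the identifiers are taken from the hunks below, everything else is illustrative). The old entry point reset the fence sequence number on every initialized ring internally; the new one takes a ring index and acts on that ring alone, so a caller that really wants all rings now loops explicitly, exactly as radeon_gpu_reset does after this change:

	/* before: one call covered every initialized ring */
	radeon_fence_driver_force_completion(rdev);

	/* after: per-ring; loop when "all rings" is actually meant */
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_fence_driver_force_completion(rdev, i);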

drivers/gpu/drm/radeon/radeon.h

@@ -371,7 +371,7 @@ struct radeon_fence {
 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
 int radeon_fence_driver_init(struct radeon_device *rdev);
 void radeon_fence_driver_fini(struct radeon_device *rdev);
-void radeon_fence_driver_force_completion(struct radeon_device *rdev);
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
 void radeon_fence_process(struct radeon_device *rdev, int ring);
 bool radeon_fence_signaled(struct radeon_fence *fence);

drivers/gpu/drm/radeon/radeon_device.c

@@ -1488,7 +1488,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 	struct drm_crtc *crtc;
 	struct drm_connector *connector;
 	int i, r;
-	bool force_completion = false;
 
 	if (dev == NULL || dev->dev_private == NULL) {
 		return -ENODEV;
@@ -1532,12 +1531,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
 		r = radeon_fence_wait_empty(rdev, i);
 		if (r) {
 			/* delay GPU reset to resume */
-			force_completion = true;
+			radeon_fence_driver_force_completion(rdev, i);
 		}
 	}
-	if (force_completion) {
-		radeon_fence_driver_force_completion(rdev);
-	}
 
 	radeon_save_bios_scratch_regs(rdev);
@@ -1722,8 +1718,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 			}
 		}
 	} else {
-		radeon_fence_driver_force_completion(rdev);
 		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+			radeon_fence_driver_force_completion(rdev, i);
 			kfree(ring_data[i]);
 		}
 	}

drivers/gpu/drm/radeon/radeon_fence.c

@@ -758,7 +758,7 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
 		r = radeon_fence_wait_empty(rdev, ring);
 		if (r) {
 			/* no need to trigger GPU reset as we are unloading */
-			radeon_fence_driver_force_completion(rdev);
+			radeon_fence_driver_force_completion(rdev, ring);
 		}
 		wake_up_all(&rdev->fence_queue);
 		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
@@ -771,19 +771,15 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
  * radeon_fence_driver_force_completion - force all fence waiter to complete
  *
  * @rdev: radeon device pointer
+ * @ring: the ring to complete
  *
  * In case of GPU reset failure make sure no process keep waiting on fence
  * that will never complete.
  */
-void radeon_fence_driver_force_completion(struct radeon_device *rdev)
+void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring)
 {
-	int ring;
-
-	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
-		if (!rdev->fence_drv[ring].initialized)
-			continue;
+	if (rdev->fence_drv[ring].initialized)
 		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
-	}
 }
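
For readers unfamiliar with the fence bookkeeping behind this function: sync_seq[ring] tracks the highest sequence number emitted on a ring, and a fence counts as signaled once the value read back from the fence location reaches the fence's own number, so the single radeon_fence_write() above makes every outstanding fence on that ring compare as signaled. A self-contained sketch of that comparison model (the types, field names and values here are stand-ins for illustration; the real driver also handles sequence wraparound, omitted here):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the per-ring bookkeeping: emitted vs. reported seqno. */
	struct fence_state {
		uint64_t sync_seq;	/* highest sequence number emitted */
		uint64_t last_seq;	/* value read back from the fence location */
	};

	/* A fence is treated as signaled once the reported value reaches it. */
	static bool seq_signaled(const struct fence_state *s, uint64_t seq)
	{
		return s->last_seq >= seq;
	}

	int main(void)
	{
		struct fence_state s = { .sync_seq = 42, .last_seq = 40 };

		printf("fence 41 signaled? %d\n", seq_signaled(&s, 41)); /* 0: stuck */

		/* forced completion: report the last emitted seqno as done */
		s.last_seq = s.sync_seq;

		printf("fence 41 signaled? %d\n", seq_signaled(&s, 41)); /* 1 */
		printf("fence 42 signaled? %d\n", seq_signaled(&s, 42)); /* 1 */
		return 0;
	}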

drivers/gpu/drm/radeon/radeon_ib.c

@@ -269,6 +269,7 @@ int radeon_ib_ring_tests(struct radeon_device *rdev)
 		r = radeon_ib_test(rdev, i, ring);
 		if (r) {
+			radeon_fence_driver_force_completion(rdev, i);
 			ring->ready = false;
 			rdev->needs_reset = false;
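
One detail worth noting in this final hunk: the forced completion runs before the ring is marked not ready, so outstanding fences on the failed ring are marked complete before the ring is taken out of service, preventing waiters from blocking on sequence numbers that will never be reached. Condensed (identifiers come from the hunk above; the surrounding loop is assumed and not shown):

	r = radeon_ib_test(rdev, i, ring);
	if (r) {
		/* mark every outstanding fence on ring i complete ... */
		radeon_fence_driver_force_completion(rdev, i);
		/* ... then take the ring out of service */
		ring->ready = false;
	}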