drm/i915: Use i915_global_register()

Rather than manually adding every new global to each hook, use the
i915_global_register() function and keep a list of registered globals
to invoke instead.

However, I haven't found a way for random drivers to add an .init
table, so we still have to add ourselves to i915_globals_init()
manually each time.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190305213830.18094-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Chris Wilson 2019-03-05 21:38:30 +00:00
parent d846325ad0
commit 103b76eeff
14 changed files with 174 additions and 137 deletions
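
Before the per-file hunks, here is a condensed sketch of the core mechanism this patch adds in i915_globals.h and i915_globals.c: each subsystem wraps its slab caches in a structure carrying shrink and exit callbacks and registers it on a driver-private list, which the common shrink and cleanup paths then walk instead of naming every subsystem. The bodies below are abbreviated from the patch, not quoted verbatim.

#include <linux/init.h>
#include <linux/list.h>

typedef void (*i915_global_func_t)(void);

struct i915_global {
	struct list_head link;		/* entry on the driver-private "globals" list */
	i915_global_func_t shrink;	/* kmem_cache_shrink() the subsystem's slabs */
	i915_global_func_t exit;	/* kmem_cache_destroy() them on module unload */
};

static LIST_HEAD(globals);

void __init i915_global_register(struct i915_global *global)
{
	/* Called from each subsystem constructor instead of hard-coding it here. */
	list_add_tail(&global->link, &globals);
}

static void i915_globals_shrink(void)
{
	struct i915_global *global;

	/* Discard empty slabs from every registered cache. */
	list_for_each_entry(global, &globals, link)
		global->shrink();
}

static void __i915_globals_cleanup(void)
{
	struct i915_global *global, *next;

	/* Tear down in reverse registration order, on init failure or exit. */
	list_for_each_entry_safe_reverse(global, next, &globals, link)
		global->exit();
}

Each subsystem then embeds struct i915_global as the first member of its own static global and passes &global.base to i915_global_register() from its constructor, as the hunks below show.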

drivers/gpu/drm/i915/i915_active.c

@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
@@ -17,6 +18,7 @@
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
struct i915_global base;
struct kmem_cache *slab_cache;
} global;
@@ -285,21 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
#include "selftests/i915_active.c"
#endif
static void i915_global_active_shrink(void)
{
kmem_cache_shrink(global.slab_cache);
}
static void i915_global_active_exit(void)
{
kmem_cache_destroy(global.slab_cache);
}
static struct i915_global_active global = { {
.shrink = i915_global_active_shrink,
.exit = i915_global_active_exit,
} };
int __init i915_global_active_init(void)
{
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
if (!global.slab_cache)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void i915_global_active_shrink(void)
{
kmem_cache_shrink(global.slab_cache);
}
void i915_global_active_exit(void)
{
kmem_cache_destroy(global.slab_cache);
}

drivers/gpu/drm/i915/i915_active.h

@@ -419,8 +419,4 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
int i915_global_active_init(void);
void i915_global_active_shrink(void);
void i915_global_active_exit(void);
#endif /* _I915_ACTIVE_H_ */

drivers/gpu/drm/i915/i915_gem_context.c

@@ -88,6 +88,7 @@
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_trace.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
@@ -95,6 +96,7 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
static struct i915_global_context {
struct i915_global base;
struct kmem_cache *slab_luts;
} global;
@@ -1423,21 +1425,27 @@ int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx)
#include "selftests/i915_gem_context.c"
#endif
static void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_luts);
}
static void i915_global_context_exit(void)
{
kmem_cache_destroy(global.slab_luts);
}
static struct i915_global_context global = { {
.shrink = i915_global_context_shrink,
.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
if (!global.slab_luts)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void i915_global_context_shrink(void)
{
kmem_cache_shrink(global.slab_luts);
}
void i915_global_context_exit(void)
{
kmem_cache_destroy(global.slab_luts);
}

drivers/gpu/drm/i915/i915_gem_context.h

@@ -411,8 +411,4 @@ void intel_context_init(struct intel_context *ce,
struct i915_lut_handle *i915_lut_handle_alloc(void);
void i915_lut_handle_free(struct i915_lut_handle *lut);
int i915_global_context_init(void);
void i915_global_context_shrink(void);
void i915_global_context_exit(void);
#endif /* !__I915_GEM_CONTEXT_H__ */

drivers/gpu/drm/i915/i915_gem_object.c

@@ -24,8 +24,10 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_globals.h"
static struct i915_global_object {
struct i915_global base;
struct kmem_cache *slab_objects;
} global;
@@ -61,6 +63,21 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
static void i915_global_objects_shrink(void)
{
kmem_cache_shrink(global.slab_objects);
}
static void i915_global_objects_exit(void)
{
kmem_cache_destroy(global.slab_objects);
}
static struct i915_global_object global = { {
.shrink = i915_global_objects_shrink,
.exit = i915_global_objects_exit,
} };
int __init i915_global_objects_init(void)
{
global.slab_objects =
@@ -68,15 +85,6 @@ int __init i915_global_objects_init(void)
if (!global.slab_objects)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void i915_global_objects_shrink(void)
{
kmem_cache_shrink(global.slab_objects);
}
void i915_global_objects_exit(void)
{
kmem_cache_destroy(global.slab_objects);
}

drivers/gpu/drm/i915/i915_gem_object.h

@@ -502,8 +502,4 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
int i915_global_objects_init(void);
void i915_global_objects_shrink(void);
void i915_global_objects_exit(void);
#endif

drivers/gpu/drm/i915/i915_globals.c

@@ -15,62 +15,61 @@
#include "i915_scheduler.h"
#include "i915_vma.h"
static LIST_HEAD(globals);
void __init i915_global_register(struct i915_global *global)
{
GEM_BUG_ON(!global->shrink);
GEM_BUG_ON(!global->exit);
list_add_tail(&global->link, &globals);
}
static void __i915_globals_cleanup(void)
{
struct i915_global *global, *next;
list_for_each_entry_safe_reverse(global, next, &globals, link)
global->exit();
}
static __initconst int (* const initfn[])(void) = {
i915_global_active_init,
i915_global_context_init,
i915_global_objects_init,
i915_global_request_init,
i915_global_scheduler_init,
i915_global_vma_init,
};
int __init i915_globals_init(void)
{
int err;
int i;
err = i915_global_active_init();
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(initfn); i++) {
int err;
err = i915_global_context_init();
if (err)
goto err_active;
err = i915_global_objects_init();
if (err)
goto err_context;
err = i915_global_request_init();
if (err)
goto err_objects;
err = i915_global_scheduler_init();
if (err)
goto err_request;
err = i915_global_vma_init();
if (err)
goto err_scheduler;
err = initfn[i]();
if (err) {
__i915_globals_cleanup();
return err;
}
}
return 0;
err_scheduler:
i915_global_scheduler_exit();
err_request:
i915_global_request_exit();
err_objects:
i915_global_objects_exit();
err_context:
i915_global_context_exit();
err_active:
i915_global_active_exit();
return err;
}
static void i915_globals_shrink(void)
{
struct i915_global *global;
/*
* kmem_cache_shrink() discards empty slabs and reorders partially
* filled slabs to prioritise allocating from the mostly full slabs,
* with the aim of reducing fragmentation.
*/
i915_global_active_shrink();
i915_global_context_shrink();
i915_global_objects_shrink();
i915_global_request_shrink();
i915_global_scheduler_shrink();
i915_global_vma_shrink();
list_for_each_entry(global, &globals, link)
global->shrink();
}
static atomic_t active;
@@ -128,12 +127,7 @@ void __exit i915_globals_exit(void)
rcu_barrier();
flush_scheduled_work();
i915_global_vma_exit();
i915_global_scheduler_exit();
i915_global_request_exit();
i915_global_objects_exit();
i915_global_context_exit();
i915_global_active_exit();
__i915_globals_cleanup();
/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
rcu_barrier();

drivers/gpu/drm/i915/i915_globals.h

@@ -7,9 +7,28 @@
#ifndef _I915_GLOBALS_H_
#define _I915_GLOBALS_H_
typedef void (*i915_global_func_t)(void);
struct i915_global {
struct list_head link;
i915_global_func_t shrink;
i915_global_func_t exit;
};
void i915_global_register(struct i915_global *global);
int i915_globals_init(void);
void i915_globals_park(void);
void i915_globals_unpark(void);
void i915_globals_exit(void);
/* constructors */
int i915_global_active_init(void);
int i915_global_context_init(void);
int i915_global_objects_init(void);
int i915_global_request_init(void);
int i915_global_scheduler_init(void);
int i915_global_vma_init(void);
#endif /* _I915_GLOBALS_H_ */
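
To make the commit message's caveat concrete, a hypothetical new subsystem converted to this scheme would look roughly as follows. The i915_global_foo naming and the struct foo slab are invented purely for illustration; as noted above, its constructor would still have to be listed by hand in the initfn[] table in i915_globals.c.

/* Hypothetical example -- "foo" is not part of this patch. */
#include <linux/init.h>
#include <linux/slab.h>

#include "i915_globals.h"

struct foo {
	unsigned long payload;	/* placeholder object backed by the slab */
};

static void i915_global_foo_shrink(void);
static void i915_global_foo_exit(void);

static struct i915_global_foo {
	struct i915_global base;
	struct kmem_cache *slab_foo;
} global = { {
	.shrink = i915_global_foo_shrink,
	.exit = i915_global_foo_exit,
} };

static void i915_global_foo_shrink(void)
{
	kmem_cache_shrink(global.slab_foo);
}

static void i915_global_foo_exit(void)
{
	kmem_cache_destroy(global.slab_foo);
}

int __init i915_global_foo_init(void)
{
	global.slab_foo = KMEM_CACHE(foo, 0);
	if (!global.slab_foo)
		return -ENOMEM;

	/* Hook into the common shrink/exit walks... */
	i915_global_register(&global.base);
	/* ...but i915_global_foo_init() itself still goes into initfn[] by hand. */
	return 0;
}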

drivers/gpu/drm/i915/i915_request.c

@@ -31,6 +31,7 @@
#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"
#include "i915_reset.h"
struct execute_cb {
@@ -40,6 +41,7 @@ struct execute_cb {
};
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
@@ -1338,6 +1340,25 @@ void i915_retire_requests(struct drm_i915_private *i915)
#include "selftests/i915_request.c"
#endif
static void i915_global_request_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
static struct i915_global_request global = { {
.shrink = i915_global_request_shrink,
.exit = i915_global_request_exit,
} };
int __init i915_global_request_init(void)
{
global.slab_requests = KMEM_CACHE(i915_request,
@@ -1360,6 +1381,7 @@ int __init i915_global_request_init(void)
if (!global.slab_dependencies)
goto err_execute_cbs;
i915_global_register(&global.base);
return 0;
err_execute_cbs:
@@ -1368,17 +1390,3 @@ int __init i915_global_request_init(void)
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
}
void i915_global_request_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
void i915_global_request_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}

drivers/gpu/drm/i915/i915_request.h

@@ -406,8 +406,4 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
void i915_retire_requests(struct drm_i915_private *i915);
int i915_global_request_init(void);
void i915_global_request_shrink(void);
void i915_global_request_exit(void);
#endif /* I915_REQUEST_H */

drivers/gpu/drm/i915/i915_scheduler.c

@@ -7,10 +7,12 @@
#include <linux/mutex.h>
#include "i915_drv.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
static struct i915_global_scheduler {
struct i915_global base;
struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_priorities;
} global;
@@ -437,6 +439,23 @@ void __i915_priolist_free(struct i915_priolist *p)
kmem_cache_free(global.slab_priorities, p);
}
static void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_priorities);
}
static void i915_global_scheduler_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_priorities);
}
static struct i915_global_scheduler global = { {
.shrink = i915_global_scheduler_shrink,
.exit = i915_global_scheduler_exit,
} };
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
@@ -449,21 +468,10 @@ int __init i915_global_scheduler_init(void)
if (!global.slab_priorities)
goto err_priorities;
i915_global_register(&global.base);
return 0;
err_priorities:
kmem_cache_destroy(global.slab_priorities);
return -ENOMEM;
}
void i915_global_scheduler_shrink(void)
{
kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_priorities);
}
void i915_global_scheduler_exit(void)
{
kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_priorities);
}

drivers/gpu/drm/i915/i915_scheduler.h

@@ -134,8 +134,4 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
int i915_global_scheduler_init(void);
void i915_global_scheduler_shrink(void);
void i915_global_scheduler_exit(void);
#endif /* _I915_SCHEDULER_H_ */

drivers/gpu/drm/i915/i915_vma.c

@@ -25,12 +25,14 @@
#include "i915_vma.h"
#include "i915_drv.h"
#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
static struct i915_global_vma {
struct i915_global base;
struct kmem_cache *slab_vmas;
} global;
@@ -1054,21 +1056,27 @@ int i915_vma_unbind(struct i915_vma *vma)
#include "selftests/i915_vma.c"
#endif
static void i915_global_vma_shrink(void)
{
kmem_cache_shrink(global.slab_vmas);
}
static void i915_global_vma_exit(void)
{
kmem_cache_destroy(global.slab_vmas);
}
static struct i915_global_vma global = { {
.shrink = i915_global_vma_shrink,
.exit = i915_global_vma_exit,
} };
int __init i915_global_vma_init(void)
{
global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
if (!global.slab_vmas)
return -ENOMEM;
i915_global_register(&global.base);
return 0;
}
void i915_global_vma_shrink(void)
{
kmem_cache_shrink(global.slab_vmas);
}
void i915_global_vma_exit(void)
{
kmem_cache_destroy(global.slab_vmas);
}

drivers/gpu/drm/i915/i915_vma.h

@@ -443,8 +443,4 @@ void i915_vma_parked(struct drm_i915_private *i915);
struct i915_vma *i915_vma_alloc(void);
void i915_vma_free(struct i915_vma *vma);
int i915_global_vma_init(void);
void i915_global_vma_shrink(void);
void i915_global_vma_exit(void);
#endif