#ifndef __NV50_KMS_HEAD_H__
#define __NV50_KMS_HEAD_H__
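
/*
 * Downcast helper: recover the nv50_head wrapping a DRM CRTC embedded in
 * nouveau_crtc, e.g. (illustrative) struct nv50_head *head = nv50_head(crtc);
 */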
#define nv50_head(c) container_of((c), struct nv50_head, base.base)
#include "disp.h"
#include "atom.h"
#include "lut.h"
#include "nouveau_crtc.h"
struct nv50_head {
        const struct nv50_head_func *func;
        struct nouveau_crtc base;
        struct nv50_lut olut;
        /*
         * MST encoder for this head.  MSTOs are allocated per-head rather
         * than per MST connector: each MSTC is attached to the MSTO of every
         * head its parent DP port can drive, which keeps the number of
         * registered DRM encoders within the hardware limit.
         */
        struct nv50_msto *msto;
};
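
/*
 * nv50_head_create() allocates a head and registers its DRM CRTC; the flush
 * helpers apply (flush_set) or tear down (flush_clr) the hardware state
 * described by an nv50_head_atom.
 */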
struct nv50_head *nv50_head_create(struct drm_device *, int index);
void nv50_head_flush_set(struct nv50_head *, struct nv50_head_atom *);
void nv50_head_flush_clr(struct nv50_head *, struct nv50_head_atom *, bool y);
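
/*
 * Per-generation head programming table; generation-independent atomic code
 * dispatches through it.  A minimal sketch of the dispatch pattern used by
 * the flush helpers above (illustrative, not the exact code):
 *
 *        if (asyh->set.view)
 *                head->func->view(head, asyh);
 *        if (asyh->set.mode)
 *                head->func->mode(head, asyh);
 */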
struct nv50_head_func {
        void (*view)(struct nv50_head *, struct nv50_head_atom *);
        void (*mode)(struct nv50_head *, struct nv50_head_atom *);
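        /*
         * Output (post-blend) LUT: olut() validates the requested LUT
         * (returning false if unusable), olut_set()/olut_clr() program or
         * disable it, olut_size is the supported entry count, and
         * olut_identity marks hardware that loads an identity table when no
         * LUT is requested rather than disabling the unit.
         */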
        bool (*olut)(struct nv50_head *, struct nv50_head_atom *, int);
        bool olut_identity;
        int olut_size;
        void (*olut_set)(struct nv50_head *, struct nv50_head_atom *);
        void (*olut_clr)(struct nv50_head *);
        void (*core_calc)(struct nv50_head *, struct nv50_head_atom *);
        void (*core_set)(struct nv50_head *, struct nv50_head_atom *);
        void (*core_clr)(struct nv50_head *);
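        /*
         * Hardware cursor: curs_layout()/curs_format() validate a cursor
         * surface against this generation's limits; curs_set()/curs_clr()
         * point the cursor channel at, or away from, it.
         */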
        int (*curs_layout)(struct nv50_head *, struct nv50_wndw_atom *,
                           struct nv50_head_atom *);
        int (*curs_format)(struct nv50_head *, struct nv50_wndw_atom *,
                           struct nv50_head_atom *);
        void (*curs_set)(struct nv50_head *, struct nv50_head_atom *);
        void (*curs_clr)(struct nv50_head *);
        void (*base)(struct nv50_head *, struct nv50_head_atom *);
        void (*ovly)(struct nv50_head *, struct nv50_head_atom *);
        void (*dither)(struct nv50_head *, struct nv50_head_atom *);
        void (*procamp)(struct nv50_head *, struct nv50_head_atom *);
        void (*or)(struct nv50_head *, struct nv50_head_atom *);
        /*
         * Populate the head's atomic state with the bitmask of windows
         * statically owned by (not merely visible on) this head; called
         * during atomic check when core->assign_windows is set.  gv100+
         * CRC capture relies on this, as its "controlling channel" must be
         * a wndw channel owned by the head being measured.
         */
        void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
};
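
/*
 * Per-class implementations.  Names follow the display class of the first
 * generation exposing the interface (507d: NV50, 827d: G82, 907d: GF110,
 * 917d: GK104, c37d: GV100, c57d: TU102); later generations reuse earlier
 * entry points where the methods are unchanged, hence the per-function
 * declarations below.
 */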
extern const struct nv50_head_func head507d;
void head507d_view(struct nv50_head *, struct nv50_head_atom *);
void head507d_mode(struct nv50_head *, struct nv50_head_atom *);
bool head507d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head507d_core_calc(struct nv50_head *, struct nv50_head_atom *);
void head507d_core_clr(struct nv50_head *);
int head507d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
int head507d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
void head507d_base(struct nv50_head *, struct nv50_head_atom *);
void head507d_ovly(struct nv50_head *, struct nv50_head_atom *);
void head507d_dither(struct nv50_head *, struct nv50_head_atom *);
void head507d_procamp(struct nv50_head *, struct nv50_head_atom *);

extern const struct nv50_head_func head827d;

extern const struct nv50_head_func head907d;
void head907d_view(struct nv50_head *, struct nv50_head_atom *);
void head907d_mode(struct nv50_head *, struct nv50_head_atom *);
bool head907d_olut(struct nv50_head *, struct nv50_head_atom *, int);
void head907d_olut_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_olut_clr(struct nv50_head *);
void head907d_core_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_core_clr(struct nv50_head *);
void head907d_curs_set(struct nv50_head *, struct nv50_head_atom *);
void head907d_curs_clr(struct nv50_head *);
void head907d_ovly(struct nv50_head *, struct nv50_head_atom *);
void head907d_procamp(struct nv50_head *, struct nv50_head_atom *);
void head907d_or(struct nv50_head *, struct nv50_head_atom *);

extern const struct nv50_head_func head917d;
int head917d_curs_layout(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);

extern const struct nv50_head_func headc37d;
void headc37d_view(struct nv50_head *, struct nv50_head_atom *);
void headc37d_core_set(struct nv50_head *, struct nv50_head_atom *);
void headc37d_core_clr(struct nv50_head *);
int headc37d_curs_format(struct nv50_head *, struct nv50_wndw_atom *,
                         struct nv50_head_atom *);
void headc37d_curs_set(struct nv50_head *, struct nv50_head_atom *);
void headc37d_curs_clr(struct nv50_head *);
void headc37d_dither(struct nv50_head *, struct nv50_head_atom *);
void headc37d_static_wndw_map(struct nv50_head *, struct nv50_head_atom *);

extern const struct nv50_head_func headc57d;

#endif