nvc0: move HW SM queries to nvc0_query_hw_sm.c/h files

Global performance counters (PCOUNTER) will be added to
nvc0_query_hw_pm.c/h files.

Signed-off-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Samuel Pitoiset 2015-10-04 23:43:20 +02:00
parent 224fec05ea
commit 7129cbf5f4
8 changed files with 908 additions and 796 deletions
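The core of the refactor is a small per-query function table: nvc0_query_hw.c no longer knows anything about SM counters and simply forwards begin/end/result calls through hq->funcs whenever it is set. A condensed sketch of that dispatch pattern, pieced together from the hunks below (illustration only, not part of the diff):

/* Condensed from nvc0_query_hw.h/.c below: HW SM queries install a
 * function table and the generic HW-query code forwards to it. */
struct nvc0_hw_query_funcs {
   void (*destroy_query)(struct nvc0_context *, struct nvc0_hw_query *);
   boolean (*begin_query)(struct nvc0_context *, struct nvc0_hw_query *);
   void (*end_query)(struct nvc0_context *, struct nvc0_hw_query *);
   boolean (*get_query_result)(struct nvc0_context *, struct nvc0_hw_query *,
                               boolean, union pipe_query_result *);
};

static boolean
nvc0_hw_begin_query_sketch(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
   if (hq->funcs && hq->funcs->begin_query)  /* HW SM queries take this path */
      return hq->funcs->begin_query(nvc0, hq);
   /* ... generic begin path for all other hardware queries ... */
   return true;
}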

src/gallium/drivers/nouveau/Makefile.sources

@@ -154,6 +154,8 @@ NVC0_C_SOURCES := \
nvc0/nvc0_query.h \
nvc0/nvc0_query_hw.c \
nvc0/nvc0_query_hw.h \
nvc0/nvc0_query_hw_sm.c \
nvc0/nvc0_query_hw_sm.h \
nvc0/nvc0_query_sw.c \
nvc0/nvc0_query_sw.h \
nvc0/nvc0_resource.c \

src/gallium/drivers/nouveau/nvc0/nvc0_query.c

@@ -28,6 +28,7 @@
#include "nvc0/nvc0_query.h"
#include "nvc0/nvc0_query_sw.h"
#include "nvc0/nvc0_query_hw.h"
#include "nvc0/nvc0_query_hw_sm.h"
static struct pipe_query *
nvc0_create_query(struct pipe_context *pipe, unsigned type, unsigned index)
@@ -182,7 +183,7 @@ static const char *nvc0_sw_query_drv_stat_names[] =
/* === PERFORMANCE MONITORING COUNTERS for NVE4+ === */
/* NOTE: intentionally using the same names as NV */
static const char *nve4_pm_query_names[] =
static const char *nve4_hw_sm_query_names[] =
{
/* MP counters */
"active_cycles",
@@ -238,7 +239,7 @@ static const char *nve4_pm_query_names[] =
};
/* === PERFORMANCE MONITORING COUNTERS for NVC0:NVE4 === */
static const char *nvc0_pm_query_names[] =
static const char *nvc0_hw_sm_query_names[] =
{
/* MP counters */
"active_cycles",
@@ -320,17 +321,17 @@ nvc0_screen_get_driver_query_info(struct pipe_screen *pscreen,
if (id < count) {
if (screen->compute) {
if (screen->base.class_3d == NVE4_3D_CLASS) {
info->name = nve4_pm_query_names[id - NVC0_SW_QUERY_DRV_STAT_COUNT];
info->name = nve4_hw_sm_query_names[id - NVC0_SW_QUERY_DRV_STAT_COUNT];
info->query_type = NVE4_HW_SM_QUERY(id - NVC0_SW_QUERY_DRV_STAT_COUNT);
info->max_value.u64 =
(id < NVE4_HW_SM_QUERY_METRIC_MP_OCCUPANCY) ? 0 : 100;
info->group_id = NVC0_QUERY_MP_COUNTER_GROUP;
info->group_id = NVC0_HW_SM_QUERY_GROUP;
return 1;
} else
if (screen->base.class_3d < NVE4_3D_CLASS) {
info->name = nvc0_pm_query_names[id - NVC0_SW_QUERY_DRV_STAT_COUNT];
info->name = nvc0_hw_sm_query_names[id - NVC0_SW_QUERY_DRV_STAT_COUNT];
info->query_type = NVC0_HW_SM_QUERY(id - NVC0_SW_QUERY_DRV_STAT_COUNT);
info->group_id = NVC0_QUERY_MP_COUNTER_GROUP;
info->group_id = NVC0_HW_SM_QUERY_GROUP;
return 1;
}
}
@@ -365,7 +366,7 @@ nvc0_screen_get_driver_query_group_info(struct pipe_screen *pscreen,
if (!info)
return count;
if (id == NVC0_QUERY_MP_COUNTER_GROUP) {
if (id == NVC0_HW_SM_QUERY_GROUP) {
if (screen->compute) {
info->name = "MP counters";
info->type = PIPE_DRIVER_QUERY_GROUP_TYPE_GPU;
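The id arithmetic above leans on the NVE4_HW_SM_QUERY()/NVC0_HW_SM_QUERY() macros that this commit moves into nvc0_query_hw_sm.h (shown further down); they bias an SM-counter index into PIPE_QUERY_DRIVER_SPECIFIC space. A hypothetical helper, not in the commit, that spells out the mapping:

/* Illustration only: map a flat driver-query id to the
 * driver-specific HW SM query type, as done inline above. */
static unsigned
hw_sm_query_type(unsigned id, bool is_nve4)
{
   const unsigned i = id - NVC0_SW_QUERY_DRV_STAT_COUNT;
   return is_nve4 ? NVE4_HW_SM_QUERY(i) : NVC0_HW_SM_QUERY(i);
}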

src/gallium/drivers/nouveau/nvc0/nvc0_query.h

@@ -31,7 +31,7 @@ nvc0_query(struct pipe_query *pipe)
/*
* Driver queries groups:
*/
#define NVC0_QUERY_MP_COUNTER_GROUP 0
#define NVC0_HW_SM_QUERY_GROUP 0
#define NVC0_SW_QUERY_DRV_STAT_GROUP 1
void nvc0_init_query_functions(struct nvc0_context *);

src/gallium/drivers/nouveau/nvc0/nvc0_query_hw.c

@@ -25,10 +25,7 @@
#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_query_hw.h"
#include "nv_object.xml.h"
#include "nvc0/nve4_compute.xml.h"
#include "nvc0/nvc0_compute.xml.h"
#include "nvc0/nvc0_query_hw_sm.h"
#define NVC0_HW_QUERY_STATE_READY 0
#define NVC0_HW_QUERY_STATE_ACTIVE 1
@@ -37,632 +34,7 @@
#define NVC0_HW_QUERY_ALLOC_SPACE 256
/* === PERFORMANCE MONITORING COUNTERS for NVE4+ === */
/* Code to read out MP counters: They are accessible via mmio, too, but let's
* just avoid mapping registers in userspace. We'd have to know which MPs are
* enabled/present, too, and that information is not presently exposed.
* We could add a kernel interface for it, but reading the counters like this
* has the advantage of being async (if get_result isn't called immediately).
*/
static const uint64_t nve4_read_hw_sm_counters_code[] =
{
/* sched 0x20 0x20 0x20 0x20 0x20 0x20 0x20
* mov b32 $r8 $tidx
* mov b32 $r12 $physid
* mov b32 $r0 $pm0
* mov b32 $r1 $pm1
* mov b32 $r2 $pm2
* mov b32 $r3 $pm3
* mov b32 $r4 $pm4
* sched 0x20 0x20 0x23 0x04 0x20 0x04 0x2b
* mov b32 $r5 $pm5
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
* mov b32 $r10 c0[0x0]
* ext u32 $r8 $r12 0x414
* mov b32 $r11 c0[0x4]
* sched 0x04 0x2e 0x04 0x20 0x20 0x28 0x04
* ext u32 $r9 $r12 0x208
* (not $p0) exit
* set $p1 0x1 eq u32 $r9 0x0
* mul $r8 u32 $r8 u32 96
* mul $r12 u32 $r9 u32 16
* mul $r13 u32 $r9 u32 4
* add b32 $r9 $r8 $r13
* sched 0x28 0x04 0x2c 0x04 0x2c 0x04 0x2c
* add b32 $r8 $r8 $r12
* mov b32 $r12 $r10
* add b32 $r10 $c $r10 $r8
* mov b32 $r13 $r11
* add b32 $r11 $r11 0x0 $c
* add b32 $r12 $c $r12 $r9
* st b128 wt g[$r10d] $r0q
* sched 0x4 0x2c 0x20 0x04 0x2e 0x00 0x00
* mov b32 $r0 c0[0x8]
* add b32 $r13 $r13 0x0 $c
* $p1 st b128 wt g[$r12d+0x40] $r4q
* st b32 wt g[$r12d+0x50] $r0
* exit */
0x2202020202020207ULL,
0x2c00000084021c04ULL,
0x2c0000000c031c04ULL,
0x2c00000010001c04ULL,
0x2c00000014005c04ULL,
0x2c00000018009c04ULL,
0x2c0000001c00dc04ULL,
0x2c00000020011c04ULL,
0x22b0420042320207ULL,
0x2c00000024015c04ULL,
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
0x2800400000029de4ULL,
0x7000c01050c21c03ULL,
0x280040001002dde4ULL,
0x204282020042e047ULL,
0x7000c00820c25c03ULL,
0x80000000000021e7ULL,
0x190e0000fc93dc03ULL,
0x1000000180821c02ULL,
0x1000000040931c02ULL,
0x1000000010935c02ULL,
0x4800000034825c03ULL,
0x22c042c042c04287ULL,
0x4800000030821c03ULL,
0x2800000028031de4ULL,
0x4801000020a29c03ULL,
0x280000002c035de4ULL,
0x0800000000b2dc42ULL,
0x4801000024c31c03ULL,
0x9400000000a01fc5ULL,
0x200002e04202c047ULL,
0x2800400020001de4ULL,
0x0800000000d35c42ULL,
0x9400000100c107c5ULL,
0x9400000140c01f85ULL,
0x8000000000001de7ULL
};
/* For simplicity, we will allocate as many group slots as we allocate counter
* slots. This means that a single counter which wants to source from 2 groups
* will have to be declared as using 2 counter slots. This shouldn't really be
* a problem because such queries don't make much sense ... (unless someone is
* really creative).
*/
struct nvc0_mp_counter_cfg
{
uint32_t func : 16; /* mask or 4-bit logic op (depending on mode) */
uint32_t mode : 4; /* LOGOP,B6,LOGOP_B6(_PULSE) */
uint32_t num_src : 3; /* number of sources (1 - 6, only for NVC0:NVE4) */
uint32_t sig_dom : 1; /* if 0, MP_PM_A (per warp-sched), if 1, MP_PM_B */
uint32_t sig_sel : 8; /* signal group */
uint64_t src_sel; /* signal selection for up to 6 sources (48 bit) */
};
#define NVC0_COUNTER_OPn_SUM 0
#define NVC0_COUNTER_OPn_OR 1
#define NVC0_COUNTER_OPn_AND 2
#define NVC0_COUNTER_OP2_REL_SUM_MM 3 /* (sum(ctr0) - sum(ctr1)) / sum(ctr0) */
#define NVC0_COUNTER_OP2_DIV_SUM_M0 4 /* sum(ctr0) / ctr1 of MP[0]) */
#define NVC0_COUNTER_OP2_AVG_DIV_MM 5 /* avg(ctr0 / ctr1) */
#define NVC0_COUNTER_OP2_AVG_DIV_M0 6 /* avg(ctr0) / ctr1 of MP[0]) */
struct nvc0_hw_sm_query_cfg
{
struct nvc0_mp_counter_cfg ctr[4];
uint8_t num_counters;
uint8_t op;
uint8_t norm[2]; /* normalization num,denom */
};
#define _Q1A(n, f, m, g, s, nu, dn) [NVE4_HW_SM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g, s }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { nu, dn } }
#define _Q1B(n, f, m, g, s, nu, dn) [NVE4_HW_SM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g, s }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { nu, dn } }
#define _M2A(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
#define _M2B(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
#define _M2AB(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
/* NOTES:
* active_warps: bit 0 alternates btw 0 and 1 for odd nr of warps
* inst_executed etc.: we only count a single warp scheduler
* metric-ipXc: we simply multiply by 4 to account for the 4 warp schedulers;
* this is inaccurate !
*/
static const struct nvc0_hw_sm_query_cfg nve4_hw_sm_queries[] =
{
_Q1B(ACTIVE_CYCLES, 0x0001, B6, WARP, 0x00000000, 1, 1),
_Q1B(ACTIVE_WARPS, 0x003f, B6, WARP, 0x31483104, 2, 1),
_Q1A(ATOM_COUNT, 0x0001, B6, BRANCH, 0x00000000, 1, 1),
_Q1A(BRANCH, 0x0001, B6, BRANCH, 0x0000000c, 1, 1),
_Q1A(DIVERGENT_BRANCH, 0x0001, B6, BRANCH, 0x00000010, 1, 1),
_Q1A(GLD_REQUEST, 0x0001, B6, LDST, 0x00000010, 1, 1),
_Q1B(GLD_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000010, 1, 1),
_Q1B(GST_TRANSACTIONS, 0x0001, B6, MEM, 0x00000004, 1, 1),
_Q1B(GST_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000014, 1, 1),
_Q1A(GRED_COUNT, 0x0001, B6, BRANCH, 0x00000008, 1, 1),
_Q1A(GST_REQUEST, 0x0001, B6, LDST, 0x00000014, 1, 1),
_Q1A(INST_EXECUTED, 0x0003, B6, EXEC, 0x00000398, 1, 1),
_Q1A(INST_ISSUED, 0x0003, B6, ISSUE, 0x00000104, 1, 1),
_Q1A(INST_ISSUED1, 0x0001, B6, ISSUE, 0x00000004, 1, 1),
_Q1A(INST_ISSUED2, 0x0001, B6, ISSUE, 0x00000008, 1, 1),
_Q1B(L1_GLD_HIT, 0x0001, B6, L1, 0x00000010, 1, 1),
_Q1B(L1_GLD_MISS, 0x0001, B6, L1, 0x00000014, 1, 1),
_Q1B(L1_LOCAL_LD_HIT, 0x0001, B6, L1, 0x00000000, 1, 1),
_Q1B(L1_LOCAL_LD_MISS, 0x0001, B6, L1, 0x00000004, 1, 1),
_Q1B(L1_LOCAL_ST_HIT, 0x0001, B6, L1, 0x00000008, 1, 1),
_Q1B(L1_LOCAL_ST_MISS, 0x0001, B6, L1, 0x0000000c, 1, 1),
_Q1B(L1_SHARED_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000008, 1, 1),
_Q1B(L1_SHARED_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x0000000c, 1, 1),
_Q1A(LOCAL_LD, 0x0001, B6, LDST, 0x00000008, 1, 1),
_Q1B(LOCAL_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000000, 1, 1),
_Q1A(LOCAL_ST, 0x0001, B6, LDST, 0x0000000c, 1, 1),
_Q1B(LOCAL_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000004, 1, 1),
_Q1A(PROF_TRIGGER_0, 0x0001, B6, USER, 0x00000000, 1, 1),
_Q1A(PROF_TRIGGER_1, 0x0001, B6, USER, 0x00000004, 1, 1),
_Q1A(PROF_TRIGGER_2, 0x0001, B6, USER, 0x00000008, 1, 1),
_Q1A(PROF_TRIGGER_3, 0x0001, B6, USER, 0x0000000c, 1, 1),
_Q1A(PROF_TRIGGER_4, 0x0001, B6, USER, 0x00000010, 1, 1),
_Q1A(PROF_TRIGGER_5, 0x0001, B6, USER, 0x00000014, 1, 1),
_Q1A(PROF_TRIGGER_6, 0x0001, B6, USER, 0x00000018, 1, 1),
_Q1A(PROF_TRIGGER_7, 0x0001, B6, USER, 0x0000001c, 1, 1),
_Q1A(SHARED_LD, 0x0001, B6, LDST, 0x00000000, 1, 1),
_Q1B(SHARED_LD_REPLAY, 0x0001, B6, REPLAY, 0x00000008, 1, 1),
_Q1A(SHARED_ST, 0x0001, B6, LDST, 0x00000004, 1, 1),
_Q1B(SHARED_ST_REPLAY, 0x0001, B6, REPLAY, 0x0000000c, 1, 1),
_Q1B(SM_CTA_LAUNCHED, 0x0001, B6, WARP, 0x0000001c, 1, 1),
_Q1A(THREADS_LAUNCHED, 0x003f, B6, LAUNCH, 0x398a4188, 1, 1),
_Q1B(UNCACHED_GLD_TRANSACTIONS, 0x0001, B6, MEM, 0x00000000, 1, 1),
_Q1A(WARPS_LAUNCHED, 0x0001, B6, LAUNCH, 0x00000004, 1, 1),
_M2AB(IPC, 0x3, B6, EXEC, 0x398, 0xffff, LOGOP, WARP, 0x0, DIV_SUM_M0, 10, 1),
_M2AB(IPAC, 0x3, B6, EXEC, 0x398, 0x1, B6, WARP, 0x0, AVG_DIV_MM, 10, 1),
_M2A(IPEC, 0x3, B6, EXEC, 0x398, 0xe, LOGOP, EXEC, 0x398, AVG_DIV_MM, 10, 1),
_M2A(INST_REPLAY_OHEAD, 0x3, B6, ISSUE, 0x104, 0x3, B6, EXEC, 0x398, REL_SUM_MM, 100, 1),
_M2B(MP_OCCUPANCY, 0x3f, B6, WARP, 0x31483104, 0x01, B6, WARP, 0x0, AVG_DIV_MM, 200, 64),
_M2B(MP_EFFICIENCY, 0x01, B6, WARP, 0x0, 0xffff, LOGOP, WARP, 0x0, AVG_DIV_M0, 100, 1),
};
#undef _Q1A
#undef _Q1B
#undef _M2A
#undef _M2B
/* === PERFORMANCE MONITORING COUNTERS for NVC0:NVE4 === */
static const uint64_t nvc0_read_hw_sm_counters_code[] =
{
/* mov b32 $r8 $tidx
* mov b32 $r9 $physid
* mov b32 $r0 $pm0
* mov b32 $r1 $pm1
* mov b32 $r2 $pm2
* mov b32 $r3 $pm3
* mov b32 $r4 $pm4
* mov b32 $r5 $pm5
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
* mov b32 $r10 c0[0x0]
* mov b32 $r11 c0[0x4]
* ext u32 $r8 $r9 0x414
* (not $p0) exit
* mul $r8 u32 $r8 u32 36
* add b32 $r10 $c $r10 $r8
* add b32 $r11 $r11 0x0 $c
* mov b32 $r8 c0[0x8]
* st b128 wt g[$r10d+0x00] $r0q
* st b128 wt g[$r10d+0x10] $r4q
* st b32 wt g[$r10d+0x20] $r8
* exit */
0x2c00000084021c04ULL,
0x2c0000000c025c04ULL,
0x2c00000010001c04ULL,
0x2c00000014005c04ULL,
0x2c00000018009c04ULL,
0x2c0000001c00dc04ULL,
0x2c00000020011c04ULL,
0x2c00000024015c04ULL,
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
0x2800400000029de4ULL,
0x280040001002dde4ULL,
0x7000c01050921c03ULL,
0x80000000000021e7ULL,
0x1000000090821c02ULL,
0x4801000020a29c03ULL,
0x0800000000b2dc42ULL,
0x2800400020021de4ULL,
0x9400000000a01fc5ULL,
0x9400000040a11fc5ULL,
0x9400000080a21f85ULL,
0x8000000000001de7ULL
};
#define _Q(n, f, m, g, c, s0, s1, s2, s3, s4, s5) [NVC0_HW_SM_QUERY_##n] = { { { f, NVC0_COMPUTE_MP_PM_OP_MODE_##m, c, 0, g, s0|(s1 << 8)|(s2 << 16)|(s3 << 24)|(s4##ULL << 32)|(s5##ULL << 40) }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { 1, 1 } }
static const struct nvc0_hw_sm_query_cfg nvc0_hw_sm_queries[] =
{
_Q(ACTIVE_CYCLES, 0xaaaa, LOGOP, 0x11, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(ACTIVE_WARPS, 0xaaaa, LOGOP, 0x24, 6, 0x10, 0x21, 0x32, 0x43, 0x54, 0x65),
_Q(ATOM_COUNT, 0xaaaa, LOGOP, 0x63, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(BRANCH, 0xaaaa, LOGOP, 0x1a, 2, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00),
_Q(DIVERGENT_BRANCH, 0xaaaa, LOGOP, 0x19, 2, 0x20, 0x31, 0x00, 0x00, 0x00, 0x00),
_Q(GLD_REQUEST, 0xaaaa, LOGOP, 0x64, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(GRED_COUNT, 0xaaaa, LOGOP, 0x63, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(GST_REQUEST, 0xaaaa, LOGOP, 0x64, 1, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_EXECUTED, 0xaaaa, LOGOP, 0x2d, 3, 0x00, 0x11, 0x22, 0x00, 0x00, 0x00),
_Q(INST_ISSUED1_0, 0xaaaa, LOGOP, 0x7e, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED1_1, 0xaaaa, LOGOP, 0x7e, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED2_0, 0xaaaa, LOGOP, 0x7e, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED2_1, 0xaaaa, LOGOP, 0x7e, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(LOCAL_LD, 0xaaaa, LOGOP, 0x64, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(LOCAL_ST, 0xaaaa, LOGOP, 0x64, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_0, 0xaaaa, LOGOP, 0x01, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_1, 0xaaaa, LOGOP, 0x01, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_2, 0xaaaa, LOGOP, 0x01, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_3, 0xaaaa, LOGOP, 0x01, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_4, 0xaaaa, LOGOP, 0x01, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_5, 0xaaaa, LOGOP, 0x01, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_6, 0xaaaa, LOGOP, 0x01, 1, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_7, 0xaaaa, LOGOP, 0x01, 1, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(SHARED_LD, 0xaaaa, LOGOP, 0x64, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(SHARED_ST, 0xaaaa, LOGOP, 0x64, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(THREADS_LAUNCHED, 0xaaaa, LOGOP, 0x26, 6, 0x10, 0x21, 0x32, 0x43, 0x54, 0x65),
_Q(TH_INST_EXECUTED_0, 0xaaaa, LOGOP, 0xa3, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_1, 0xaaaa, LOGOP, 0xa5, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_2, 0xaaaa, LOGOP, 0xa4, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_3, 0xaaaa, LOGOP, 0xa6, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(WARPS_LAUNCHED, 0xaaaa, LOGOP, 0x26, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
};
#undef _Q
static const struct nvc0_hw_sm_query_cfg *
nvc0_hw_sm_query_get_cfg(struct nvc0_context *nvc0, struct nvc0_query *q)
{
struct nvc0_screen *screen = nvc0->screen;
if (screen->base.class_3d >= NVE4_3D_CLASS)
return &nve4_hw_sm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];
return &nvc0_hw_sm_queries[q->type - NVC0_HW_SM_QUERY(0)];
}
static boolean
nvc0_hw_sm_query_begin(struct nvc0_context *nvc0, struct nvc0_query *q)
{
struct nvc0_screen *screen = nvc0->screen;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
struct nvc0_hw_query *hq = nvc0_hw_query(q);
const struct nvc0_hw_sm_query_cfg *cfg;
unsigned i, c;
unsigned num_ab[2] = { 0, 0 };
cfg = nvc0_hw_sm_query_get_cfg(nvc0, q);
/* check if we have enough free counter slots */
for (i = 0; i < cfg->num_counters; ++i)
num_ab[cfg->ctr[i].sig_dom]++;
if (screen->pm.num_hw_sm_active[0] + num_ab[0] > 4 ||
screen->pm.num_hw_sm_active[1] + num_ab[1] > 4) {
NOUVEAU_ERR("Not enough free MP counter slots !\n");
return false;
}
assert(cfg->num_counters <= 4);
PUSH_SPACE(push, 4 * 8 * (is_nve4 ? 1 : 6) + 6);
if (!screen->pm.mp_counters_enabled) {
screen->pm.mp_counters_enabled = true;
BEGIN_NVC0(push, SUBC_SW(0x06ac), 1);
PUSH_DATA (push, 0x1fcb);
}
/* set sequence field to 0 (used to check if result is available) */
for (i = 0; i < screen->mp_count; ++i)
hq->data[i * 10 + 10] = 0;
for (i = 0; i < cfg->num_counters; ++i) {
const unsigned d = cfg->ctr[i].sig_dom;
if (!screen->pm.num_hw_sm_active[d]) {
uint32_t m = (1 << 22) | (1 << (7 + (8 * !d)));
if (screen->pm.num_hw_sm_active[!d])
m |= 1 << (7 + (8 * d));
BEGIN_NVC0(push, SUBC_SW(0x0600), 1);
PUSH_DATA (push, m);
}
screen->pm.num_hw_sm_active[d]++;
for (c = d * 4; c < (d * 4 + 4); ++c) {
if (!screen->pm.mp_counter[c]) {
hq->ctr[i] = c;
screen->pm.mp_counter[c] = (struct pipe_query *)q;
break;
}
}
assert(c <= (d * 4 + 3)); /* must succeed, already checked for space */
/* configure and reset the counter(s) */
if (is_nve4) {
if (d == 0)
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_A_SIGSEL(c & 3)), 1);
else
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_B_SIGSEL(c & 3)), 1);
PUSH_DATA (push, cfg->ctr[i].sig_sel);
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SRCSEL(c)), 1);
PUSH_DATA (push, cfg->ctr[i].src_sel + 0x2108421 * (c & 3));
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 1);
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SET(c)), 1);
PUSH_DATA (push, 0);
} else {
unsigned s;
for (s = 0; s < cfg->ctr[i].num_src; s++) {
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SIGSEL(s)), 1);
PUSH_DATA (push, cfg->ctr[i].sig_sel);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SRCSEL(s)), 1);
PUSH_DATA (push, (cfg->ctr[i].src_sel >> (s * 8)) & 0xff);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_OP(s)), 1);
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SET(s)), 1);
PUSH_DATA (push, 0);
}
}
}
return true;
}
static void
nvc0_hw_sm_query_end(struct nvc0_context *nvc0, struct nvc0_query *q)
{
struct nvc0_screen *screen = nvc0->screen;
struct pipe_context *pipe = &nvc0->base.pipe;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
struct nvc0_hw_query *hq = nvc0_hw_query(q);
uint32_t mask;
uint32_t input[3];
const uint block[3] = { 32, is_nve4 ? 4 : 1, 1 };
const uint grid[3] = { screen->mp_count, 1, 1 };
unsigned c;
const struct nvc0_hw_sm_query_cfg *cfg;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, q);
if (unlikely(!screen->pm.prog)) {
struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program);
prog->type = PIPE_SHADER_COMPUTE;
prog->translated = true;
prog->num_gprs = 14;
prog->parm_size = 12;
if (is_nve4) {
prog->code = (uint32_t *)nve4_read_hw_sm_counters_code;
prog->code_size = sizeof(nve4_read_hw_sm_counters_code);
} else {
prog->code = (uint32_t *)nvc0_read_hw_sm_counters_code;
prog->code_size = sizeof(nvc0_read_hw_sm_counters_code);
}
screen->pm.prog = prog;
}
/* disable all counting */
PUSH_SPACE(push, 8);
for (c = 0; c < 8; ++c)
if (screen->pm.mp_counter[c]) {
if (is_nve4) {
IMMED_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 0);
} else {
IMMED_NVC0(push, NVC0_COMPUTE(MP_PM_OP(c)), 0);
}
}
/* release counters for this query */
for (c = 0; c < 8; ++c) {
if (nvc0_query(screen->pm.mp_counter[c]) == q) {
screen->pm.num_hw_sm_active[c / 4]--;
screen->pm.mp_counter[c] = NULL;
}
}
BCTX_REFN_bo(nvc0->bufctx_cp, CP_QUERY, NOUVEAU_BO_GART | NOUVEAU_BO_WR,
hq->bo);
PUSH_SPACE(push, 1);
IMMED_NVC0(push, SUBC_COMPUTE(NV50_GRAPH_SERIALIZE), 0);
pipe->bind_compute_state(pipe, screen->pm.prog);
input[0] = (hq->bo->offset + hq->base_offset);
input[1] = (hq->bo->offset + hq->base_offset) >> 32;
input[2] = hq->sequence;
pipe->launch_grid(pipe, block, grid, 0, input);
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);
/* re-activate other counters */
PUSH_SPACE(push, 16);
mask = 0;
for (c = 0; c < 8; ++c) {
unsigned i;
q = nvc0_query(screen->pm.mp_counter[c]);
if (!q)
continue;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, q);
for (i = 0; i < cfg->num_counters; ++i) {
if (mask & (1 << hq->ctr[i]))
break;
mask |= 1 << hq->ctr[i];
if (is_nve4) {
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(hq->ctr[i])), 1);
} else {
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_OP(hq->ctr[i])), 1);
}
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
}
}
}
static inline bool
nvc0_hw_sm_query_read_data(uint32_t count[32][4],
struct nvc0_context *nvc0, bool wait,
struct nvc0_query *q,
const struct nvc0_hw_sm_query_cfg *cfg,
unsigned mp_count)
{
struct nvc0_hw_query *hq = nvc0_hw_query(q);
unsigned p, c;
for (p = 0; p < mp_count; ++p) {
const unsigned b = (0x24 / 4) * p;
for (c = 0; c < cfg->num_counters; ++c) {
if (hq->data[b + 8] != hq->sequence) {
if (!wait)
return false;
if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->base.client))
return false;
}
count[p][c] = hq->data[b + hq->ctr[c]];
}
}
return true;
}
static inline bool
nve4_hw_sm_query_read_data(uint32_t count[32][4],
struct nvc0_context *nvc0, bool wait,
struct nvc0_query *q,
const struct nvc0_hw_sm_query_cfg *cfg,
unsigned mp_count)
{
struct nvc0_hw_query *hq = nvc0_hw_query(q);
unsigned p, c, d;
for (p = 0; p < mp_count; ++p) {
const unsigned b = (0x60 / 4) * p;
for (c = 0; c < cfg->num_counters; ++c) {
count[p][c] = 0;
for (d = 0; d < ((hq->ctr[c] & ~3) ? 1 : 4); ++d) {
if (hq->data[b + 20 + d] != hq->sequence) {
if (!wait)
return false;
if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->base.client))
return false;
}
if (hq->ctr[c] & ~0x3)
count[p][c] = hq->data[b + 16 + (hq->ctr[c] & 3)];
else
count[p][c] += hq->data[b + d * 4 + hq->ctr[c]];
}
}
}
return true;
}
/* Metric calculations:
* sum(x) ... sum of x over all MPs
* avg(x) ... average of x over all MPs
*
* IPC : sum(inst_executed) / clock
* INST_REPLAY_OHEAD: (sum(inst_issued) - sum(inst_executed)) / sum(inst_issued)
* MP_OCCUPANCY : avg((active_warps / 64) / active_cycles)
* MP_EFFICIENCY : avg(active_cycles / clock)
*
* NOTE: Interpretation of IPC requires knowledge of MP count.
*/
static boolean
nvc0_hw_sm_query_result(struct nvc0_context *nvc0, struct nvc0_query *q,
void *result, boolean wait)
{
uint32_t count[32][4];
uint64_t value = 0;
unsigned mp_count = MIN2(nvc0->screen->mp_count_compute, 32);
unsigned p, c;
const struct nvc0_hw_sm_query_cfg *cfg;
bool ret;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, q);
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
ret = nve4_hw_sm_query_read_data(count, nvc0, wait, q, cfg, mp_count);
else
ret = nvc0_hw_sm_query_read_data(count, nvc0, wait, q, cfg, mp_count);
if (!ret)
return false;
if (cfg->op == NVC0_COUNTER_OPn_SUM) {
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
value += count[p][c];
value = (value * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OPn_OR) {
uint32_t v = 0;
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
v |= count[p][c];
value = ((uint64_t)v * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OPn_AND) {
uint32_t v = ~0;
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
v &= count[p][c];
value = ((uint64_t)v * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OP2_REL_SUM_MM) {
uint64_t v[2] = { 0, 0 };
for (p = 0; p < mp_count; ++p) {
v[0] += count[p][0];
v[1] += count[p][1];
}
if (v[0])
value = ((v[0] - v[1]) * cfg->norm[0]) / (v[0] * cfg->norm[1]);
} else
if (cfg->op == NVC0_COUNTER_OP2_DIV_SUM_M0) {
for (p = 0; p < mp_count; ++p)
value += count[p][0];
if (count[0][1])
value = (value * cfg->norm[0]) / (count[0][1] * cfg->norm[1]);
else
value = 0;
} else
if (cfg->op == NVC0_COUNTER_OP2_AVG_DIV_MM) {
unsigned mp_used = 0;
for (p = 0; p < mp_count; ++p, mp_used += !!count[p][0])
if (count[p][1])
value += (count[p][0] * cfg->norm[0]) / count[p][1];
if (mp_used)
value /= (uint64_t)mp_used * cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OP2_AVG_DIV_M0) {
unsigned mp_used = 0;
for (p = 0; p < mp_count; ++p, mp_used += !!count[p][0])
value += count[p][0];
if (count[0][1] && mp_used) {
value *= cfg->norm[0];
value /= (uint64_t)count[0][1] * mp_used * cfg->norm[1];
} else {
value = 0;
}
}
*(uint64_t *)result = value;
return true;
}
static bool
bool
nvc0_hw_query_allocate(struct nvc0_context *nvc0, struct nvc0_query *q,
int size)
{
@@ -755,6 +127,9 @@ nvc0_hw_begin_query(struct nvc0_context *nvc0, struct nvc0_query *q)
struct nvc0_hw_query *hq = nvc0_hw_query(q);
bool ret = true;
if (hq->funcs && hq->funcs->begin_query)
return hq->funcs->begin_query(nvc0, hq);
/* For occlusion queries we have to change the storage, because a previous
* query might set the initial render condition to false even *after* we re-
* initialized it to true. */
@@ -814,10 +189,6 @@ nvc0_hw_begin_query(struct nvc0_context *nvc0, struct nvc0_query *q)
nvc0_hw_query_get(push, q, 0xc0 + 0x90, 0x0e809002); /* TEP, LAUNCHES */
break;
default:
if ((q->type >= NVE4_HW_SM_QUERY(0) && q->type <= NVE4_HW_SM_QUERY_LAST) ||
(q->type >= NVC0_HW_SM_QUERY(0) && q->type <= NVC0_HW_SM_QUERY_LAST)) {
ret = nvc0_hw_sm_query_begin(nvc0, q);
}
break;
}
hq->state = NVC0_HW_QUERY_STATE_ACTIVE;
@@ -830,6 +201,11 @@ nvc0_hw_end_query(struct nvc0_context *nvc0, struct nvc0_query *q)
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
struct nvc0_hw_query *hq = nvc0_hw_query(q);
if (hq->funcs && hq->funcs->end_query) {
hq->funcs->end_query(nvc0, hq);
return;
}
if (hq->state != NVC0_HW_QUERY_STATE_ACTIVE) {
/* some queries don't require 'begin' to be called (e.g. GPU_FINISHED) */
if (hq->rotate)
@@ -891,10 +267,6 @@ nvc0_hw_end_query(struct nvc0_context *nvc0, struct nvc0_query *q)
nvc0_hw_query_get(push, q, 0x00, 0x0d005002 | (q->index << 5));
break;
default:
if ((q->type >= NVE4_HW_SM_QUERY(0) && q->type <= NVE4_HW_SM_QUERY_LAST) ||
(q->type >= NVC0_HW_SM_QUERY(0) && q->type <= NVC0_HW_SM_QUERY_LAST)) {
nvc0_hw_sm_query_end(nvc0, q);
}
break;
}
if (hq->is64bit)
@@ -912,10 +284,8 @@ nvc0_hw_get_query_result(struct nvc0_context *nvc0, struct nvc0_query *q,
uint64_t *data64 = (uint64_t *)hq->data;
unsigned i;
if ((q->type >= NVE4_HW_SM_QUERY(0) && q->type <= NVE4_HW_SM_QUERY_LAST) ||
(q->type >= NVC0_HW_SM_QUERY(0) && q->type <= NVC0_HW_SM_QUERY_LAST)) {
return nvc0_hw_sm_query_result(nvc0, q, result, wait);
}
if (hq->funcs && hq->funcs->get_query_result)
return hq->funcs->get_query_result(nvc0, hq, wait, result);
if (hq->state != NVC0_HW_QUERY_STATE_READY)
nvc0_hw_query_update(nvc0->screen->base.client, q);
@@ -995,6 +365,12 @@ nvc0_hw_create_query(struct nvc0_context *nvc0, unsigned type, unsigned index)
struct nvc0_query *q;
unsigned space = NVC0_HW_QUERY_ALLOC_SPACE;
hq = nvc0_hw_sm_create_query(nvc0, type);
if (hq) {
hq->base.funcs = &hw_query_funcs;
return (struct nvc0_query *)hq;
}
hq = CALLOC_STRUCT(nvc0_hw_query);
if (!hq)
return NULL;
@@ -1034,53 +410,6 @@ nvc0_hw_create_query(struct nvc0_context *nvc0, unsigned type, unsigned index)
space = 16;
break;
default:
if (nvc0->screen->base.device->drm_version >= 0x01000101) {
if (type >= NVE4_HW_SM_QUERY(0) && type <= NVE4_HW_SM_QUERY_LAST) {
/* for each MP:
* [00] = WS0.C0
* [04] = WS0.C1
* [08] = WS0.C2
* [0c] = WS0.C3
* [10] = WS1.C0
* [14] = WS1.C1
* [18] = WS1.C2
* [1c] = WS1.C3
* [20] = WS2.C0
* [24] = WS2.C1
* [28] = WS2.C2
* [2c] = WS2.C3
* [30] = WS3.C0
* [34] = WS3.C1
* [38] = WS3.C2
* [3c] = WS3.C3
* [40] = MP.C4
* [44] = MP.C5
* [48] = MP.C6
* [4c] = MP.C7
* [50] = WS0.sequence
* [54] = WS1.sequence
* [58] = WS2.sequence
* [5c] = WS3.sequence
*/
space = (4 * 4 + 4 + 4) * nvc0->screen->mp_count * sizeof(uint32_t);
break;
} else
if (type >= NVC0_HW_SM_QUERY(0) && type <= NVC0_HW_SM_QUERY_LAST) {
/* for each MP:
* [00] = MP.C0
* [04] = MP.C1
* [08] = MP.C2
* [0c] = MP.C3
* [10] = MP.C4
* [14] = MP.C5
* [18] = MP.C6
* [1c] = MP.C7
* [20] = MP.sequence
*/
space = (8 + 1) * nvc0->screen->mp_count * sizeof(uint32_t);
break;
}
}
debug_printf("invalid query type: %u\n", type);
FREE(q);
return NULL;
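The layout comments above fix the buffer sizes: on NVE4 each MP stores four counters for each of the four warp schedulers, four shared MP counters, and four per-scheduler sequence words (used to detect result availability without blocking), i.e. 24 words or 0x60 bytes per MP — matching the (0x60 / 4) word stride in nve4_hw_sm_query_read_data. A hypothetical helper spelling out the arithmetic:

/* Illustration only, mirroring the NVE4 layout comment above:
 * 16 warp-scheduler counters + 4 MP counters + 4 sequence words
 * = 24 words = 0x60 bytes per MP. */
static unsigned
nve4_hw_sm_query_space(unsigned mp_count)
{
   return (4 * 4 + 4 + 4) * mp_count * sizeof(uint32_t);
}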

src/gallium/drivers/nouveau/nvc0/nvc0_query_hw.h

@@ -8,10 +8,20 @@
#define NVC0_HW_QUERY_TFB_BUFFER_OFFSET (PIPE_QUERY_TYPES + 0)
struct nvc0_hw_query;
struct nvc0_hw_query_funcs {
void (*destroy_query)(struct nvc0_context *, struct nvc0_hw_query *);
boolean (*begin_query)(struct nvc0_context *, struct nvc0_hw_query *);
void (*end_query)(struct nvc0_context *, struct nvc0_hw_query *);
boolean (*get_query_result)(struct nvc0_context *, struct nvc0_hw_query *,
boolean, union pipe_query_result *);
};
struct nvc0_hw_query {
struct nvc0_query base;
const struct nvc0_hw_query_funcs *funcs;
uint32_t *data;
int8_t ctr[4];
uint32_t sequence;
struct nouveau_bo *bo;
uint32_t base_offset;
@@ -30,105 +40,10 @@ nvc0_hw_query(struct nvc0_query *q)
return (struct nvc0_hw_query *)q;
}
/*
* Performance counter queries:
*/
#define NVE4_HW_SM_QUERY(i) (PIPE_QUERY_DRIVER_SPECIFIC + (i))
#define NVE4_HW_SM_QUERY_LAST NVE4_HW_SM_QUERY(NVE4_HW_SM_QUERY_COUNT - 1)
enum nve4_pm_queries
{
NVE4_HW_SM_QUERY_ACTIVE_CYCLES = 0,
NVE4_HW_SM_QUERY_ACTIVE_WARPS,
NVE4_HW_SM_QUERY_ATOM_COUNT,
NVE4_HW_SM_QUERY_BRANCH,
NVE4_HW_SM_QUERY_DIVERGENT_BRANCH,
NVE4_HW_SM_QUERY_GLD_REQUEST,
NVE4_HW_SM_QUERY_GLD_MEM_DIV_REPLAY,
NVE4_HW_SM_QUERY_GST_TRANSACTIONS,
NVE4_HW_SM_QUERY_GST_MEM_DIV_REPLAY,
NVE4_HW_SM_QUERY_GRED_COUNT,
NVE4_HW_SM_QUERY_GST_REQUEST,
NVE4_HW_SM_QUERY_INST_EXECUTED,
NVE4_HW_SM_QUERY_INST_ISSUED,
NVE4_HW_SM_QUERY_INST_ISSUED1,
NVE4_HW_SM_QUERY_INST_ISSUED2,
NVE4_HW_SM_QUERY_L1_GLD_HIT,
NVE4_HW_SM_QUERY_L1_GLD_MISS,
NVE4_HW_SM_QUERY_L1_LOCAL_LD_HIT,
NVE4_HW_SM_QUERY_L1_LOCAL_LD_MISS,
NVE4_HW_SM_QUERY_L1_LOCAL_ST_HIT,
NVE4_HW_SM_QUERY_L1_LOCAL_ST_MISS,
NVE4_HW_SM_QUERY_L1_SHARED_LD_TRANSACTIONS,
NVE4_HW_SM_QUERY_L1_SHARED_ST_TRANSACTIONS,
NVE4_HW_SM_QUERY_LOCAL_LD,
NVE4_HW_SM_QUERY_LOCAL_LD_TRANSACTIONS,
NVE4_HW_SM_QUERY_LOCAL_ST,
NVE4_HW_SM_QUERY_LOCAL_ST_TRANSACTIONS,
NVE4_HW_SM_QUERY_PROF_TRIGGER_0,
NVE4_HW_SM_QUERY_PROF_TRIGGER_1,
NVE4_HW_SM_QUERY_PROF_TRIGGER_2,
NVE4_HW_SM_QUERY_PROF_TRIGGER_3,
NVE4_HW_SM_QUERY_PROF_TRIGGER_4,
NVE4_HW_SM_QUERY_PROF_TRIGGER_5,
NVE4_HW_SM_QUERY_PROF_TRIGGER_6,
NVE4_HW_SM_QUERY_PROF_TRIGGER_7,
NVE4_HW_SM_QUERY_SHARED_LD,
NVE4_HW_SM_QUERY_SHARED_LD_REPLAY,
NVE4_HW_SM_QUERY_SHARED_ST,
NVE4_HW_SM_QUERY_SHARED_ST_REPLAY,
NVE4_HW_SM_QUERY_SM_CTA_LAUNCHED,
NVE4_HW_SM_QUERY_THREADS_LAUNCHED,
NVE4_HW_SM_QUERY_UNCACHED_GLD_TRANSACTIONS,
NVE4_HW_SM_QUERY_WARPS_LAUNCHED,
NVE4_HW_SM_QUERY_METRIC_IPC,
NVE4_HW_SM_QUERY_METRIC_IPAC,
NVE4_HW_SM_QUERY_METRIC_IPEC,
NVE4_HW_SM_QUERY_METRIC_MP_OCCUPANCY,
NVE4_HW_SM_QUERY_METRIC_MP_EFFICIENCY,
NVE4_HW_SM_QUERY_METRIC_INST_REPLAY_OHEAD,
NVE4_HW_SM_QUERY_COUNT
};
#define NVC0_HW_SM_QUERY(i) (PIPE_QUERY_DRIVER_SPECIFIC + 2048 + (i))
#define NVC0_HW_SM_QUERY_LAST NVC0_HW_SM_QUERY(NVC0_HW_SM_QUERY_COUNT - 1)
enum nvc0_pm_queries
{
NVC0_HW_SM_QUERY_ACTIVE_CYCLES = 0,
NVC0_HW_SM_QUERY_ACTIVE_WARPS,
NVC0_HW_SM_QUERY_ATOM_COUNT,
NVC0_HW_SM_QUERY_BRANCH,
NVC0_HW_SM_QUERY_DIVERGENT_BRANCH,
NVC0_HW_SM_QUERY_GLD_REQUEST,
NVC0_HW_SM_QUERY_GRED_COUNT,
NVC0_HW_SM_QUERY_GST_REQUEST,
NVC0_HW_SM_QUERY_INST_EXECUTED,
NVC0_HW_SM_QUERY_INST_ISSUED1_0,
NVC0_HW_SM_QUERY_INST_ISSUED1_1,
NVC0_HW_SM_QUERY_INST_ISSUED2_0,
NVC0_HW_SM_QUERY_INST_ISSUED2_1,
NVC0_HW_SM_QUERY_LOCAL_LD,
NVC0_HW_SM_QUERY_LOCAL_ST,
NVC0_HW_SM_QUERY_PROF_TRIGGER_0,
NVC0_HW_SM_QUERY_PROF_TRIGGER_1,
NVC0_HW_SM_QUERY_PROF_TRIGGER_2,
NVC0_HW_SM_QUERY_PROF_TRIGGER_3,
NVC0_HW_SM_QUERY_PROF_TRIGGER_4,
NVC0_HW_SM_QUERY_PROF_TRIGGER_5,
NVC0_HW_SM_QUERY_PROF_TRIGGER_6,
NVC0_HW_SM_QUERY_PROF_TRIGGER_7,
NVC0_HW_SM_QUERY_SHARED_LD,
NVC0_HW_SM_QUERY_SHARED_ST,
NVC0_HW_SM_QUERY_THREADS_LAUNCHED,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_0,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_1,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_2,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_3,
NVC0_HW_SM_QUERY_WARPS_LAUNCHED,
NVC0_HW_SM_QUERY_COUNT
};
struct nvc0_query *
nvc0_hw_create_query(struct nvc0_context *, unsigned, unsigned);
bool
nvc0_hw_query_allocate(struct nvc0_context *, struct nvc0_query *, int);
void
nvc0_hw_query_pushbuf_submit(struct nouveau_pushbuf *, struct nvc0_query *,
unsigned);

src/gallium/drivers/nouveau/nvc0/nvc0_query_hw_sm.c (new file)

@@ -0,0 +1,748 @@
/*
* Copyright 2011 Christoph Bumiller
* Copyright 2015 Samuel Pitoiset
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#define NVC0_PUSH_EXPLICIT_SPACE_CHECKING
#include "nvc0/nvc0_context.h"
#include "nvc0/nvc0_query_hw_sm.h"
#include "nv_object.xml.h"
#include "nvc0/nve4_compute.xml.h"
#include "nvc0/nvc0_compute.xml.h"
/* === PERFORMANCE MONITORING COUNTERS for NVE4+ === */
/* Code to read out MP counters: They are accessible via mmio, too, but let's
* just avoid mapping registers in userspace. We'd have to know which MPs are
* enabled/present, too, and that information is not presently exposed.
* We could add a kernel interface for it, but reading the counters like this
* has the advantage of being async (if get_result isn't called immediately).
*/
static const uint64_t nve4_read_hw_sm_counters_code[] =
{
/* sched 0x20 0x20 0x20 0x20 0x20 0x20 0x20
* mov b32 $r8 $tidx
* mov b32 $r12 $physid
* mov b32 $r0 $pm0
* mov b32 $r1 $pm1
* mov b32 $r2 $pm2
* mov b32 $r3 $pm3
* mov b32 $r4 $pm4
* sched 0x20 0x20 0x23 0x04 0x20 0x04 0x2b
* mov b32 $r5 $pm5
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
* mov b32 $r10 c0[0x0]
* ext u32 $r8 $r12 0x414
* mov b32 $r11 c0[0x4]
* sched 0x04 0x2e 0x04 0x20 0x20 0x28 0x04
* ext u32 $r9 $r12 0x208
* (not $p0) exit
* set $p1 0x1 eq u32 $r9 0x0
* mul $r8 u32 $r8 u32 96
* mul $r12 u32 $r9 u32 16
* mul $r13 u32 $r9 u32 4
* add b32 $r9 $r8 $r13
* sched 0x28 0x04 0x2c 0x04 0x2c 0x04 0x2c
* add b32 $r8 $r8 $r12
* mov b32 $r12 $r10
* add b32 $r10 $c $r10 $r8
* mov b32 $r13 $r11
* add b32 $r11 $r11 0x0 $c
* add b32 $r12 $c $r12 $r9
* st b128 wt g[$r10d] $r0q
* sched 0x4 0x2c 0x20 0x04 0x2e 0x00 0x00
* mov b32 $r0 c0[0x8]
* add b32 $r13 $r13 0x0 $c
* $p1 st b128 wt g[$r12d+0x40] $r4q
* st b32 wt g[$r12d+0x50] $r0
* exit */
0x2202020202020207ULL,
0x2c00000084021c04ULL,
0x2c0000000c031c04ULL,
0x2c00000010001c04ULL,
0x2c00000014005c04ULL,
0x2c00000018009c04ULL,
0x2c0000001c00dc04ULL,
0x2c00000020011c04ULL,
0x22b0420042320207ULL,
0x2c00000024015c04ULL,
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
0x2800400000029de4ULL,
0x7000c01050c21c03ULL,
0x280040001002dde4ULL,
0x204282020042e047ULL,
0x7000c00820c25c03ULL,
0x80000000000021e7ULL,
0x190e0000fc93dc03ULL,
0x1000000180821c02ULL,
0x1000000040931c02ULL,
0x1000000010935c02ULL,
0x4800000034825c03ULL,
0x22c042c042c04287ULL,
0x4800000030821c03ULL,
0x2800000028031de4ULL,
0x4801000020a29c03ULL,
0x280000002c035de4ULL,
0x0800000000b2dc42ULL,
0x4801000024c31c03ULL,
0x9400000000a01fc5ULL,
0x200002e04202c047ULL,
0x2800400020001de4ULL,
0x0800000000d35c42ULL,
0x9400000100c107c5ULL,
0x9400000140c01f85ULL,
0x8000000000001de7ULL
};
/* For simplicity, we will allocate as many group slots as we allocate counter
* slots. This means that a single counter which wants to source from 2 groups
* will have to be declared as using 2 counter slots. This shouldn't really be
* a problem because such queries don't make much sense ... (unless someone is
* really creative).
*/
struct nvc0_hw_sm_counter_cfg
{
uint32_t func : 16; /* mask or 4-bit logic op (depending on mode) */
uint32_t mode : 4; /* LOGOP,B6,LOGOP_B6(_PULSE) */
uint32_t num_src : 3; /* number of sources (1 - 6, only for NVC0:NVE4) */
uint32_t sig_dom : 1; /* if 0, MP_PM_A (per warp-sched), if 1, MP_PM_B */
uint32_t sig_sel : 8; /* signal group */
uint64_t src_sel; /* signal selection for up to 6 sources (48 bit) */
};
#define NVC0_COUNTER_OPn_SUM 0
#define NVC0_COUNTER_OPn_OR 1
#define NVC0_COUNTER_OPn_AND 2
#define NVC0_COUNTER_OP2_REL_SUM_MM 3 /* (sum(ctr0) - sum(ctr1)) / sum(ctr0) */
#define NVC0_COUNTER_OP2_DIV_SUM_M0 4 /* sum(ctr0) / ctr1 of MP[0]) */
#define NVC0_COUNTER_OP2_AVG_DIV_MM 5 /* avg(ctr0 / ctr1) */
#define NVC0_COUNTER_OP2_AVG_DIV_M0 6 /* avg(ctr0) / ctr1 of MP[0]) */
struct nvc0_hw_sm_query_cfg
{
struct nvc0_hw_sm_counter_cfg ctr[4];
uint8_t num_counters;
uint8_t op;
uint8_t norm[2]; /* normalization num,denom */
};
#define _Q1A(n, f, m, g, s, nu, dn) [NVE4_HW_SM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g, s }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { nu, dn } }
#define _Q1B(n, f, m, g, s, nu, dn) [NVE4_HW_SM_QUERY_##n] = { { { f, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g, s }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { nu, dn } }
#define _M2A(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
#define _M2B(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
#define _M2AB(n, f0, m0, g0, s0, f1, m1, g1, s1, o, nu, dn) [NVE4_HW_SM_QUERY_METRIC_##n] = { { \
{ f0, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m0, 0, 0, NVE4_COMPUTE_MP_PM_A_SIGSEL_##g0, s0 }, \
{ f1, NVE4_COMPUTE_MP_PM_FUNC_MODE_##m1, 0, 1, NVE4_COMPUTE_MP_PM_B_SIGSEL_##g1, s1 }, \
{}, {}, }, 2, NVC0_COUNTER_OP2_##o, { nu, dn } }
/* NOTES:
* active_warps: bit 0 alternates btw 0 and 1 for odd nr of warps
* inst_executed etc.: we only count a single warp scheduler
* metric-ipXc: we simply multiply by 4 to account for the 4 warp schedulers;
* this is inaccurate !
*/
static const struct nvc0_hw_sm_query_cfg nve4_hw_sm_queries[] =
{
_Q1B(ACTIVE_CYCLES, 0x0001, B6, WARP, 0x00000000, 1, 1),
_Q1B(ACTIVE_WARPS, 0x003f, B6, WARP, 0x31483104, 2, 1),
_Q1A(ATOM_COUNT, 0x0001, B6, BRANCH, 0x00000000, 1, 1),
_Q1A(BRANCH, 0x0001, B6, BRANCH, 0x0000000c, 1, 1),
_Q1A(DIVERGENT_BRANCH, 0x0001, B6, BRANCH, 0x00000010, 1, 1),
_Q1A(GLD_REQUEST, 0x0001, B6, LDST, 0x00000010, 1, 1),
_Q1B(GLD_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000010, 1, 1),
_Q1B(GST_TRANSACTIONS, 0x0001, B6, MEM, 0x00000004, 1, 1),
_Q1B(GST_MEM_DIV_REPLAY, 0x0001, B6, REPLAY, 0x00000014, 1, 1),
_Q1A(GRED_COUNT, 0x0001, B6, BRANCH, 0x00000008, 1, 1),
_Q1A(GST_REQUEST, 0x0001, B6, LDST, 0x00000014, 1, 1),
_Q1A(INST_EXECUTED, 0x0003, B6, EXEC, 0x00000398, 1, 1),
_Q1A(INST_ISSUED, 0x0003, B6, ISSUE, 0x00000104, 1, 1),
_Q1A(INST_ISSUED1, 0x0001, B6, ISSUE, 0x00000004, 1, 1),
_Q1A(INST_ISSUED2, 0x0001, B6, ISSUE, 0x00000008, 1, 1),
_Q1B(L1_GLD_HIT, 0x0001, B6, L1, 0x00000010, 1, 1),
_Q1B(L1_GLD_MISS, 0x0001, B6, L1, 0x00000014, 1, 1),
_Q1B(L1_LOCAL_LD_HIT, 0x0001, B6, L1, 0x00000000, 1, 1),
_Q1B(L1_LOCAL_LD_MISS, 0x0001, B6, L1, 0x00000004, 1, 1),
_Q1B(L1_LOCAL_ST_HIT, 0x0001, B6, L1, 0x00000008, 1, 1),
_Q1B(L1_LOCAL_ST_MISS, 0x0001, B6, L1, 0x0000000c, 1, 1),
_Q1B(L1_SHARED_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000008, 1, 1),
_Q1B(L1_SHARED_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x0000000c, 1, 1),
_Q1A(LOCAL_LD, 0x0001, B6, LDST, 0x00000008, 1, 1),
_Q1B(LOCAL_LD_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000000, 1, 1),
_Q1A(LOCAL_ST, 0x0001, B6, LDST, 0x0000000c, 1, 1),
_Q1B(LOCAL_ST_TRANSACTIONS, 0x0001, B6, TRANSACTION, 0x00000004, 1, 1),
_Q1A(PROF_TRIGGER_0, 0x0001, B6, USER, 0x00000000, 1, 1),
_Q1A(PROF_TRIGGER_1, 0x0001, B6, USER, 0x00000004, 1, 1),
_Q1A(PROF_TRIGGER_2, 0x0001, B6, USER, 0x00000008, 1, 1),
_Q1A(PROF_TRIGGER_3, 0x0001, B6, USER, 0x0000000c, 1, 1),
_Q1A(PROF_TRIGGER_4, 0x0001, B6, USER, 0x00000010, 1, 1),
_Q1A(PROF_TRIGGER_5, 0x0001, B6, USER, 0x00000014, 1, 1),
_Q1A(PROF_TRIGGER_6, 0x0001, B6, USER, 0x00000018, 1, 1),
_Q1A(PROF_TRIGGER_7, 0x0001, B6, USER, 0x0000001c, 1, 1),
_Q1A(SHARED_LD, 0x0001, B6, LDST, 0x00000000, 1, 1),
_Q1B(SHARED_LD_REPLAY, 0x0001, B6, REPLAY, 0x00000008, 1, 1),
_Q1A(SHARED_ST, 0x0001, B6, LDST, 0x00000004, 1, 1),
_Q1B(SHARED_ST_REPLAY, 0x0001, B6, REPLAY, 0x0000000c, 1, 1),
_Q1B(SM_CTA_LAUNCHED, 0x0001, B6, WARP, 0x0000001c, 1, 1),
_Q1A(THREADS_LAUNCHED, 0x003f, B6, LAUNCH, 0x398a4188, 1, 1),
_Q1B(UNCACHED_GLD_TRANSACTIONS, 0x0001, B6, MEM, 0x00000000, 1, 1),
_Q1A(WARPS_LAUNCHED, 0x0001, B6, LAUNCH, 0x00000004, 1, 1),
_M2AB(IPC, 0x3, B6, EXEC, 0x398, 0xffff, LOGOP, WARP, 0x0, DIV_SUM_M0, 10, 1),
_M2AB(IPAC, 0x3, B6, EXEC, 0x398, 0x1, B6, WARP, 0x0, AVG_DIV_MM, 10, 1),
_M2A(IPEC, 0x3, B6, EXEC, 0x398, 0xe, LOGOP, EXEC, 0x398, AVG_DIV_MM, 10, 1),
_M2A(INST_REPLAY_OHEAD, 0x3, B6, ISSUE, 0x104, 0x3, B6, EXEC, 0x398, REL_SUM_MM, 100, 1),
_M2B(MP_OCCUPANCY, 0x3f, B6, WARP, 0x31483104, 0x01, B6, WARP, 0x0, AVG_DIV_MM, 200, 64),
_M2B(MP_EFFICIENCY, 0x01, B6, WARP, 0x0, 0xffff, LOGOP, WARP, 0x0, AVG_DIV_M0, 100, 1),
};
#undef _Q1A
#undef _Q1B
#undef _M2A
#undef _M2B
/* === PERFORMANCE MONITORING COUNTERS for NVC0:NVE4 === */
static const uint64_t nvc0_read_hw_sm_counters_code[] =
{
/* mov b32 $r8 $tidx
* mov b32 $r9 $physid
* mov b32 $r0 $pm0
* mov b32 $r1 $pm1
* mov b32 $r2 $pm2
* mov b32 $r3 $pm3
* mov b32 $r4 $pm4
* mov b32 $r5 $pm5
* mov b32 $r6 $pm6
* mov b32 $r7 $pm7
* set $p0 0x1 eq u32 $r8 0x0
* mov b32 $r10 c0[0x0]
* mov b32 $r11 c0[0x4]
* ext u32 $r8 $r9 0x414
* (not $p0) exit
* mul $r8 u32 $r8 u32 36
* add b32 $r10 $c $r10 $r8
* add b32 $r11 $r11 0x0 $c
* mov b32 $r8 c0[0x8]
* st b128 wt g[$r10d+0x00] $r0q
* st b128 wt g[$r10d+0x10] $r4q
* st b32 wt g[$r10d+0x20] $r8
* exit */
0x2c00000084021c04ULL,
0x2c0000000c025c04ULL,
0x2c00000010001c04ULL,
0x2c00000014005c04ULL,
0x2c00000018009c04ULL,
0x2c0000001c00dc04ULL,
0x2c00000020011c04ULL,
0x2c00000024015c04ULL,
0x2c00000028019c04ULL,
0x2c0000002c01dc04ULL,
0x190e0000fc81dc03ULL,
0x2800400000029de4ULL,
0x280040001002dde4ULL,
0x7000c01050921c03ULL,
0x80000000000021e7ULL,
0x1000000090821c02ULL,
0x4801000020a29c03ULL,
0x0800000000b2dc42ULL,
0x2800400020021de4ULL,
0x9400000000a01fc5ULL,
0x9400000040a11fc5ULL,
0x9400000080a21f85ULL,
0x8000000000001de7ULL
};
#define _Q(n, f, m, g, c, s0, s1, s2, s3, s4, s5) [NVC0_HW_SM_QUERY_##n] = { { { f, NVC0_COMPUTE_MP_PM_OP_MODE_##m, c, 0, g, s0|(s1 << 8)|(s2 << 16)|(s3 << 24)|(s4##ULL << 32)|(s5##ULL << 40) }, {}, {}, {} }, 1, NVC0_COUNTER_OPn_SUM, { 1, 1 } }
static const struct nvc0_hw_sm_query_cfg nvc0_hw_sm_queries[] =
{
_Q(ACTIVE_CYCLES, 0xaaaa, LOGOP, 0x11, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(ACTIVE_WARPS, 0xaaaa, LOGOP, 0x24, 6, 0x10, 0x21, 0x32, 0x43, 0x54, 0x65),
_Q(ATOM_COUNT, 0xaaaa, LOGOP, 0x63, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(BRANCH, 0xaaaa, LOGOP, 0x1a, 2, 0x00, 0x11, 0x00, 0x00, 0x00, 0x00),
_Q(DIVERGENT_BRANCH, 0xaaaa, LOGOP, 0x19, 2, 0x20, 0x31, 0x00, 0x00, 0x00, 0x00),
_Q(GLD_REQUEST, 0xaaaa, LOGOP, 0x64, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(GRED_COUNT, 0xaaaa, LOGOP, 0x63, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(GST_REQUEST, 0xaaaa, LOGOP, 0x64, 1, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_EXECUTED, 0xaaaa, LOGOP, 0x2d, 3, 0x00, 0x11, 0x22, 0x00, 0x00, 0x00),
_Q(INST_ISSUED1_0, 0xaaaa, LOGOP, 0x7e, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED1_1, 0xaaaa, LOGOP, 0x7e, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED2_0, 0xaaaa, LOGOP, 0x7e, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(INST_ISSUED2_1, 0xaaaa, LOGOP, 0x7e, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(LOCAL_LD, 0xaaaa, LOGOP, 0x64, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(LOCAL_ST, 0xaaaa, LOGOP, 0x64, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_0, 0xaaaa, LOGOP, 0x01, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_1, 0xaaaa, LOGOP, 0x01, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_2, 0xaaaa, LOGOP, 0x01, 1, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_3, 0xaaaa, LOGOP, 0x01, 1, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_4, 0xaaaa, LOGOP, 0x01, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_5, 0xaaaa, LOGOP, 0x01, 1, 0x50, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_6, 0xaaaa, LOGOP, 0x01, 1, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(PROF_TRIGGER_7, 0xaaaa, LOGOP, 0x01, 1, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(SHARED_LD, 0xaaaa, LOGOP, 0x64, 1, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(SHARED_ST, 0xaaaa, LOGOP, 0x64, 1, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00),
_Q(THREADS_LAUNCHED, 0xaaaa, LOGOP, 0x26, 6, 0x10, 0x21, 0x32, 0x43, 0x54, 0x65),
_Q(TH_INST_EXECUTED_0, 0xaaaa, LOGOP, 0xa3, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_1, 0xaaaa, LOGOP, 0xa5, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_2, 0xaaaa, LOGOP, 0xa4, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(TH_INST_EXECUTED_3, 0xaaaa, LOGOP, 0xa6, 6, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55),
_Q(WARPS_LAUNCHED, 0xaaaa, LOGOP, 0x26, 1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00),
};
#undef _Q
static const struct nvc0_hw_sm_query_cfg *
nvc0_hw_sm_query_get_cfg(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
struct nvc0_screen *screen = nvc0->screen;
struct nvc0_query *q = &hq->base;
if (screen->base.class_3d >= NVE4_3D_CLASS)
return &nve4_hw_sm_queries[q->type - PIPE_QUERY_DRIVER_SPECIFIC];
return &nvc0_hw_sm_queries[q->type - NVC0_HW_SM_QUERY(0)];
}
static void
nvc0_hw_sm_destroy_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
struct nvc0_query *q = &hq->base;
q->funcs->destroy_query(nvc0, q);
}
static boolean
nvc0_hw_sm_begin_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
struct nvc0_screen *screen = nvc0->screen;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
const struct nvc0_hw_sm_query_cfg *cfg;
unsigned i, c;
unsigned num_ab[2] = { 0, 0 };
cfg = nvc0_hw_sm_query_get_cfg(nvc0, hq);
/* check if we have enough free counter slots */
for (i = 0; i < cfg->num_counters; ++i)
num_ab[cfg->ctr[i].sig_dom]++;
if (screen->pm.num_hw_sm_active[0] + num_ab[0] > 4 ||
screen->pm.num_hw_sm_active[1] + num_ab[1] > 4) {
NOUVEAU_ERR("Not enough free MP counter slots !\n");
return false;
}
assert(cfg->num_counters <= 4);
PUSH_SPACE(push, 4 * 8 * (is_nve4 ? 1 : 6) + 6);
if (!screen->pm.mp_counters_enabled) {
screen->pm.mp_counters_enabled = true;
BEGIN_NVC0(push, SUBC_SW(0x06ac), 1);
PUSH_DATA (push, 0x1fcb);
}
/* set sequence field to 0 (used to check if result is available) */
for (i = 0; i < screen->mp_count; ++i)
hq->data[i * 10 + 10] = 0;
hq->sequence++;
for (i = 0; i < cfg->num_counters; ++i) {
const unsigned d = cfg->ctr[i].sig_dom;
if (!screen->pm.num_hw_sm_active[d]) {
uint32_t m = (1 << 22) | (1 << (7 + (8 * !d)));
if (screen->pm.num_hw_sm_active[!d])
m |= 1 << (7 + (8 * d));
BEGIN_NVC0(push, SUBC_SW(0x0600), 1);
PUSH_DATA (push, m);
}
screen->pm.num_hw_sm_active[d]++;
for (c = d * 4; c < (d * 4 + 4); ++c) {
if (!screen->pm.mp_counter[c]) {
hsq->ctr[i] = c;
screen->pm.mp_counter[c] = hsq;
break;
}
}
assert(c <= (d * 4 + 3)); /* must succeed, already checked for space */
/* configure and reset the counter(s) */
if (is_nve4) {
if (d == 0)
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_A_SIGSEL(c & 3)), 1);
else
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_B_SIGSEL(c & 3)), 1);
PUSH_DATA (push, cfg->ctr[i].sig_sel);
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SRCSEL(c)), 1);
PUSH_DATA (push, cfg->ctr[i].src_sel + 0x2108421 * (c & 3));
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 1);
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_SET(c)), 1);
PUSH_DATA (push, 0);
} else {
unsigned s;
for (s = 0; s < cfg->ctr[i].num_src; s++) {
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SIGSEL(s)), 1);
PUSH_DATA (push, cfg->ctr[i].sig_sel);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SRCSEL(s)), 1);
PUSH_DATA (push, (cfg->ctr[i].src_sel >> (s * 8)) & 0xff);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_OP(s)), 1);
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_SET(s)), 1);
PUSH_DATA (push, 0);
}
}
}
return true;
}
static void
nvc0_hw_sm_end_query(struct nvc0_context *nvc0, struct nvc0_hw_query *hq)
{
struct nvc0_screen *screen = nvc0->screen;
struct pipe_context *pipe = &nvc0->base.pipe;
struct nouveau_pushbuf *push = nvc0->base.pushbuf;
const bool is_nve4 = screen->base.class_3d >= NVE4_3D_CLASS;
struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
uint32_t mask;
uint32_t input[3];
const uint block[3] = { 32, is_nve4 ? 4 : 1, 1 };
const uint grid[3] = { screen->mp_count, 1, 1 };
unsigned c;
const struct nvc0_hw_sm_query_cfg *cfg;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, hq);
if (unlikely(!screen->pm.prog)) {
struct nvc0_program *prog = CALLOC_STRUCT(nvc0_program);
prog->type = PIPE_SHADER_COMPUTE;
prog->translated = true;
prog->num_gprs = 14;
prog->parm_size = 12;
if (is_nve4) {
prog->code = (uint32_t *)nve4_read_hw_sm_counters_code;
prog->code_size = sizeof(nve4_read_hw_sm_counters_code);
} else {
prog->code = (uint32_t *)nvc0_read_hw_sm_counters_code;
prog->code_size = sizeof(nvc0_read_hw_sm_counters_code);
}
screen->pm.prog = prog;
}
/* disable all counting */
PUSH_SPACE(push, 8);
for (c = 0; c < 8; ++c)
if (screen->pm.mp_counter[c]) {
if (is_nve4) {
IMMED_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(c)), 0);
} else {
IMMED_NVC0(push, NVC0_COMPUTE(MP_PM_OP(c)), 0);
}
}
/* release counters for this query */
for (c = 0; c < 8; ++c) {
if (screen->pm.mp_counter[c] == hsq) {
screen->pm.num_hw_sm_active[c / 4]--;
screen->pm.mp_counter[c] = NULL;
}
}
BCTX_REFN_bo(nvc0->bufctx_cp, CP_QUERY, NOUVEAU_BO_GART | NOUVEAU_BO_WR,
hq->bo);
PUSH_SPACE(push, 1);
IMMED_NVC0(push, SUBC_COMPUTE(NV50_GRAPH_SERIALIZE), 0);
pipe->bind_compute_state(pipe, screen->pm.prog);
input[0] = (hq->bo->offset + hq->base_offset);
input[1] = (hq->bo->offset + hq->base_offset) >> 32;
input[2] = hq->sequence;
pipe->launch_grid(pipe, block, grid, 0, input);
nouveau_bufctx_reset(nvc0->bufctx_cp, NVC0_BIND_CP_QUERY);
/* re-activate other counters */
PUSH_SPACE(push, 16);
mask = 0;
for (c = 0; c < 8; ++c) {
unsigned i;
hsq = screen->pm.mp_counter[c];
if (!hsq)
continue;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, &hsq->base);
for (i = 0; i < cfg->num_counters; ++i) {
if (mask & (1 << hsq->ctr[i]))
break;
mask |= 1 << hsq->ctr[i];
if (is_nve4) {
BEGIN_NVC0(push, NVE4_COMPUTE(MP_PM_FUNC(hsq->ctr[i])), 1);
} else {
BEGIN_NVC0(push, NVC0_COMPUTE(MP_PM_OP(hsq->ctr[i])), 1);
}
PUSH_DATA (push, (cfg->ctr[i].func << 4) | cfg->ctr[i].mode);
}
}
}
static inline bool
nvc0_hw_sm_query_read_data(uint32_t count[32][4],
struct nvc0_context *nvc0, bool wait,
struct nvc0_hw_query *hq,
const struct nvc0_hw_sm_query_cfg *cfg,
unsigned mp_count)
{
struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
unsigned p, c;
for (p = 0; p < mp_count; ++p) {
const unsigned b = (0x24 / 4) * p;
for (c = 0; c < cfg->num_counters; ++c) {
if (hq->data[b + 8] != hq->sequence) {
if (!wait)
return false;
if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->base.client))
return false;
}
count[p][c] = hq->data[b + hsq->ctr[c]];
}
}
return true;
}
static inline bool
nve4_hw_sm_query_read_data(uint32_t count[32][4],
struct nvc0_context *nvc0, bool wait,
struct nvc0_hw_query *hq,
const struct nvc0_hw_sm_query_cfg *cfg,
unsigned mp_count)
{
struct nvc0_hw_sm_query *hsq = nvc0_hw_sm_query(hq);
unsigned p, c, d;
for (p = 0; p < mp_count; ++p) {
const unsigned b = (0x60 / 4) * p;
for (c = 0; c < cfg->num_counters; ++c) {
count[p][c] = 0;
for (d = 0; d < ((hsq->ctr[c] & ~3) ? 1 : 4); ++d) {
if (hq->data[b + 20 + d] != hq->sequence) {
if (!wait)
return false;
if (nouveau_bo_wait(hq->bo, NOUVEAU_BO_RD, nvc0->base.client))
return false;
}
if (hsq->ctr[c] & ~0x3)
count[p][c] = hq->data[b + 16 + (hsq->ctr[c] & 3)];
else
count[p][c] += hq->data[b + d * 4 + hsq->ctr[c]];
}
}
}
return true;
}
/* Metric calculations:
* sum(x) ... sum of x over all MPs
* avg(x) ... average of x over all MPs
*
* IPC : sum(inst_executed) / clock
* INST_REPLAY_OHEAD: (sum(inst_issued) - sum(inst_executed)) / sum(inst_issued)
* MP_OCCUPANCY : avg((active_warps / 64) / active_cycles)
* MP_EFFICIENCY : avg(active_cycles / clock)
*
* NOTE: Interpretation of IPC requires knowledge of MP count.
*/
static boolean
nvc0_hw_sm_get_query_result(struct nvc0_context *nvc0, struct nvc0_hw_query *hq,
boolean wait, union pipe_query_result *result)
{
uint32_t count[32][4];
uint64_t value = 0;
unsigned mp_count = MIN2(nvc0->screen->mp_count_compute, 32);
unsigned p, c;
const struct nvc0_hw_sm_query_cfg *cfg;
bool ret;
cfg = nvc0_hw_sm_query_get_cfg(nvc0, hq);
if (nvc0->screen->base.class_3d >= NVE4_3D_CLASS)
ret = nve4_hw_sm_query_read_data(count, nvc0, wait, hq, cfg, mp_count);
else
ret = nvc0_hw_sm_query_read_data(count, nvc0, wait, hq, cfg, mp_count);
if (!ret)
return false;
if (cfg->op == NVC0_COUNTER_OPn_SUM) {
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
value += count[p][c];
value = (value * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OPn_OR) {
uint32_t v = 0;
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
v |= count[p][c];
value = ((uint64_t)v * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OPn_AND) {
uint32_t v = ~0;
for (c = 0; c < cfg->num_counters; ++c)
for (p = 0; p < mp_count; ++p)
v &= count[p][c];
value = ((uint64_t)v * cfg->norm[0]) / cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OP2_REL_SUM_MM) {
uint64_t v[2] = { 0, 0 };
for (p = 0; p < mp_count; ++p) {
v[0] += count[p][0];
v[1] += count[p][1];
}
if (v[0])
value = ((v[0] - v[1]) * cfg->norm[0]) / (v[0] * cfg->norm[1]);
} else
if (cfg->op == NVC0_COUNTER_OP2_DIV_SUM_M0) {
for (p = 0; p < mp_count; ++p)
value += count[p][0];
if (count[0][1])
value = (value * cfg->norm[0]) / (count[0][1] * cfg->norm[1]);
else
value = 0;
} else
if (cfg->op == NVC0_COUNTER_OP2_AVG_DIV_MM) {
unsigned mp_used = 0;
for (p = 0; p < mp_count; ++p, mp_used += !!count[p][0])
if (count[p][1])
value += (count[p][0] * cfg->norm[0]) / count[p][1];
if (mp_used)
value /= (uint64_t)mp_used * cfg->norm[1];
} else
if (cfg->op == NVC0_COUNTER_OP2_AVG_DIV_M0) {
unsigned mp_used = 0;
for (p = 0; p < mp_count; ++p, mp_used += !!count[p][0])
value += count[p][0];
if (count[0][1] && mp_used) {
value *= cfg->norm[0];
value /= (uint64_t)count[0][1] * mp_used * cfg->norm[1];
} else {
value = 0;
}
}
*(uint64_t *)result = value;
return true;
}
static const struct nvc0_hw_query_funcs hw_sm_query_funcs = {
.destroy_query = nvc0_hw_sm_destroy_query,
.begin_query = nvc0_hw_sm_begin_query,
.end_query = nvc0_hw_sm_end_query,
.get_query_result = nvc0_hw_sm_get_query_result,
};
struct nvc0_hw_query *
nvc0_hw_sm_create_query(struct nvc0_context *nvc0, unsigned type)
{
struct nvc0_screen *screen = nvc0->screen;
struct nvc0_hw_sm_query *hsq;
struct nvc0_hw_query *hq;
unsigned space;
if (nvc0->screen->base.device->drm_version < 0x01000101)
return NULL;
if ((type < NVE4_HW_SM_QUERY(0) || type > NVE4_HW_SM_QUERY_LAST) &&
(type < NVC0_HW_SM_QUERY(0) || type > NVC0_HW_SM_QUERY_LAST))
return NULL;
hsq = CALLOC_STRUCT(nvc0_hw_sm_query);
if (!hsq)
return NULL;
hq = &hsq->base;
hq->funcs = &hw_sm_query_funcs;
hq->base.type = type;
if (screen->base.class_3d >= NVE4_3D_CLASS) {
/* for each MP:
* [00] = WS0.C0
* [04] = WS0.C1
* [08] = WS0.C2
* [0c] = WS0.C3
* [10] = WS1.C0
* [14] = WS1.C1
* [18] = WS1.C2
* [1c] = WS1.C3
* [20] = WS2.C0
* [24] = WS2.C1
* [28] = WS2.C2
* [2c] = WS2.C3
* [30] = WS3.C0
* [34] = WS3.C1
* [38] = WS3.C2
* [3c] = WS3.C3
* [40] = MP.C4
* [44] = MP.C5
* [48] = MP.C6
* [4c] = MP.C7
* [50] = WS0.sequence
* [54] = WS1.sequence
* [58] = WS2.sequence
* [5c] = WS3.sequence
*/
space = (4 * 4 + 4 + 4) * nvc0->screen->mp_count * sizeof(uint32_t);
} else {
/* for each MP:
* [00] = MP.C0
* [04] = MP.C1
* [08] = MP.C2
* [0c] = MP.C3
* [10] = MP.C4
* [14] = MP.C5
* [18] = MP.C6
* [1c] = MP.C7
* [20] = MP.sequence
*/
space = (8 + 1) * nvc0->screen->mp_count * sizeof(uint32_t);
}
if (!nvc0_hw_query_allocate(nvc0, &hq->base, space)) {
FREE(hq);
return NULL;
}
return hq;
}
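As a worked example of the OP2 metric paths above: MP_OCCUPANCY is declared _M2B(..., AVG_DIV_MM, 200, 64), so for each MP the active_warps counter is divided by active_cycles, the quotients are averaged over the MPs that saw work, and the result is scaled by 200/64 — the same x2 warp normalization the standalone ACTIVE_WARPS query uses, times 100 percent, over the 64-warp maximum per MP. A standalone sketch of that computation (illustration only):

/* Mirrors the NVC0_COUNTER_OP2_AVG_DIV_MM branch of
 * nvc0_hw_sm_get_query_result for MP_OCCUPANCY (norm = { 200, 64 }):
 * count[p][0] = active_warps, count[p][1] = active_cycles. */
static uint64_t
mp_occupancy_sketch(const uint32_t count[][4], unsigned mp_count)
{
   uint64_t value = 0;
   unsigned p, mp_used = 0;
   for (p = 0; p < mp_count; ++p, mp_used += !!count[p][0])
      if (count[p][1])
         value += count[p][0] * 200ull / count[p][1];
   return mp_used ? value / ((uint64_t)mp_used * 64) : 0;
}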

src/gallium/drivers/nouveau/nvc0/nvc0_query_hw_sm.h (new file)

@@ -0,0 +1,117 @@
#ifndef __NVC0_QUERY_HW_SM_H__
#define __NVC0_QUERY_HW_SM_H__
#include "nvc0_query_hw.h"
struct nvc0_hw_sm_query {
struct nvc0_hw_query base;
int8_t ctr[4];
};
static inline struct nvc0_hw_sm_query *
nvc0_hw_sm_query(struct nvc0_hw_query *hq)
{
return (struct nvc0_hw_sm_query *)hq;
}
/*
* Performance counter queries:
*/
#define NVE4_HW_SM_QUERY(i) (PIPE_QUERY_DRIVER_SPECIFIC + (i))
#define NVE4_HW_SM_QUERY_LAST NVE4_HW_SM_QUERY(NVE4_HW_SM_QUERY_COUNT - 1)
enum nve4_hw_sm_queries
{
NVE4_HW_SM_QUERY_ACTIVE_CYCLES = 0,
NVE4_HW_SM_QUERY_ACTIVE_WARPS,
NVE4_HW_SM_QUERY_ATOM_COUNT,
NVE4_HW_SM_QUERY_BRANCH,
NVE4_HW_SM_QUERY_DIVERGENT_BRANCH,
NVE4_HW_SM_QUERY_GLD_REQUEST,
NVE4_HW_SM_QUERY_GLD_MEM_DIV_REPLAY,
NVE4_HW_SM_QUERY_GST_TRANSACTIONS,
NVE4_HW_SM_QUERY_GST_MEM_DIV_REPLAY,
NVE4_HW_SM_QUERY_GRED_COUNT,
NVE4_HW_SM_QUERY_GST_REQUEST,
NVE4_HW_SM_QUERY_INST_EXECUTED,
NVE4_HW_SM_QUERY_INST_ISSUED,
NVE4_HW_SM_QUERY_INST_ISSUED1,
NVE4_HW_SM_QUERY_INST_ISSUED2,
NVE4_HW_SM_QUERY_L1_GLD_HIT,
NVE4_HW_SM_QUERY_L1_GLD_MISS,
NVE4_HW_SM_QUERY_L1_LOCAL_LD_HIT,
NVE4_HW_SM_QUERY_L1_LOCAL_LD_MISS,
NVE4_HW_SM_QUERY_L1_LOCAL_ST_HIT,
NVE4_HW_SM_QUERY_L1_LOCAL_ST_MISS,
NVE4_HW_SM_QUERY_L1_SHARED_LD_TRANSACTIONS,
NVE4_HW_SM_QUERY_L1_SHARED_ST_TRANSACTIONS,
NVE4_HW_SM_QUERY_LOCAL_LD,
NVE4_HW_SM_QUERY_LOCAL_LD_TRANSACTIONS,
NVE4_HW_SM_QUERY_LOCAL_ST,
NVE4_HW_SM_QUERY_LOCAL_ST_TRANSACTIONS,
NVE4_HW_SM_QUERY_PROF_TRIGGER_0,
NVE4_HW_SM_QUERY_PROF_TRIGGER_1,
NVE4_HW_SM_QUERY_PROF_TRIGGER_2,
NVE4_HW_SM_QUERY_PROF_TRIGGER_3,
NVE4_HW_SM_QUERY_PROF_TRIGGER_4,
NVE4_HW_SM_QUERY_PROF_TRIGGER_5,
NVE4_HW_SM_QUERY_PROF_TRIGGER_6,
NVE4_HW_SM_QUERY_PROF_TRIGGER_7,
NVE4_HW_SM_QUERY_SHARED_LD,
NVE4_HW_SM_QUERY_SHARED_LD_REPLAY,
NVE4_HW_SM_QUERY_SHARED_ST,
NVE4_HW_SM_QUERY_SHARED_ST_REPLAY,
NVE4_HW_SM_QUERY_SM_CTA_LAUNCHED,
NVE4_HW_SM_QUERY_THREADS_LAUNCHED,
NVE4_HW_SM_QUERY_UNCACHED_GLD_TRANSACTIONS,
NVE4_HW_SM_QUERY_WARPS_LAUNCHED,
NVE4_HW_SM_QUERY_METRIC_IPC,
NVE4_HW_SM_QUERY_METRIC_IPAC,
NVE4_HW_SM_QUERY_METRIC_IPEC,
NVE4_HW_SM_QUERY_METRIC_MP_OCCUPANCY,
NVE4_HW_SM_QUERY_METRIC_MP_EFFICIENCY,
NVE4_HW_SM_QUERY_METRIC_INST_REPLAY_OHEAD,
NVE4_HW_SM_QUERY_COUNT
};
#define NVC0_HW_SM_QUERY(i) (PIPE_QUERY_DRIVER_SPECIFIC + 2048 + (i))
#define NVC0_HW_SM_QUERY_LAST NVC0_HW_SM_QUERY(NVC0_HW_SM_QUERY_COUNT - 1)
enum nvc0_hw_sm_queries
{
NVC0_HW_SM_QUERY_ACTIVE_CYCLES = 0,
NVC0_HW_SM_QUERY_ACTIVE_WARPS,
NVC0_HW_SM_QUERY_ATOM_COUNT,
NVC0_HW_SM_QUERY_BRANCH,
NVC0_HW_SM_QUERY_DIVERGENT_BRANCH,
NVC0_HW_SM_QUERY_GLD_REQUEST,
NVC0_HW_SM_QUERY_GRED_COUNT,
NVC0_HW_SM_QUERY_GST_REQUEST,
NVC0_HW_SM_QUERY_INST_EXECUTED,
NVC0_HW_SM_QUERY_INST_ISSUED1_0,
NVC0_HW_SM_QUERY_INST_ISSUED1_1,
NVC0_HW_SM_QUERY_INST_ISSUED2_0,
NVC0_HW_SM_QUERY_INST_ISSUED2_1,
NVC0_HW_SM_QUERY_LOCAL_LD,
NVC0_HW_SM_QUERY_LOCAL_ST,
NVC0_HW_SM_QUERY_PROF_TRIGGER_0,
NVC0_HW_SM_QUERY_PROF_TRIGGER_1,
NVC0_HW_SM_QUERY_PROF_TRIGGER_2,
NVC0_HW_SM_QUERY_PROF_TRIGGER_3,
NVC0_HW_SM_QUERY_PROF_TRIGGER_4,
NVC0_HW_SM_QUERY_PROF_TRIGGER_5,
NVC0_HW_SM_QUERY_PROF_TRIGGER_6,
NVC0_HW_SM_QUERY_PROF_TRIGGER_7,
NVC0_HW_SM_QUERY_SHARED_LD,
NVC0_HW_SM_QUERY_SHARED_ST,
NVC0_HW_SM_QUERY_THREADS_LAUNCHED,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_0,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_1,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_2,
NVC0_HW_SM_QUERY_TH_INST_EXECUTED_3,
NVC0_HW_SM_QUERY_WARPS_LAUNCHED,
NVC0_HW_SM_QUERY_COUNT
};
struct nvc0_hw_query *
nvc0_hw_sm_create_query(struct nvc0_context *, unsigned);
#endif
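From the state-tracker side these remain ordinary driver-specific queries driven through the usual pipe_context entry points. A hedged usage sketch (assumes a valid context on a supported GPU and the 2015-era gallium query interface; error handling omitted):

/* Illustration only: sample one SM counter around a workload.
 * Needs <stdio.h> and <inttypes.h> for the printout. */
struct pipe_query *q =
   pipe->create_query(pipe, NVE4_HW_SM_QUERY(NVE4_HW_SM_QUERY_ACTIVE_CYCLES), 0);
union pipe_query_result res;

pipe->begin_query(pipe, q);
/* ... submit the draws/dispatches to be measured ... */
pipe->end_query(pipe, q);
if (pipe->get_query_result(pipe, q, true /* wait */, &res))
   printf("active_cycles: %" PRIu64 "\n", res.u64);
pipe->destroy_query(pipe, q);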

src/gallium/drivers/nouveau/nvc0/nvc0_screen.h

@@ -94,7 +94,7 @@ struct nvc0_screen {
struct {
struct nvc0_program *prog; /* compute state object to read MP counters */
struct pipe_query *mp_counter[8]; /* counter to query allocation */
struct nvc0_hw_sm_query *mp_counter[8]; /* counter to query allocation */
uint8_t num_hw_sm_active[2];
bool mp_counters_enabled;
} pm;