core.h
/*
 * Copyright (c) 2020 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __perfmon_intel_h
#define __perfmon_intel_h

#define PERF_INTEL_CODE(event, umask, edge, any, inv, cmask) \
  ((event) | (umask) << 8 | (edge) << 18 | (any) << 21 | (inv) << 23 | \
   (cmask) << 24)
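
/*
 * Worked example (illustrative): the arguments map onto the usual
 * IA32_PERFEVTSELx field positions (event select in bits 0-7, unit mask in
 * bits 8-15, edge detect in bit 18, any-thread in bit 21, invert in bit 23,
 * counter mask in bits 24-31), so
 *
 *   PERF_INTEL_CODE (0xD1, 0x01, 0, 0, 0, 0x00)
 *
 * evaluates to 0x01D1, the raw umask:event encoding of
 * MEM_LOAD_RETIRED.L1_HIT.
 */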

/* EventCode, UMask, EdgeDetect, AnyThread, Invert, CounterMask
 * counter_unit, name, suffix, description */
#define foreach_perf_intel_core_event \
  _ (0x00, 0x02, 0, 0, 0, 0x00, CPU_CLK_UNHALTED, THREAD, \
     "Core cycles when the thread is not in halt state") \
  _ (0x00, 0x03, 0, 0, 0, 0x00, CPU_CLK_UNHALTED, REF_TSC, \
     "Reference cycles when the core is not in halt state.") \
  _ (0x00, 0x04, 0, 0, 0, 0x00, TOPDOWN, SLOTS, \
     "TMA slots available for an unhalted logical processor.") \
  _ (0x00, 0x80, 0, 0, 0, 0x00, TOPDOWN, L1_RETIRING_METRIC, \
     "TMA retiring slots for an unhalted logical processor.") \
  _ (0x00, 0x81, 0, 0, 0, 0x00, TOPDOWN, L1_BAD_SPEC_METRIC, \
     "TMA bad spec slots for an unhalted logical processor.") \
  _ (0x00, 0x82, 0, 0, 0, 0x00, TOPDOWN, L1_FE_BOUND_METRIC, \
     "TMA fe bound slots for an unhalted logical processor.") \
  _ (0x00, 0x83, 0, 0, 0, 0x00, TOPDOWN, L1_BE_BOUND_METRIC, \
     "TMA be bound slots for an unhalted logical processor.") \
  _ (0x03, 0x02, 0, 0, 0, 0x00, LD_BLOCKS, STORE_FORWARD, \
     "Loads blocked due to overlapping with a preceding store that cannot be" \
     " forwarded.") \
  _ (0x03, 0x08, 0, 0, 0, 0x00, LD_BLOCKS, NO_SR, \
     "The number of times that split load operations are temporarily " \
     "blocked " \
     "because all resources for handling the split accesses are in use.") \
  _ (0x07, 0x01, 0, 0, 0, 0x00, LD_BLOCKS_PARTIAL, ADDRESS_ALIAS, \
     "False dependencies in MOB due to partial compare on address.") \
  _ (0x08, 0x01, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, MISS_CAUSES_A_WALK, \
     "Load misses in all DTLB levels that cause page walks") \
  _ (0x08, 0x02, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, WALK_COMPLETED_4K, \
     "Page walk completed due to a demand data load to a 4K page") \
  _ (0x08, 0x04, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, WALK_COMPLETED_2M_4M, \
     "Page walk completed due to a demand data load to a 2M/4M page") \
  _ (0x08, 0x08, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, WALK_COMPLETED_1G, \
     "Page walk completed due to a demand data load to a 1G page") \
  _ (0x08, 0x0E, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, WALK_COMPLETED, \
     "Load miss in all TLB levels causes a page walk that completes. (All " \
     "page sizes)") \
  _ (0x08, 0x10, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, WALK_PENDING, \
     "Counts 1 per cycle for each PMH that is busy with a page walk for a " \
     "load. EPT page walk duration are excluded in Skylake.") \
  _ (0x08, 0x20, 0, 0, 0, 0x00, DTLB_LOAD_MISSES, STLB_HIT, \
     "Loads that miss the DTLB and hit the STLB.") \
  _ (0x0D, 0x01, 0, 0, 0, 0x00, INT_MISC, RECOVERY_CYCLES, \
     "Core cycles the allocator was stalled due to recovery from earlier " \
     "clear event for this thread (e.g. misprediction or memory nuke)") \
  _ (0x0E, 0x01, 0, 0, 0, 0x00, UOPS_ISSUED, ANY, \
     "Uops that Resource Allocation Table (RAT) issues to Reservation " \
     "Station (RS)") \
  _ (0x28, 0x07, 0, 0, 0, 0x00, CORE_POWER, LVL0_TURBO_LICENSE, \
     "Core cycles where the core was running in a manner where Turbo may be " \
     "clipped to the Non-AVX turbo schedule.") \
  _ (0x28, 0x18, 0, 0, 0, 0x00, CORE_POWER, LVL1_TURBO_LICENSE, \
     "Core cycles where the core was running in a manner where Turbo may be " \
     "clipped to the AVX2 turbo schedule.") \
  _ (0x28, 0x20, 0, 0, 0, 0x00, CORE_POWER, LVL2_TURBO_LICENSE, \
     "Core cycles where the core was running in a manner where Turbo may be " \
     "clipped to the AVX512 turbo schedule.") \
  _ (0x28, 0x40, 0, 0, 0, 0x00, CORE_POWER, THROTTLE, \
     "Core cycles the core was throttled due to a pending power level " \
     "request.") \
  _ (0x3C, 0x00, 0, 0, 0, 0x00, CPU_CLK_UNHALTED, THREAD_P, \
     "Thread cycles when thread is not in halt state") \
  _ (0x3C, 0x00, 0, 1, 0, 0x00, CPU_CLK_UNHALTED, THREAD_P_ANY, \
     "Core cycles when at least one thread on the physical core is not in " \
     "halt state.") \
  _ (0x3C, 0x00, 1, 0, 0, 0x01, CPU_CLK_UNHALTED, RING0_TRANS, \
     "Counts when there is a transition from ring 1, 2 or 3 to ring 0.") \
  _ (0x48, 0x01, 0, 0, 0, 0x01, L1D_PEND_MISS, PENDING_CYCLES, \
     "Cycles with L1D load Misses outstanding.") \
  _ (0x48, 0x01, 0, 0, 0, 0x00, L1D_PEND_MISS, PENDING, \
     "L1D miss outstandings duration in cycles") \
  _ (0x48, 0x02, 0, 0, 0, 0x00, L1D_PEND_MISS, FB_FULL, \
     "Number of times a request needed a FB entry but there was no entry " \
     "available for it. That is the FB unavailability was dominant reason " \
     "for blocking the request. A request includes cacheable/uncacheable " \
     "demands that is load, store or SW prefetch.") \
  _ (0x51, 0x01, 0, 0, 0, 0x00, L1D, REPLACEMENT, \
     "L1D data line replacements") \
  _ (0x51, 0x04, 0, 0, 0, 0x00, L1D, M_EVICT, "L1D data line evictions") \
  _ (0x83, 0x02, 0, 0, 0, 0x00, ICACHE_64B, IFTAG_MISS, \
     "Instruction fetch tag lookups that miss in the instruction cache " \
     "(L1I). Counts at 64-byte cache-line granularity.") \
  _ (0x9C, 0x01, 0, 0, 0, 0x00, IDQ_UOPS_NOT_DELIVERED, CORE, \
     "Uops not delivered to Resource Allocation Table (RAT) per thread when " \
     "backend of the machine is not stalled") \
  _ (0xA2, 0x08, 0, 0, 0, 0x00, RESOURCE_STALLS, SB, \
     "Counts allocation stall cycles caused by the store buffer (SB) being " \
     "full. This counts cycles that the pipeline back-end blocked uop " \
     "delivery from the front-end.") \
  _ (0xA3, 0x04, 0, 0, 0, 0x04, CYCLE_ACTIVITY, CYCLES_NO_EXECUTE, \
     "This event counts cycles during which no instructions were executed in" \
     " the execution stage of the pipeline.") \
  _ (0xA3, 0x05, 0, 0, 0, 0x05, CYCLE_ACTIVITY, STALLS_L2_MISS, \
     "Execution stalls while L2 cache miss demand load is outstanding") \
  _ (0xA3, 0x06, 0, 0, 0, 0x06, CYCLE_ACTIVITY, STALLS_L3_MISS, \
     "Execution stalls while L3 cache miss demand load is outstanding") \
  _ (0xA3, 0x0C, 0, 0, 0, 0x0C, CYCLE_ACTIVITY, STALLS_L1D_MISS, \
     "Execution stalls while L1 cache miss demand load is outstanding") \
  _ (0xA3, 0x14, 0, 0, 0, 0x14, CYCLE_ACTIVITY, STALLS_MEM_ANY, \
     "Execution stalls while memory subsystem has an outstanding load.") \
  _ (0xC0, 0x00, 0, 0, 0, 0x00, INST_RETIRED, ANY_P, \
     "Number of instructions retired. General Counter - architectural event") \
  _ (0xC2, 0x02, 0, 0, 0, 0x00, UOPS_RETIRED, RETIRE_SLOTS, \
     "Retirement slots used.") \
  _ (0xC4, 0x00, 0, 0, 0, 0x00, BR_INST_RETIRED, ALL_BRANCHES, \
     "Counts all (macro) branch instructions retired.") \
  _ (0xC5, 0x00, 0, 0, 0, 0x00, BR_MISP_RETIRED, ALL_BRANCHES, \
     "All mispredicted macro branch instructions retired.") \
  _ (0xC4, 0x20, 0, 0, 0, 0x00, BR_INST_RETIRED, NEAR_TAKEN, \
     "Taken branch instructions retired.") \
  _ (0xD0, 0x81, 0, 0, 0, 0x00, MEM_INST_RETIRED, ALL_LOADS, \
     "All retired load instructions.") \
  _ (0xD0, 0x82, 0, 0, 0, 0x00, MEM_INST_RETIRED, ALL_STORES, \
     "All retired store instructions.") \
  _ (0xD1, 0x01, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L1_HIT, \
     "Retired load instructions with L1 cache hits as data sources") \
  _ (0xD1, 0x02, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L2_HIT, \
     "Retired load instructions with L2 cache hits as data sources") \
  _ (0xD1, 0x04, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L3_HIT, \
     "Retired load instructions with L3 cache hits as data sources") \
  _ (0xD1, 0x08, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L1_MISS, \
     "Retired load instructions missed L1 cache as data sources") \
  _ (0xD1, 0x10, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L2_MISS, \
     "Retired load instructions missed L2 cache as data sources") \
  _ (0xD1, 0x20, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, L3_MISS, \
     "Retired load instructions missed L3 cache as data sources") \
  _ (0xD1, 0x40, 0, 0, 0, 0x00, MEM_LOAD_RETIRED, FB_HIT, \
     "Retired load instructions which data sources were load missed L1 but " \
     "hit FB due to preceding miss to the same cache line with data not " \
     "ready") \
  _ (0xD2, 0x01, 0, 0, 0, 0x00, MEM_LOAD_L3_HIT_RETIRED, XSNP_MISS, \
     "Retired load instructions which data sources were L3 hit and cross-" \
     "core snoop missed in on-pkg core cache.") \
  _ (0xD2, 0x02, 0, 0, 0, 0x00, MEM_LOAD_L3_HIT_RETIRED, XSNP_HIT, \
     "Retired load instructions which data sources were L3 and cross-core " \
     "snoop hits in on-pkg core cache") \
  _ (0xD2, 0x04, 0, 0, 0, 0x00, MEM_LOAD_L3_HIT_RETIRED, XSNP_HITM, \
     "Retired load instructions which data sources were HitM responses from " \
     "shared L3") \
  _ (0xD2, 0x08, 0, 0, 0, 0x00, MEM_LOAD_L3_HIT_RETIRED, XSNP_NONE, \
     "Retired load instructions which data sources were hits in L3 without " \
     "snoops required") \
  _ (0xD3, 0x01, 0, 0, 0, 0x00, MEM_LOAD_L3_MISS_RETIRED, LOCAL_DRAM, \
     "Retired load instructions which data sources missed L3 but serviced " \
     "from local dram") \
  _ (0xD3, 0x02, 0, 0, 0, 0x00, MEM_LOAD_L3_MISS_RETIRED, REMOTE_DRAM, \
     "Retired load instructions which data sources missed L3 but serviced " \
     "from remote dram") \
  _ (0xD3, 0x04, 0, 0, 0, 0x00, MEM_LOAD_L3_MISS_RETIRED, REMOTE_HITM, \
     "Retired load instructions whose data sources was remote HITM") \
  _ (0xD3, 0x08, 0, 0, 0, 0x00, MEM_LOAD_L3_MISS_RETIRED, REMOTE_FWD, \
     "Retired load instructions whose data sources was forwarded from a " \
     "remote cache") \
  _ (0xF0, 0x40, 0, 0, 0, 0x00, L2_TRANS, L2_WB, \
     "L2 writebacks that access L2 cache") \
  _ (0xF1, 0x1F, 0, 0, 0, 0x00, L2_LINES_IN, ALL, \
     "L2 cache lines filling L2") \
  _ (0xF4, 0x04, 0, 0, 0, 0x00, SQ_MISC, SQ_FULL, \
     "Counts the cycles for which the thread is active and the superQ cannot " \
     "take any more entries.") \
  _ (0xFE, 0x02, 0, 0, 0, 0x00, IDI_MISC, WB_UPGRADE, \
     "Counts number of cache lines that are allocated and written back to L3" \
     " with the intention that they are more likely to be reused shortly") \
  _ (0xFE, 0x04, 0, 0, 0, 0x00, IDI_MISC, WB_DOWNGRADE, \
     "Counts number of cache lines that are dropped and not written back to " \
     "L3 as they are deemed to be less likely to be reused shortly")
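
/*
 * Usage sketch (illustrative; the type and table names below are
 * hypothetical and not part of this header): a consumer can instantiate the
 * `_' X-macro to turn the event table above into a static array that pairs
 * each raw PERFEVTSEL encoding with a printable name and its description.
 */
typedef struct
{
  unsigned int code; /* PERF_INTEL_CODE () encoding of the event */
  const char *name;  /* e.g. "CPU_CLK_UNHALTED.THREAD" */
  const char *desc;  /* description string from the table */
} intel_core_event_desc_t;

static const intel_core_event_desc_t intel_core_event_descs[] = {
#define _(event, umask, edge, any, inv, cmask, name, suffix, desc) \
  { PERF_INTEL_CODE (event, umask, edge, any, inv, cmask), \
    #name "." #suffix, desc },
  foreach_perf_intel_core_event
#undef _
};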

typedef enum
{
#define _(event, umask, edge, any, inv, cmask, name, suffix, desc) \
  INTEL_CORE_E_##name##_##suffix,
  foreach_perf_intel_core_event
#undef _
  INTEL_CORE_N_EVENTS,
} perf_intel_core_event_t;

#endif