);
#ifdef CONFIG_KVM_ASYNC_PF
-TRACE_EVENT(
- kvm_try_async_get_page,
+DECLARE_EVENT_CLASS(kvm_async_get_page_class,
+
TP_PROTO(u64 gva, u64 gfn),
+
TP_ARGS(gva, gfn),
TP_STRUCT__entry(
- __field(u64, gva)
+ __field(__u64, gva)
__field(u64, gfn)
- ),
+ ),
TP_fast_assign(
__entry->gva = gva;
__entry->gfn = gfn;
- ),
+ ),
TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
-TRACE_EVENT(
- kvm_async_pf_not_present,
+DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_doublefault,
+
+ TP_PROTO(u64 gva, u64 gfn),
+
+ TP_ARGS(gva, gfn)
+);
+
+DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,
+
TP_PROTO(u64 token, u64 gva),
+
TP_ARGS(token, gva),
TP_STRUCT__entry(
__field(__u64, token)
__field(__u64, gva)
- ),
+ ),
TP_fast_assign(
__entry->token = token;
__entry->gva = gva;
- ),
- TP_printk("token %#llx gva %#llx not present", __entry->token,
- __entry->gva)
+ ),
+
+ TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)
);
-TRACE_EVENT(
- kvm_async_pf_ready,
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,
+
TP_PROTO(u64 token, u64 gva),
- TP_ARGS(token, gva),
- TP_STRUCT__entry(
- __field(__u64, token)
- __field(__u64, gva)
- ),
- TP_fast_assign(
- __entry->token = token;
- __entry->gva = gva;
- ),
- TP_printk("token %#llx gva %#llx ready", __entry->token, __entry->gva)
+
+ TP_ARGS(token, gva)
+);
+
+DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,
+
+ TP_PROTO(u64 token, u64 gva),
+
+ TP_ARGS(token, gva)
);
TRACE_EVENT(
__entry->address, __entry->pfn)
);
-TRACE_EVENT(
- kvm_async_pf_doublefault,
- TP_PROTO(u64 gva, u64 gfn),
- TP_ARGS(gva, gfn),
-
- TP_STRUCT__entry(
- __field(u64, gva)
- __field(u64, gfn)
- ),
-
- TP_fast_assign(
- __entry->gva = gva;
- __entry->gfn = gfn;
- ),
-
- TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
-);
-
#endif
#endif /* _TRACE_KVM_MAIN_H */
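
Note (not part of the patch): the conversion above only deduplicates the event definitions. DEFINE_EVENT() still generates a trace_<event>() helper with the prototype given in TP_PROTO(), so existing KVM call sites keep the same calls they used with TRACE_EVENT(). A minimal sketch of how the converted events are fired, assuming a kernel source file that includes the trace header; the helper function and its variable names below are illustrative only, not taken from the KVM sources:

#include <trace/events/kvm.h>

/* Sketch only: async_pf_trace_sketch() is a made-up helper used to show
 * the call signatures; it does not exist in the KVM code. */
static void async_pf_trace_sketch(u64 gva, u64 gfn, u64 token)
{
        /* Events defined with DEFINE_EVENT() are invoked exactly like
         * TRACE_EVENT() ones: trace_<name>() with the TP_PROTO arguments.
         * The calls are grouped here only for illustration; in KVM they
         * fire at different points of the async page fault path. */
        trace_kvm_try_async_get_page(gva, gfn);
        trace_kvm_async_pf_doublefault(gva, gfn);
        trace_kvm_async_pf_not_present(token, gva);
        trace_kvm_async_pf_ready(token, gva);
}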