diff -uprN linux-4.7.3-hardened/arch/x86/xen/apic.c linux-4.7.3-hardened.good/arch/x86/xen/apic.c
--- linux-4.7.3-hardened/arch/x86/xen/apic.c 2016-07-24 19:23:50.000000000 +0000
+++ linux-4.7.3-hardened.good/arch/x86/xen/apic.c 2016-09-10 20:05:21.450647009 +0000
@@ -7,7 +7,6 @@
 #include <xen/xen.h>
 #include <xen/interface/physdev.h>
 #include "xen-ops.h"
-#include "pmu.h"
 #include "smp.h"
 
 static unsigned int xen_io_apic_read(unsigned apic, unsigned reg)
@@ -73,10 +72,8 @@ static u32 xen_apic_read(u32 reg)
 
 static void xen_apic_write(u32 reg, u32 val)
 {
-	if (reg == APIC_LVTPC) {
-		(void)pmu_apic_update(reg);
+	if (reg == APIC_LVTPC)
 		return;
-	}
 
 	/* Warn to see if theres any stray references */
 	WARN(1,"register: %x, value: %x\n", reg, val);
diff -uprN linux-4.7.3-hardened/arch/x86/xen/enlighten.c linux-4.7.3-hardened.good/arch/x86/xen/enlighten.c
--- linux-4.7.3-hardened/arch/x86/xen/enlighten.c 2016-09-10 19:59:29.237313676 +0000
+++ linux-4.7.3-hardened.good/arch/x86/xen/enlighten.c 2016-09-10 20:06:49.683980342 +0000
@@ -1031,9 +1031,6 @@ static u64 xen_read_msr_safe(unsigned in
 {
 	u64 val;
 
-	if (pmu_msr_read(msr, &val, err))
-		return val;
-
 	val = native_read_msr_safe(msr, err);
 	switch (msr) {
 	case MSR_IA32_APICBASE:
@@ -1081,13 +1078,17 @@ static int xen_write_msr_safe(unsigned i
 		break;
 
 	default:
-		if (!pmu_msr_write(msr, low, high, &ret))
-			ret = native_write_msr_safe(msr, low, high);
+		ret = native_write_msr_safe(msr, low, high);
 	}
 
 	return ret;
 }
 
+unsigned long long xen_read_pmc(int counter)
+{
+	return 0;
+}
+
 static u64 xen_read_msr(unsigned int msr)
 {
 	/*
diff -uprN linux-4.7.3-hardened/arch/x86/xen/pmu.c linux-4.7.3-hardened.good/arch/x86/xen/pmu.c
--- linux-4.7.3-hardened/arch/x86/xen/pmu.c 2016-07-24 19:23:50.000000000 +0000
+++ linux-4.7.3-hardened.good/arch/x86/xen/pmu.c 2016-09-10 20:05:21.450647009 +0000
@@ -13,20 +13,11 @@
 
 /* x86_pmu.handle_irq definition */
 #include "../events/perf_event.h"
 
-#define XENPMU_IRQ_PROCESSING 1
-struct xenpmu {
-	/* Shared page between hypervisor and domain */
-	struct xen_pmu_data *xenpmu_data;
-	uint8_t flags;
-};
-static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared);
-#define get_xenpmu_data()	(this_cpu_ptr(&xenpmu_shared)->xenpmu_data)
-#define get_xenpmu_flags()	(this_cpu_ptr(&xenpmu_shared)->flags)
-
-/* Macro for computing address of a PMU MSR bank */
-#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \
-					    (uintptr_t)ctxt->field))
+/* Shared page between hypervisor and domain */
+static DEFINE_PER_CPU(struct xen_pmu_data *, xenpmu_shared);
+#define get_xenpmu_data()	per_cpu(xenpmu_shared, smp_processor_id())
+
 /* AMD PMU */
 #define F15H_NUM_COUNTERS	6
@@ -60,8 +51,6 @@ static __read_mostly int amd_num_counter
 /* Alias registers (0x4c1) for full-width writes to PMCs */
 #define MSR_PMC_ALIAS_MASK	(~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))
 
-#define INTEL_PMC_TYPE_SHIFT	30
-
 static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
@@ -178,232 +167,6 @@ static int is_intel_pmu_msr(u32 msr_inde
 	}
 }
 
-static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type,
-				  int index, bool is_read)
-{
-	uint64_t *reg = NULL;
-	struct xen_pmu_intel_ctxt *ctxt;
-	uint64_t *fix_counters;
-	struct xen_pmu_cntr_pair *arch_cntr_pair;
-	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-	uint8_t xenpmu_flags = get_xenpmu_flags();
-
-
-	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
-		return false;
-
-	ctxt = &xenpmu_data->pmu.c.intel;
-
-	switch (msr) {
-	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		reg = &ctxt->global_ovf_ctrl;
-		break;
-	case MSR_CORE_PERF_GLOBAL_STATUS:
-		reg = &ctxt->global_status;
-		break;
-	case MSR_CORE_PERF_GLOBAL_CTRL:
-		reg = &ctxt->global_ctrl;
-		break;
-	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		reg = &ctxt->fixed_ctrl;
-		break;
-	default:
-		switch (type) {
-		case MSR_TYPE_COUNTER:
-			fix_counters = field_offset(ctxt, fixed_counters);
-			reg = &fix_counters[index];
-			break;
-		case MSR_TYPE_ARCH_COUNTER:
-			arch_cntr_pair = field_offset(ctxt, arch_counters);
-			reg = &arch_cntr_pair[index].counter;
-			break;
-		case MSR_TYPE_ARCH_CTRL:
-			arch_cntr_pair = field_offset(ctxt, arch_counters);
-			reg = &arch_cntr_pair[index].control;
-			break;
-		default:
-			return false;
-		}
-	}
-
-	if (reg) {
-		if (is_read)
-			*val = *reg;
-		else {
-			*reg = *val;
-
-			if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL)
-				ctxt->global_status &= (~(*val));
-		}
-		return true;
-	}
-
-	return false;
-}
-
-static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read)
-{
-	uint64_t *reg = NULL;
-	int i, off = 0;
-	struct xen_pmu_amd_ctxt *ctxt;
-	uint64_t *counter_regs, *ctrl_regs;
-	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-	uint8_t xenpmu_flags = get_xenpmu_flags();
-
-	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING))
-		return false;
-
-	if (k7_counters_mirrored &&
-	    ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3)))
-		msr = get_fam15h_addr(msr);
-
-	ctxt = &xenpmu_data->pmu.c.amd;
-	for (i = 0; i < amd_num_counters; i++) {
-		if (msr == amd_ctrls_base + off) {
-			ctrl_regs = field_offset(ctxt, ctrls);
-			reg = &ctrl_regs[i];
-			break;
-		} else if (msr == amd_counters_base + off) {
-			counter_regs = field_offset(ctxt, counters);
-			reg = &counter_regs[i];
-			break;
-		}
-		off += amd_msr_step;
-	}
-
-	if (reg) {
-		if (is_read)
-			*val = *reg;
-		else
-			*reg = *val;
-
-		return true;
-	}
-	return false;
-}
-
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
-{
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, val, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
-	} else {
-		int type, index;
-
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, val, type, index, 1))
-				*val = native_read_msr_safe(msr, err);
-			return true;
-		}
-	}
-
-	return false;
-}
-
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
-{
-	uint64_t val = ((uint64_t)high << 32) | low;
-
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
-		if (is_amd_pmu_msr(msr)) {
-			if (!xen_amd_pmu_emulate(msr, &val, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
-	} else {
-		int type, index;
-
-		if (is_intel_pmu_msr(msr, &type, &index)) {
-			if (!xen_intel_pmu_emulate(msr, &val, type, index, 0))
-				*err = native_write_msr_safe(msr, low, high);
-			return true;
-		}
-	}
-
-	return false;
-}
-
-static unsigned long long xen_amd_read_pmc(int counter)
-{
-	struct xen_pmu_amd_ctxt *ctxt;
-	uint64_t *counter_regs;
-	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-	uint8_t xenpmu_flags = get_xenpmu_flags();
-
-	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
-
-		msr = amd_counters_base + (counter * amd_msr_step);
-		return native_read_msr_safe(msr, &err);
-	}
-
-	ctxt = &xenpmu_data->pmu.c.amd;
-	counter_regs = field_offset(ctxt, counters);
-	return counter_regs[counter];
-}
-
-static unsigned long long xen_intel_read_pmc(int counter)
-{
-	struct xen_pmu_intel_ctxt *ctxt;
-	uint64_t *fixed_counters;
-	struct xen_pmu_cntr_pair *arch_cntr_pair;
-	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-	uint8_t xenpmu_flags = get_xenpmu_flags();
-
-	if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) {
-		uint32_t msr;
-		int err;
-
-		if (counter & (1 << INTEL_PMC_TYPE_SHIFT))
-			msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
-		else
-			msr = MSR_IA32_PERFCTR0 + counter;
-
-		return native_read_msr_safe(msr, &err);
-	}
-
-	ctxt = &xenpmu_data->pmu.c.intel;
-	if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) {
-		fixed_counters = field_offset(ctxt, fixed_counters);
-		return fixed_counters[counter & 0xffff];
-	}
-
-	arch_cntr_pair = field_offset(ctxt, arch_counters);
-	return arch_cntr_pair[counter].counter;
-}
-
-unsigned long long xen_read_pmc(int counter)
-{
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return xen_amd_read_pmc(counter);
-	else
-		return xen_intel_read_pmc(counter);
-}
-
-int pmu_apic_update(uint32_t val)
-{
-	int ret;
-	struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-
-	if (!xenpmu_data) {
-		pr_warn_once("%s: pmudata not initialized\n", __func__);
-		return -EINVAL;
-	}
-
-	xenpmu_data->pmu.l.lapic_lvtpc = val;
-
-	if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING)
-		return 0;
-
-	ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
-
-	return ret;
-}
-
 /* perf callbacks */
 static int xen_is_in_guest(void)
 {
@@ -476,37 +239,26 @@ static void xen_convert_regs(const struc
 
 irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
 {
-	int err, ret = IRQ_NONE;
+	int ret = IRQ_NONE;
 	struct pt_regs regs;
 	const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
-	uint8_t xenpmu_flags = get_xenpmu_flags();
 
 	if (!xenpmu_data) {
 		pr_warn_once("%s: pmudata not initialized\n", __func__);
 		return ret;
 	}
 
-	this_cpu_ptr(&xenpmu_shared)->flags =
-		xenpmu_flags | XENPMU_IRQ_PROCESSING;
 	xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
 			 xenpmu_data->pmu.pmu_flags);
 	if (x86_pmu.handle_irq(&regs))
 		ret = IRQ_HANDLED;
 
-	/* Write out cached context to HW */
-	err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
-	this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags;
-	if (err) {
-		pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
-		return IRQ_NONE;
-	}
-
 	return ret;
 }
 
 bool is_xen_pmu(int cpu)
 {
-	return (get_xenpmu_data() != NULL);
+	return (per_cpu(xenpmu_shared, cpu) != NULL);
 }
 
 void xen_pmu_init(int cpu)
@@ -536,8 +288,7 @@ void xen_pmu_init(int cpu)
 	if (err)
 		goto fail;
 
-	per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data;
-	per_cpu(xenpmu_shared, cpu).flags = 0;
+	per_cpu(xenpmu_shared, cpu) = xenpmu_data;
 
 	if (cpu == 0) {
 		perf_register_guest_info_callbacks(&xen_guest_cbs);
@@ -565,6 +316,6 @@ void xen_pmu_finish(int cpu)
 
 	(void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp);
 
-	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0);
-	per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL;
+	free_pages((unsigned long)per_cpu(xenpmu_shared, cpu), 0);
+	per_cpu(xenpmu_shared, cpu) = NULL;
 }
diff -uprN linux-4.7.3-hardened/arch/x86/xen/pmu.h linux-4.7.3-hardened.good/arch/x86/xen/pmu.h
--- linux-4.7.3-hardened/arch/x86/xen/pmu.h 2016-07-24 19:23:50.000000000 +0000
+++ linux-4.7.3-hardened.good/arch/x86/xen/pmu.h 2016-09-10 20:05:21.453980342 +0000
@@ -7,9 +7,5 @@ irqreturn_t xen_pmu_irq_handler(int irq,
 void xen_pmu_init(int cpu);
 void xen_pmu_finish(int cpu);
 bool is_xen_pmu(int cpu);
-bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err);
-bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err);
-int pmu_apic_update(uint32_t reg);
-unsigned long long xen_read_pmc(int counter);
 
 #endif /* __XEN_PMU_H */