#include <linux/errno.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
-#include <linux/ratelimit.h>
#include <asm/asm-offsets.h>
#include <asm/sclp.h>
struct cpu_inf ifl;
};
+/*
+ * STHYI requires extensive locking in the higher hypervisors
+ * and is expensive in both CPU time and memory. Therefore we
+ * cache the retrieved data; it stays valid for one second.
+ */
+#define CACHE_VALID_JIFFIES HZ
+
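+/*
+ * info points to a page holding the most recently built response;
+ * end is the jiffies value at which that data expires.
+ */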
+struct sthyi_info {
+ void *info;
+ unsigned long end;
+};
+
+static DEFINE_MUTEX(sthyi_mutex);
+static struct sthyi_info sthyi_cache;
+
static inline u64 cpu_id(u8 ctidx, void *diag224_buf)
{
return *((u64 *)(diag224_buf + (ctidx + 1) * DIAG204_CPU_NAME_LEN));
return cc;
}
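+/*
+ * Build the STHYI response at @dst: run the instruction when the
+ * facility is available, otherwise emulate it from STSI and diag data.
+ */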
+static int fill_dst(void *dst, u64 *rc)
+{
+ struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
+
+ /*
+ * If the facility is on, we don't want to emulate the instruction.
+ * We ask the hypervisor to provide the data.
+ */
+ if (test_facility(74))
+ return sthyi((u64)dst, rc);
+
+ fill_hdr(sctns);
+ fill_stsi(sctns);
+ fill_diag(sctns);
+ *rc = 0;
+ return 0;
+}
+
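+/*
+ * Allocate the cache page on first use and mark it as already expired
+ * so that the first request triggers a refresh.
+ */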
+static int sthyi_init_cache(void)
+{
+ if (sthyi_cache.info)
+ return 0;
+ sthyi_cache.info = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!sthyi_cache.info)
+ return -ENOMEM;
+ sthyi_cache.end = jiffies - 1; /* expired */
+ return 0;
+}
+
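+/*
+ * Rebuild the cached response and push its expiry one cache period
+ * into the future. Called with sthyi_mutex held.
+ */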
+static int sthyi_update_cache(u64 *rc)
+{
+ int r;
+
+ memset(sthyi_cache.info, 0, PAGE_SIZE);
+ r = fill_dst(sthyi_cache.info, rc);
+ if (r)
+ return r;
+ sthyi_cache.end = jiffies + CACHE_VALID_JIFFIES;
+ return r;
+}
+
/*
* sthyi_fill - Fill page with data returned by the STHYI instruction
*
*/
int sthyi_fill(void *dst, u64 *rc)
{
- struct sthyi_sctns *sctns = (struct sthyi_sctns *)dst;
-
- /*
- * If the facility is on, we don't want to emulate the instruction.
- * We ask the hypervisor to provide the data.
- */
- if (test_facility(74))
- return sthyi((u64)dst, rc);
+ int r;
- fill_hdr(sctns);
- fill_stsi(sctns);
- fill_diag(sctns);
+ mutex_lock(&sthyi_mutex);
+ r = sthyi_init_cache();
+ if (r)
+ goto out;
+ if (time_is_before_jiffies(sthyi_cache.end)) {
+ /* cache expired */
+ r = sthyi_update_cache(rc);
+ if (r)
+ goto out;
+ }
*rc = 0;
- return 0;
+ memcpy(dst, sthyi_cache.info, PAGE_SIZE);
+out:
+ mutex_unlock(&sthyi_mutex);
+ return r;
}
EXPORT_SYMBOL_GPL(sthyi_fill);
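/*
 * Illustrative sketch only, not part of this patch: a hypothetical caller
 * of sthyi_fill(). The destination must be a page-sized buffer; the return
 * value is the condition code, and *rc receives the STHYI return code.
 * demo_read_sthyi() is a made-up name used purely for illustration.
 */
static int demo_read_sthyi(void)
{
	unsigned long buf;
	u64 rc = 0;
	int cc;

	buf = get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cc = sthyi_fill((void *)buf, &rc);
	if (cc < 0) {
		/* internal error, e.g. -ENOMEM from the cache setup */
		free_page(buf);
		return cc;
	}

	/* ... interpret the struct sthyi_sctns response in buf ... */

	free_page(buf);
	return 0;
}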
if (!test_kvm_facility(vcpu->kvm, 74))
return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
- /*
- * STHYI requires extensive locking in the higher hypervisors
- * and is very computational/memory expensive. Therefore we
- * ratelimit the executions per VM.
- */
- if (!__ratelimit(&vcpu->kvm->arch.sthyi_limit)) {
- kvm_s390_retry_instr(vcpu);
- return 0;
- }
-
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
code = vcpu->run->s.regs.gprs[reg1];
addr = vcpu->run->s.regs.gprs[reg2];