x86/fpu: Rename XSAVE macros
author     Dave Hansen <dave.hansen@linux.intel.com>
           Wed, 2 Sep 2015 23:31:26 +0000 (16:31 -0700)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 14 Sep 2015 10:21:46 +0000 (12:21 +0200)
There are two concepts that have some confusing naming:
 1. Extended State Component numbers (currently called
    XSTATE_BIT_*)
 2. Extended State Component masks (currently called XSTATE_*)

The numbers are (currently) from 0-9.  State component 3 is the
bounds registers for MPX, for instance.

But when we want to enable "state component 3", we go set a bit
in XCR0.  The bit we set is 1<<3.  We can check to see if a
state component feature is enabled by looking at its bit.
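
As a rough illustration of the number-vs-mask relationship (a minimal
C sketch; the EXAMPLE_* names are made up here for illustration and
are not part of this patch):

    /* the state component *number*: which component we mean */
    enum { EXAMPLE_XFEATURE_BNDREGS = 3 };
    /* the state component *mask*: the bit we set/test in XCR0 */
    #define EXAMPLE_XFEATURE_MASK_BNDREGS  (1 << EXAMPLE_XFEATURE_BNDREGS) /* 1<<3 */

    static inline int example_bndregs_enabled(unsigned long long xcr0)
    {
            /* a component is enabled iff its mask bit is set in XCR0 */
            return (xcr0 & EXAMPLE_XFEATURE_MASK_BNDREGS) != 0;
    }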

The current 'xfeature_bit's are at best xfeature bit _numbers_.
Calling them bits is also inconsistent with ending the enum
list with 'XFEATURES_NR_MAX'.

This patch renames the enum to be 'xfeature'.  These also
happen to be what the Intel documentation calls a "state
component".

We also want to differentiate these from the "XSTATE_*" macros.
The "XSTATE_*" macros are masks, so we rename them to
"XFEATURE_MASK_*" to match.
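
Concretely, for the MPX bounds-register component the rename looks
like this (taken from the fpu/types.h hunk below):

    XSTATE_BIT_BNDREGS  ->  XFEATURE_BNDREGS        (component number 3)
    XSTATE_BNDREGS      ->  XFEATURE_MASK_BNDREGS   (mask, 1 << XFEATURE_BNDREGS)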

These macros are reasonably widely used, so this patch is a
wee bit big, but this really is just a rename.

The only non-mechanical part of this is the

s/XSTATE_EXTEND_MASK/XFEATURE_MASK_EXTEND/

We need a better name for it, but that's another patch.
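
For reference, the definition behind that rename (see the
fpu/xstate.h hunk below) is:

    #define XFEATURE_MASK_EXTEND   (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))

i.e. all state components other than the legacy FP/SSE pair, with the
reserved XCR0 bit 63 also masked out; hence the note that it still
wants a better name.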

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: dave@sr71.net
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/20150902233126.38653250@viggo.jf.intel.com
[ Ported to v4.3-rc1. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
23 files changed:
arch/x86/crypto/camellia_aesni_avx2_glue.c
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/crypto/cast5_avx_glue.c
arch/x86/crypto/cast6_avx_glue.c
arch/x86/crypto/chacha20_glue.c
arch/x86/crypto/poly1305_glue.c
arch/x86/crypto/serpent_avx2_glue.c
arch/x86/crypto/serpent_avx_glue.c
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/crypto/sha256_ssse3_glue.c
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/crypto/twofish_avx_glue.c
arch/x86/include/asm/fpu/types.h
arch/x86/include/asm/fpu/xstate.h
arch/x86/kernel/fpu/init.c
arch/x86/kernel/fpu/regset.c
arch/x86/kernel/fpu/signal.c
arch/x86/kernel/fpu/xstate.c
arch/x86/kernel/traps.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/mm/mpx.c

index 4c65c70e628bb2776c17c106973b2f2b668890fb..d844569245633845da1fe485dca9c44043be680f 100644 (file)
@@ -567,7 +567,8 @@ static int __init camellia_aesni_init(void)
                return -ENODEV;
        }
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index 80a0e4389c9ad3f5e6e1f6d8bc5292e391801ff2..12e729bfe71b7b1f744972e48295989f41932d70 100644 (file)
@@ -554,7 +554,8 @@ static int __init camellia_aesni_init(void)
 {
        const char *feature_name;
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index be00aa48b2b5e3044ea8a397b0df37428e409447..8648158f39166f1cf1603fc7f139af629e163bde 100644 (file)
@@ -469,7 +469,8 @@ static int __init cast5_init(void)
 {
        const char *feature_name;
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index 5dbba72242217541a92056068378abd86e67c378..fca459578c355d8104654bad9d06e88c136e2f61 100644 (file)
@@ -591,7 +591,8 @@ static int __init cast6_init(void)
 {
        const char *feature_name;
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index effe2160b7c535acc3468e7371744ab318c6a8c2..722bacea040e71f4cae3769e38605e51a6fd8f82 100644 (file)
@@ -130,7 +130,7 @@ static int __init chacha20_simd_mod_init(void)
 
 #ifdef CONFIG_AS_AVX2
        chacha20_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
-                           cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+                           cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
 #endif
        return crypto_register_alg(&alg);
 }
index f7170d764f32c307c827d33e8c40c5fbd95acd4d..4264a3d595894b1a8e8bbba635f477878b0461dd 100644 (file)
@@ -184,7 +184,7 @@ static int __init poly1305_simd_mod_init(void)
 
 #ifdef CONFIG_AS_AVX2
        poly1305_use_avx2 = cpu_has_avx && cpu_has_avx2 &&
-                           cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL);
+                           cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL);
        alg.descsize = sizeof(struct poly1305_simd_desc_ctx);
        if (poly1305_use_avx2)
                alg.descsize += 10 * sizeof(u32);
index 7d838dc4d888f30010dfba7c21561a16bfe4e3d0..6d198342e2de4951635c46aa3486c88cbc793274 100644 (file)
@@ -542,7 +542,8 @@ static int __init init(void)
                pr_info("AVX2 instructions are not detected.\n");
                return -ENODEV;
        }
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index da7dafc9b16d5cca124e4f6d01d4462f0cb92b15..5dc37026c7ce6d78407cd04cea618a10d14174a8 100644 (file)
@@ -597,7 +597,8 @@ static int __init serpent_init(void)
 {
        const char *feature_name;
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM,
+                               &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index 7c48e8b20848e5c890825f25db91c63616dce65d..00212c32d4db289a2fb2937241a39be917738889 100644 (file)
@@ -121,7 +121,7 @@ static struct shash_alg alg = {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
index f8097fc0d1d1b56eb6d48389ac2778145897cfb2..0e0e85aea63418fefb54e2d377c86bcebdf1c795 100644 (file)
@@ -130,7 +130,7 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
index 2edad7b81870154055e86694142a66de7fbc48ff..0c8c38c101acda77c3bf67af0639a75451360ce8 100644 (file)
@@ -129,7 +129,7 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) {
                if (cpu_has_avx)
                        pr_info("AVX detected but unusable.\n");
                return false;
index c2bd0ce718eee505272249386dffef3cc417dad8..6f3738ced95e76b96f495b294f1a37cea952ed46 100644 (file)
@@ -558,7 +558,7 @@ static int __init twofish_init(void)
 {
        const char *feature_name;
 
-       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+       if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &feature_name)) {
                pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
index 5dc1a18ef11cc6f36beee2c12e7a138b010acfa4..9f20d10af3b1e5963d25d356f44d86cf3f3ca5e8 100644 (file)
@@ -95,30 +95,36 @@ struct swregs_state {
 /*
  * List of XSAVE features Linux knows about:
  */
-enum xfeature_bit {
-       XSTATE_BIT_FP,
-       XSTATE_BIT_SSE,
-       XSTATE_BIT_YMM,
-       XSTATE_BIT_BNDREGS,
-       XSTATE_BIT_BNDCSR,
-       XSTATE_BIT_OPMASK,
-       XSTATE_BIT_ZMM_Hi256,
-       XSTATE_BIT_Hi16_ZMM,
+enum xfeature {
+       XFEATURE_FP,
+       XFEATURE_SSE,
+       /*
+        * Values above here are "legacy states".
+        * Those below are "extended states".
+        */
+       XFEATURE_YMM,
+       XFEATURE_BNDREGS,
+       XFEATURE_BNDCSR,
+       XFEATURE_OPMASK,
+       XFEATURE_ZMM_Hi256,
+       XFEATURE_Hi16_ZMM,
 
        XFEATURES_NR_MAX,
 };
 
-#define XSTATE_FP              (1 << XSTATE_BIT_FP)
-#define XSTATE_SSE             (1 << XSTATE_BIT_SSE)
-#define XSTATE_YMM             (1 << XSTATE_BIT_YMM)
-#define XSTATE_BNDREGS         (1 << XSTATE_BIT_BNDREGS)
-#define XSTATE_BNDCSR          (1 << XSTATE_BIT_BNDCSR)
-#define XSTATE_OPMASK          (1 << XSTATE_BIT_OPMASK)
-#define XSTATE_ZMM_Hi256       (1 << XSTATE_BIT_ZMM_Hi256)
-#define XSTATE_Hi16_ZMM                (1 << XSTATE_BIT_Hi16_ZMM)
+#define XFEATURE_MASK_FP               (1 << XFEATURE_FP)
+#define XFEATURE_MASK_SSE              (1 << XFEATURE_SSE)
+#define XFEATURE_MASK_YMM              (1 << XFEATURE_YMM)
+#define XFEATURE_MASK_BNDREGS          (1 << XFEATURE_BNDREGS)
+#define XFEATURE_MASK_BNDCSR           (1 << XFEATURE_BNDCSR)
+#define XFEATURE_MASK_OPMASK           (1 << XFEATURE_OPMASK)
+#define XFEATURE_MASK_ZMM_Hi256                (1 << XFEATURE_ZMM_Hi256)
+#define XFEATURE_MASK_Hi16_ZMM         (1 << XFEATURE_Hi16_ZMM)
 
-#define XSTATE_FPSSE           (XSTATE_FP | XSTATE_SSE)
-#define XSTATE_AVX512          (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+#define XFEATURE_MASK_FPSSE            (XFEATURE_MASK_FP | XFEATURE_MASK_SSE)
+#define XFEATURE_MASK_AVX512           (XFEATURE_MASK_OPMASK \
+                                        | XFEATURE_MASK_ZMM_Hi256 \
+                                        | XFEATURE_MASK_Hi16_ZMM)
 
 /*
  * There are 16x 256-bit AVX registers named YMM0-YMM15.
index d5a9b736553cc99c4e15ae5a73754a082cd1a94c..3a6c89b7030757e707a77be75a5a4d3a30e2982f 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/uaccess.h>
 
 /* Bit 63 of XCR0 is reserved for future expansion */
-#define XSTATE_EXTEND_MASK     (~(XSTATE_FPSSE | (1ULL << 63)))
+#define XFEATURE_MASK_EXTEND   (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
 
 #define XSTATE_CPUID           0x0000000d
 
 #define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
 
 /* Supported features which support lazy state saving */
-#define XSTATE_LAZY    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM                  \
-                       | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+#define XFEATURE_MASK_LAZY     (XFEATURE_MASK_FP | \
+                                XFEATURE_MASK_SSE | \
+                                XFEATURE_MASK_YMM | \
+                                XFEATURE_MASK_OPMASK | \
+                                XFEATURE_MASK_ZMM_Hi256 | \
+                                XFEATURE_MASK_Hi16_ZMM)
 
 /* Supported features which require eager state saving */
-#define XSTATE_EAGER   (XSTATE_BNDREGS | XSTATE_BNDCSR)
+#define XFEATURE_MASK_EAGER    (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR)
 
 /* All currently supported features */
-#define XCNTXT_MASK    (XSTATE_LAZY | XSTATE_EAGER)
+#define XCNTXT_MASK    (XFEATURE_MASK_LAZY | XFEATURE_MASK_EAGER)
 
 #ifdef CONFIG_X86_64
 #define REX_PREFIX     "0x48, "
index 0a250afc6cdf5f6710ab0a7969e8b3a35cf9963b..be39b5fde4b9619a566eb5deee4ee9456a3f5cc5 100644 (file)
@@ -290,11 +290,11 @@ static void __init fpu__init_system_ctx_switch(void)
        if (cpu_has_xsaveopt && eagerfpu != DISABLE)
                eagerfpu = ENABLE;
 
-       if (xfeatures_mask & XSTATE_EAGER) {
+       if (xfeatures_mask & XFEATURE_MASK_EAGER) {
                if (eagerfpu == DISABLE) {
                        pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
-                              xfeatures_mask & XSTATE_EAGER);
-                       xfeatures_mask &= ~XSTATE_EAGER;
+                              xfeatures_mask & XFEATURE_MASK_EAGER);
+                       xfeatures_mask &= ~XFEATURE_MASK_EAGER;
                } else {
                        eagerfpu = ENABLE;
                }
index dc60810c1c74c6180d47149e93994957f5d3a9ad..0bc3490420c55e1fcbecb458d8442766b901aab2 100644 (file)
@@ -66,7 +66,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
         * presence of FP and SSE state.
         */
        if (cpu_has_xsave)
-               fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
+               fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FPSSE;
 
        return ret;
 }
@@ -326,7 +326,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
         * presence of FP.
         */
        if (cpu_has_xsave)
-               fpu->state.xsave.header.xfeatures |= XSTATE_FP;
+               fpu->state.xsave.header.xfeatures |= XFEATURE_MASK_FP;
        return ret;
 }
 
index 50ec9af1bd5189d9c51de284a383103842d9d60a..eb032677f9398d85214654f73a9a7a7653007d32 100644 (file)
@@ -107,7 +107,7 @@ static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
         * header as well as change any contents in the memory layout.
         * xrestore as part of sigreturn will capture all the changes.
         */
-       xfeatures |= XSTATE_FPSSE;
+       xfeatures |= XFEATURE_MASK_FPSSE;
 
        err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
 
@@ -207,7 +207,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
                 * layout and not enabled by the OS.
                 */
                if (fx_only)
-                       header->xfeatures = XSTATE_FPSSE;
+                       header->xfeatures = XFEATURE_MASK_FPSSE;
                else
                        header->xfeatures &= (xfeatures_mask & xfeatures);
        }
@@ -230,7 +230,7 @@ static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_
 {
        if (use_xsave()) {
                if ((unsigned long)buf % 64 || fx_only) {
-                       u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
+                       u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
                        copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
                        return copy_user_to_fxregs(buf);
                } else {
index 769603abae630c076661572dfea10c167a36dd40..d6f0be9c2f5a7beb97c53568a4ddb181c057063d 100644 (file)
@@ -72,7 +72,7 @@ int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
                /*
                 * So we use FLS here to be able to print the most advanced
                 * feature that was requested but is missing. So if a driver
-                * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
+                * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
                 * missing AVX feature - this is the most informative message
                 * to users:
                 */
@@ -131,7 +131,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
        /*
         * FP is in init state
         */
-       if (!(xfeatures & XSTATE_FP)) {
+       if (!(xfeatures & XFEATURE_MASK_FP)) {
                fx->cwd = 0x37f;
                fx->swd = 0;
                fx->twd = 0;
@@ -144,7 +144,7 @@ void fpstate_sanitize_xstate(struct fpu *fpu)
        /*
         * SSE is in init state
         */
-       if (!(xfeatures & XSTATE_SSE))
+       if (!(xfeatures & XFEATURE_MASK_SSE))
                memset(&fx->xmm_space[0], 0, 256);
 
        /*
@@ -223,14 +223,14 @@ static void __init print_xstate_feature(u64 xstate_mask)
  */
 static void __init print_xstate_features(void)
 {
-       print_xstate_feature(XSTATE_FP);
-       print_xstate_feature(XSTATE_SSE);
-       print_xstate_feature(XSTATE_YMM);
-       print_xstate_feature(XSTATE_BNDREGS);
-       print_xstate_feature(XSTATE_BNDCSR);
-       print_xstate_feature(XSTATE_OPMASK);
-       print_xstate_feature(XSTATE_ZMM_Hi256);
-       print_xstate_feature(XSTATE_Hi16_ZMM);
+       print_xstate_feature(XFEATURE_MASK_FP);
+       print_xstate_feature(XFEATURE_MASK_SSE);
+       print_xstate_feature(XFEATURE_MASK_YMM);
+       print_xstate_feature(XFEATURE_MASK_BNDREGS);
+       print_xstate_feature(XFEATURE_MASK_BNDCSR);
+       print_xstate_feature(XFEATURE_MASK_OPMASK);
+       print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
+       print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
 }
 
 /*
@@ -365,7 +365,11 @@ static int init_xstate_size(void)
        return 0;
 }
 
-void fpu__init_disable_system_xstate(void)
+/*
+ * We enabled the XSAVE hardware, but something went wrong and
+ * we can not use it.  Disable it.
+ */
+static void fpu__init_disable_system_xstate(void)
 {
        xfeatures_mask = 0;
        cr4_clear_bits(X86_CR4_OSXSAVE);
@@ -398,7 +402,7 @@ void __init fpu__init_system_xstate(void)
        cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
        xfeatures_mask = eax + ((u64)edx << 32);
 
-       if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
+       if ((xfeatures_mask & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
                pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
                BUG();
        }
@@ -451,7 +455,7 @@ void fpu__resume_cpu(void)
  * Inputs:
  *     xstate: the thread's storage area for all FPU data
  *     xstate_feature: state which is defined in xsave.h (e.g.
- *     XSTATE_FP, XSTATE_SSE, etc...)
+ *     XFEATURE_MASK_FP, XFEATURE_MASK_SSE, etc...)
  * Output:
  *     address of the state in the xsave area, or NULL if the
  *     field is not present in the xsave buffer.
@@ -502,8 +506,8 @@ EXPORT_SYMBOL_GPL(get_xsave_addr);
  * Note that this only works on the current task.
  *
  * Inputs:
- *     @xsave_state: state which is defined in xsave.h (e.g. XSTATE_FP,
- *     XSTATE_SSE, etc...)
+ *     @xsave_state: state which is defined in xsave.h (e.g. XFEATURE_MASK_FP,
+ *     XFEATURE_MASK_SSE, etc...)
  * Output:
  *     address of the state in the xsave area or NULL if the state
  *     is not present or is in its 'init state'.
index 346eec73f7db408e95eaf3d682ded2844828d739..0cd2ac5c0f2844825f9904aa62714f9d0e52ed98 100644 (file)
@@ -384,7 +384,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
         * which is all zeros which indicates MPX was not
         * responsible for the exception.
         */
-       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
        if (!bndcsr)
                goto exit_trap;
 
index 2fbea2544f2437bc0ae50ef00288dc320effd81e..156441bcaac82721dea135eb4f548fa8366a2abc 100644 (file)
@@ -30,7 +30,7 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
        int feature_bit = 0;
        u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
 
-       xstate_bv &= XSTATE_EXTEND_MASK;
+       xstate_bv &= XFEATURE_MASK_EXTEND;
        while (xstate_bv) {
                if (xstate_bv & 0x1) {
                        u32 eax, ebx, ecx, edx, offset;
@@ -51,7 +51,7 @@ u64 kvm_supported_xcr0(void)
        u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
        if (!kvm_x86_ops->mpx_supported())
-               xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);
+               xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
 
        return xcr0;
 }
index a60bdbccff5189b5a98b9a7fcc6a3b9f7ff5eeec..2d4e54db49afc2c9a05ae1df97793a26252d4c9b 100644 (file)
@@ -662,9 +662,9 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
        /* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
        if (index != XCR_XFEATURE_ENABLED_MASK)
                return 1;
-       if (!(xcr0 & XSTATE_FP))
+       if (!(xcr0 & XFEATURE_MASK_FP))
                return 1;
-       if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
+       if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
                return 1;
 
        /*
@@ -672,23 +672,24 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
         * saving.  However, xcr0 bit 0 is always set, even if the
         * emulated CPU does not support XSAVE (see fx_init).
         */
-       valid_bits = vcpu->arch.guest_supported_xcr0 | XSTATE_FP;
+       valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP;
        if (xcr0 & ~valid_bits)
                return 1;
 
-       if ((!(xcr0 & XSTATE_BNDREGS)) != (!(xcr0 & XSTATE_BNDCSR)))
+       if ((!(xcr0 & XFEATURE_MASK_BNDREGS)) !=
+           (!(xcr0 & XFEATURE_MASK_BNDCSR)))
                return 1;
 
-       if (xcr0 & XSTATE_AVX512) {
-               if (!(xcr0 & XSTATE_YMM))
+       if (xcr0 & XFEATURE_MASK_AVX512) {
+               if (!(xcr0 & XFEATURE_MASK_YMM))
                        return 1;
-               if ((xcr0 & XSTATE_AVX512) != XSTATE_AVX512)
+               if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
                        return 1;
        }
        kvm_put_guest_xcr0(vcpu);
        vcpu->arch.xcr0 = xcr0;
 
-       if ((xcr0 ^ old_xcr0) & XSTATE_EXTEND_MASK)
+       if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
                kvm_update_cpuid(vcpu);
        return 0;
 }
@@ -2906,7 +2907,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
         * Copy each region from the possibly compacted offset to the
         * non-compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
@@ -2944,7 +2945,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
         * Copy each region from the non-compacted offset to the
         * possibly compacted offset.
         */
-       valid = xstate_bv & ~XSTATE_FPSSE;
+       valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
        while (valid) {
                u64 feature = valid & -valid;
                int index = fls64(feature) - 1;
@@ -2972,7 +2973,7 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                        &vcpu->arch.guest_fpu.state.fxsave,
                        sizeof(struct fxregs_state));
                *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
-                       XSTATE_FPSSE;
+                       XFEATURE_MASK_FPSSE;
        }
 }
 
@@ -2992,7 +2993,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
                        return -EINVAL;
                load_xsave(vcpu, (u8 *)guest_xsave->region);
        } else {
-               if (xstate_bv & ~XSTATE_FPSSE)
+               if (xstate_bv & ~XFEATURE_MASK_FPSSE)
                        return -EINVAL;
                memcpy(&vcpu->arch.guest_fpu.state.fxsave,
                        guest_xsave->region, sizeof(struct fxregs_state));
@@ -7001,7 +7002,7 @@ static void fx_init(struct kvm_vcpu *vcpu)
        /*
         * Ensure guest xcr0 is valid for loading
         */
-       vcpu->arch.xcr0 = XSTATE_FP;
+       vcpu->arch.xcr0 = XFEATURE_MASK_FP;
 
        vcpu->arch.cr0 |= X86_CR0_ET;
 }
index 2f822cd886c26cb631d50c38fb933d5898bcc0c0..f2afa5fe48a6dcbd44f5c573cfe9d16f41b8ad16 100644 (file)
@@ -180,9 +180,9 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
 
-#define KVM_SUPPORTED_XCR0     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
-                               | XSTATE_BNDREGS | XSTATE_BNDCSR \
-                               | XSTATE_AVX512)
+#define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
+                               | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
+                               | XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512)
 extern u64 host_xcr0;
 
 extern u64 kvm_supported_xcr0(void);
index 134948b0926f521afe63f9e8c3679835f72cccee..f35fc9c6ed50197f166a4fb87fd3513620b9ec23 100644 (file)
@@ -258,7 +258,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
                goto err_out;
        }
        /* get bndregs field from current task's xsave area */
-       bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
+       bndregs = get_xsave_field_ptr(XFEATURE_MASK_BNDREGS);
        if (!bndregs) {
                err = -EINVAL;
                goto err_out;
@@ -315,7 +315,7 @@ static __user void *mpx_get_bounds_dir(void)
         * The bounds directory pointer is stored in a register
         * only accessible if we first do an xsave.
         */
-       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
        if (!bndcsr)
                return MPX_INVALID_BOUNDS_DIR;
 
@@ -492,7 +492,7 @@ static int do_mpx_bt_fault(void)
        const struct bndcsr *bndcsr;
        struct mm_struct *mm = current->mm;
 
-       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XFEATURE_MASK_BNDCSR);
        if (!bndcsr)
                return -EINVAL;
        /*