x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()
author: Dan Williams <dan.j.williams@intel.com>
Fri, 4 May 2018 00:06:31 +0000 (17:06 -0700)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 15 May 2018 06:32:42 +0000 (08:32 +0200)
Use the updated memcpy_mcsafe() implementation to define
copy_to_user_mcsafe() and copy_to_iter_mcsafe(). The most significant
difference from typical copy_to_iter() is that the ITER_KVEC and
ITER_BVEC iterator types can fail to complete a full transfer.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: hch@lst.de
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/152539239150.31796.9189779163576449784.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/Kconfig
arch/x86/include/asm/uaccess_64.h
include/linux/uio.h
lib/iov_iter.c

index c07f492b871a8bf0f47cfb7be03a004dd740c2c1..6ca22706cd64e962c4918d148ea3c528bb8ed33f 100644 (file)
@@ -60,6 +60,7 @@ config X86
        select ARCH_HAS_PMEM_API                if X86_64
        select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
+       select ARCH_HAS_UACCESS_MCSAFE          if X86_64
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_SG_CHAIN
        select ARCH_HAS_STRICT_KERNEL_RWX
index c63efc07891f1c1bfbb9f3624effaddd771eb3ad..62acb613114b2322088083f7a9ccc85495a5afa4 100644 (file)
@@ -46,6 +46,17 @@ copy_user_generic(void *to, const void *from, unsigned len)
        return ret;
 }
 
+/*
+ * copy_to_user_mcsafe() - copy kernel memory to user space with the
+ * machine-check-safe memcpy_mcsafe() as the underlying copy routine.
+ *
+ * Returns the number of bytes left uncopied (0 on full success), per the
+ * memcpy_mcsafe() convention used by the callers in lib/iov_iter.c.
+ */
+static __always_inline __must_check unsigned long
+copy_to_user_mcsafe(void *to, const void *from, unsigned len)
+{
+       unsigned long ret;
+
+       /* Open the user-access window (SMAP) only around the raw copy. */
+       __uaccess_begin();
+       ret = memcpy_mcsafe(to, from, len);
+       __uaccess_end();
+       return ret;
+}
+
 static __always_inline __must_check unsigned long
 raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
index e67e12adb1362da1e8ef729b914f6c5e05979efa..f5766e853a771c3e42ea056167507e59147a1366 100644 (file)
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
 #define _copy_from_iter_flushcache _copy_from_iter_nocache
 #endif
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i);
+#else
+#define _copy_to_iter_mcsafe _copy_to_iter
+#endif
+
 static __always_inline __must_check
 size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
                return _copy_from_iter_flushcache(addr, bytes, i);
 }
 
+static __always_inline __must_check
+size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
+{
+       if (unlikely(!check_copy_size(addr, bytes, false)))
+               return 0;
+       else
+               return _copy_to_iter_mcsafe(addr, bytes, i);
+}
+
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
index 970212670b6a11bf8e033bd7d0cc0fff34041dc8..70ebc8ede143f7599b482cc4e0704fde5ef99188 100644 (file)
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+/*
+ * copyout_mcsafe() - validated user-space copy-out for the iovec path.
+ *
+ * Returns the number of bytes NOT copied: 0 on success, a partial count if
+ * the copy faulted mid-way, or the full 'n' when access_ok() rejects the
+ * destination range outright.
+ */
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+       if (access_ok(VERIFY_WRITE, to, n)) {
+               /* KASAN sees only the kernel-side source buffer. */
+               kasan_check_read(from, n);
+               n = copy_to_user_mcsafe((__force void *) to, from, n);
+       }
+       return n;
+}
+
+/*
+ * memcpy_mcsafe_to_page() - MC-safe copy into a page (bvec path), going
+ * through a temporary atomic kernel mapping of the destination page.
+ *
+ * Returns the number of bytes left uncopied (0 on full success), as
+ * reported by memcpy_mcsafe().
+ */
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+               const char *from, size_t len)
+{
+       unsigned long ret;
+       char *to;
+
+       to = kmap_atomic(page);
+       ret = memcpy_mcsafe(to + offset, from, len);
+       kunmap_atomic(to);
+
+       return ret;
+}
+
+/*
+ * _copy_to_iter_mcsafe() - copy_to_iter() variant that survives machine
+ * checks (memory poison) on the source buffer.
+ *
+ * Unlike _copy_to_iter(), the ITER_KVEC and ITER_BVEC legs may return a
+ * short count: on a memcpy_mcsafe() failure the function bails out early
+ * and returns how many bytes were actually transferred before the fault.
+ * ITER_PIPE is unsupported and returns 0 after a WARN.
+ */
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+       const char *from = addr;
+       unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+       if (unlikely(i->type & ITER_PIPE)) {
+               WARN_ON(1);
+               return 0;
+       }
+       if (iter_is_iovec(i))
+               might_fault();
+       /* One leg per iterator flavor: iovec, bvec, kvec (in that order). */
+       iterate_and_advance(i, bytes, v,
+               copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+               ({
+               rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+                               (from += v.bv_len) - v.bv_len, v.bv_len);
+               if (rem) {
+                       /* rem = bytes of this segment NOT copied; report
+                        * total progress: (current - start) - remainder. */
+                       curr_addr = (unsigned long) from;
+                       bytes = curr_addr - s_addr - rem;
+                       return bytes;
+               }
+               }),
+               ({
+               rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+                               v.iov_len);
+               if (rem) {
+                       curr_addr = (unsigned long) from;
+                       bytes = curr_addr - s_addr - rem;
+                       return bytes;
+               }
+               })
+       )
+
+       return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
        char *to = addr;