Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/acpi.h               |   2
-rw-r--r--  include/asm-x86/amd_iommu.h          |   3
-rw-r--r--  include/asm-x86/amd_iommu_types.h    |  64
-rw-r--r--  include/asm-x86/apic.h               |   4
-rw-r--r--  include/asm-x86/asm.h                |   7
-rw-r--r--  include/asm-x86/bitops.h             |  10
-rw-r--r--  include/asm-x86/cacheflush.h         |   7
-rw-r--r--  include/asm-x86/cpufeature.h         |   2
-rw-r--r--  include/asm-x86/dma-mapping.h        |  87
-rw-r--r--  include/asm-x86/elf.h                |   5
-rw-r--r--  include/asm-x86/futex.h              |   6
-rw-r--r--  include/asm-x86/gart.h               |   8
-rw-r--r--  include/asm-x86/idle.h               |   2
-rw-r--r--  include/asm-x86/iommu.h              |   1
-rw-r--r--  include/asm-x86/kgdb.h               |  24
-rw-r--r--  include/asm-x86/mach-rdc321x/gpio.h  |   3
-rw-r--r--  include/asm-x86/mmu.h                |   5
-rw-r--r--  include/asm-x86/mpspec.h             |   3
-rw-r--r--  include/asm-x86/nmi.h                |   1
-rw-r--r--  include/asm-x86/page.h               |   1
-rw-r--r--  include/asm-x86/page_32.h            |   3
-rw-r--r--  include/asm-x86/paravirt.h           |  30
-rw-r--r--  include/asm-x86/pgtable-2level.h     |   2
-rw-r--r--  include/asm-x86/pgtable-3level.h     |   7
-rw-r--r--  include/asm-x86/pgtable.h            |  20
-rw-r--r--  include/asm-x86/pgtable_32.h         |   5
-rw-r--r--  include/asm-x86/pgtable_64.h         |   2
-rw-r--r--  include/asm-x86/ptrace.h             |   5
-rw-r--r--  include/asm-x86/resume-trace.h       |   2
-rw-r--r--  include/asm-x86/spinlock.h           |   6
-rw-r--r--  include/asm-x86/syscall.h            | 211
-rw-r--r--  include/asm-x86/thread_info.h        |   4
-rw-r--r--  include/asm-x86/uaccess_64.h         |   1
33 files changed, 446 insertions, 97 deletions
diff --git a/include/asm-x86/acpi.h b/include/asm-x86/acpi.h
index bd76299586b3..392e17336be1 100644
--- a/include/asm-x86/acpi.h
+++ b/include/asm-x86/acpi.h
@@ -140,6 +140,8 @@ static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate)
boot_cpu_data.x86_model <= 0x05 &&
boot_cpu_data.x86_mask < 0x0A)
return 1;
+ else if (boot_cpu_has(X86_FEATURE_AMDC1E))
+ return 1;
else
return max_cstate;
}
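The new branch caps the reported C-state at C1 on AMD machines with C1E enabled, where deeper BIOS-advertised C-states are known to misbehave; the X86_FEATURE_AMDC1E bit itself is added in the cpufeature.h hunk below. A minimal sketch of how an idle-setup path might consume the check (setup_cstates() and register_cstate() are illustrative, not kernel API):

static void setup_cstates(unsigned int requested_max)
{
	unsigned int i, max = acpi_processor_cstate_check(requested_max);

	/* on an AMD C1E box, max is now 1 and only C1 gets registered */
	for (i = 1; i <= max; i++)
		register_cstate(i);	/* hypothetical helper */
}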
diff --git a/include/asm-x86/amd_iommu.h b/include/asm-x86/amd_iommu.h
index 783f43e58052..041d0db7da27 100644
--- a/include/asm-x86/amd_iommu.h
+++ b/include/asm-x86/amd_iommu.h
@@ -20,10 +20,13 @@
#ifndef ASM_X86__AMD_IOMMU_H
#define ASM_X86__AMD_IOMMU_H
+#include <linux/irqreturn.h>
+
#ifdef CONFIG_AMD_IOMMU
extern int amd_iommu_init(void);
extern int amd_iommu_init_dma_ops(void);
extern void amd_iommu_detect(void);
+extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
#else
static inline int amd_iommu_init(void) { return -ENODEV; }
static inline void amd_iommu_detect(void) { }
diff --git a/include/asm-x86/amd_iommu_types.h b/include/asm-x86/amd_iommu_types.h
index 1ffa4e53c989..b3085869a17b 100644
--- a/include/asm-x86/amd_iommu_types.h
+++ b/include/asm-x86/amd_iommu_types.h
@@ -37,6 +37,7 @@
/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET 0x00
#define MMIO_RANGE_OFFSET 0x0c
+#define MMIO_MISC_OFFSET 0x10
/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK 0xff000000
@@ -48,6 +49,7 @@
#define MMIO_GET_LD(x) (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x) (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
+#define MMIO_MSI_NUM(x) ((x) & 0x1f)
/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK 0x01ULL
@@ -69,6 +71,25 @@
/* MMIO status bits */
#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+/* event logging constants */
+#define EVENT_ENTRY_SIZE 0x10
+#define EVENT_TYPE_SHIFT 28
+#define EVENT_TYPE_MASK 0xf
+#define EVENT_TYPE_ILL_DEV 0x1
+#define EVENT_TYPE_IO_FAULT 0x2
+#define EVENT_TYPE_DEV_TAB_ERR 0x3
+#define EVENT_TYPE_PAGE_TAB_ERR 0x4
+#define EVENT_TYPE_ILL_CMD 0x5
+#define EVENT_TYPE_CMD_HARD_ERR 0x6
+#define EVENT_TYPE_IOTLB_INV_TO 0x7
+#define EVENT_TYPE_INV_DEV_REQ 0x8
+#define EVENT_DEVID_MASK 0xffff
+#define EVENT_DEVID_SHIFT 0
+#define EVENT_DOMID_MASK 0xffff
+#define EVENT_DOMID_SHIFT 0
+#define EVENT_FLAGS_MASK 0xfff
+#define EVENT_FLAGS_SHIFT 0x10
+
/* feature control bits */
#define CONTROL_IOMMU_EN 0x00ULL
#define CONTROL_HT_TUN_EN 0x01ULL
@@ -109,6 +130,8 @@
#define DEV_ENTRY_NMI_PASS 0xba
#define DEV_ENTRY_LINT0_PASS 0xbe
#define DEV_ENTRY_LINT1_PASS 0xbf
+#define DEV_ENTRY_MODE_MASK 0x07
+#define DEV_ENTRY_MODE_SHIFT 0x09
/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE 8192
@@ -116,6 +139,10 @@
#define MMIO_CMD_SIZE_SHIFT 56
#define MMIO_CMD_SIZE_512 (0x9ULL << MMIO_CMD_SIZE_SHIFT)
+/* constants for event buffer handling */
+#define EVT_BUFFER_SIZE 8192 /* 512 entries */
+#define EVT_LEN_MASK (0x9ULL << 56)
+
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
#define PAGE_MODE_3_LEVEL 0x03
@@ -134,6 +161,7 @@
#define IOMMU_MAP_SIZE_L3 (1ULL << 39)
#define IOMMU_PTE_P (1ULL << 0)
+#define IOMMU_PTE_TV (1ULL << 1)
#define IOMMU_PTE_U (1ULL << 59)
#define IOMMU_PTE_FC (1ULL << 60)
#define IOMMU_PTE_IR (1ULL << 61)
@@ -159,6 +187,9 @@
#define MAX_DOMAIN_ID 65536
+/* FIXME: move this macro to <linux/pci.h> */
+#define PCI_BUS(x) (((x) >> 8) & 0xff)
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -196,6 +227,15 @@ struct dma_ops_domain {
* just calculate its address in constant time.
*/
u64 **pte_pages;
+
+ /* This will be set to true when TLB needs to be flushed */
+ bool need_flush;
+
+ /*
+ * if this is a preallocated domain, keep the device for which it was
+ * preallocated in this variable
+ */
+ u16 target_dev;
};
/*
@@ -208,8 +248,9 @@ struct amd_iommu {
/* locks the accesses to the hardware */
spinlock_t lock;
- /* device id of this IOMMU */
- u16 devid;
+ /* Pointer to PCI device of this IOMMU */
+ struct pci_dev *dev;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -225,6 +266,9 @@ struct amd_iommu {
/* capabilities of that IOMMU read from ACPI */
u32 cap;
+ /* pci domain of this IOMMU */
+ u16 pci_seg;
+
/* first device this IOMMU handles. read from PCI */
u16 first_device;
/* last device this IOMMU handles. read from PCI */
@@ -240,9 +284,19 @@ struct amd_iommu {
/* size of command buffer */
u32 cmd_buf_size;
+ /* event buffer virtual address */
+ u8 *evt_buf;
+ /* size of event buffer */
+ u32 evt_buf_size;
+ /* MSI number for event interrupt */
+ u16 evt_msi_num;
+
/* if one, we need to send a completion wait command */
int need_sync;
+ /* true if interrupts for this IOMMU are already enabled */
+ bool int_enabled;
+
/* default dma_ops domain for that IOMMU */
struct dma_ops_domain *default_dom;
};
@@ -322,6 +376,12 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
/* will be 1 if device isolation is enabled */
extern int amd_iommu_isolate;
+/*
+ * If true, the addresses will be flushed on unmap time, not when
+ * they are reused
+ */
+extern bool amd_iommu_unmap_flush;
+
/* takes a PCI device id and prints it out in a readable form */
static inline void print_devid(u16 devid, int nl)
{
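The EVENT_* constants describe the 16-byte entries of the new IOMMU event log (EVT_BUFFER_SIZE / EVENT_ENTRY_SIZE = 512 entries), which the interrupt handler declared in amd_iommu.h above drains. A hedged sketch of decoding one entry with these masks; which dword carries which field is this editor's reading, so treat the layout as an assumption:

static void decode_event(u32 *event)
{
	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
	int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
	int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
	u64 addr  = ((u64)event[3] << 32) | event[2];

	if (type == EVENT_TYPE_IO_FAULT)
		printk(KERN_ERR "IO_PAGE_FAULT bus=%02x domain=%d "
		       "address=%016llx flags=%#x\n",
		       PCI_BUS(devid), domid,
		       (unsigned long long)addr, flags);
}

PCI_BUS() is the helper added just above; a full handler would switch over all eight EVENT_TYPE_* values.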
diff --git a/include/asm-x86/apic.h b/include/asm-x86/apic.h
index 1311c82b165b..d76a0839abe9 100644
--- a/include/asm-x86/apic.h
+++ b/include/asm-x86/apic.h
@@ -134,9 +134,7 @@ static inline void ack_x2APIC_irq(void)
static inline void ack_APIC_irq(void)
{
/*
- * ack_APIC_irq() actually gets compiled as a single instruction:
- * - a single rmw on Pentium/82489DX
- * - a single write on P6+ cores (CONFIG_X86_GOOD_APIC)
+ * ack_APIC_irq() actually gets compiled as a single instruction
* ... yummie.
*/
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 2439ae49e8ac..e1355f44d7c3 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -20,17 +20,22 @@
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
-#define _ASM_MOV_UL __ASM_SIZE(mov)
+#define _ASM_MOV __ASM_SIZE(mov)
#define _ASM_INC __ASM_SIZE(inc)
#define _ASM_DEC __ASM_SIZE(dec)
#define _ASM_ADD __ASM_SIZE(add)
#define _ASM_SUB __ASM_SIZE(sub)
#define _ASM_XADD __ASM_SIZE(xadd)
+
#define _ASM_AX __ASM_REG(ax)
#define _ASM_BX __ASM_REG(bx)
#define _ASM_CX __ASM_REG(cx)
#define _ASM_DX __ASM_REG(dx)
+#define _ASM_SP __ASM_REG(sp)
+#define _ASM_BP __ASM_REG(bp)
+#define _ASM_SI __ASM_REG(si)
+#define _ASM_DI __ASM_REG(di)
/* Exception table entry */
# define _ASM_EXTABLE(from,to) \
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 61989b93b475..451a74762bd4 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -424,16 +424,6 @@ static inline int fls(int x)
#undef ADDR
-static inline void set_bit_string(unsigned long *bitmap,
- unsigned long i, int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, bitmap);
- i++;
- }
-}
-
#ifdef __KERNEL__
#include <asm-generic/bitops/sched.h>
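set_bit_string() leaves this header. Callers that still need the behaviour can trivially open-code it; an equivalent sketch of the removed helper, under a hypothetical name:

static inline void set_bit_run(unsigned long *bitmap, unsigned long i, int len)
{
	unsigned long end = i + len;

	while (i < end)
		__set_bit(i++, bitmap);
}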
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 59859cb28a36..68840ef1b35a 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -24,6 +24,8 @@
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy((dst), (src), (len))
+#define PG_non_WB PG_arch_1
+PAGEFLAG(NonWB, non_WB)
/*
* The set_memory_* API can be used to change various attributes of a virtual
@@ -66,6 +68,9 @@ int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);
+int set_memory_array_uc(unsigned long *addr, int addrinarray);
+int set_memory_array_wb(unsigned long *addr, int addrinarray);
+
/*
* For legacy compatibility with the old APIs, a few functions
* are provided that work on a "struct page".
@@ -96,8 +101,6 @@ int set_pages_rw(struct page *page, int numpages);
void clflush_cache_range(void *addr, unsigned int size);
-void cpa_init(void);
-
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
diff --git a/include/asm-x86/cpufeature.h b/include/asm-x86/cpufeature.h
index 8d45690bef5f..adfeae6586e1 100644
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@ -80,6 +80,7 @@
#define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
#define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */
@@ -89,6 +90,7 @@
#define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */
#define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_AMDC1E (3*32+21) /* AMD C1E detected */
#define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/include/asm-x86/dma-mapping.h b/include/asm-x86/dma-mapping.h
index 5d200e78bd81..219c33d6361c 100644
--- a/include/asm-x86/dma-mapping.h
+++ b/include/asm-x86/dma-mapping.h
@@ -9,12 +9,12 @@
#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
-extern int force_iommu;
struct dma_mapping_ops {
int (*mapping_error)(struct device *dev,
@@ -25,9 +25,6 @@ struct dma_mapping_ops {
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
size_t size, int direction);
- /* like map_single, but doesn't check the device mask */
- dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
- size_t size, int direction);
void (*unmap_single)(struct device *dev, dma_addr_t addr,
size_t size, int direction);
void (*sync_single_for_cpu)(struct device *hwdev,
@@ -68,7 +65,7 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
return dma_ops;
else
return dev->archdata.dma_ops;
#endif
}
/* Make sure we keep the same behaviour */
@@ -87,17 +84,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h) (1)
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);
+extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag);
+
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
int direction)
@@ -247,7 +241,68 @@ static inline int dma_get_cache_alignment(void)
return boot_cpu_data.x86_clflush_size;
}
-#define dma_is_consistent(d, h) (1)
-
-#include <asm-generic/dma-coherent.h>
-#endif /* ASM_X86__DMA_MAPPING_H */
+static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
+ gfp_t gfp)
+{
+ unsigned long dma_mask = 0;
+
+ dma_mask = dev->coherent_dma_mask;
+ if (!dma_mask)
+ dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;
+
+ return dma_mask;
+}
+
+static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
+{
+#ifdef CONFIG_X86_64
+ unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
+
+ if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
+ gfp |= GFP_DMA32;
+#endif
+ return gfp;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp)
+{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+ void *memory;
+
+ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+ return memory;
+
+ if (!dev) {
+ dev = &x86_dma_fallback_dev;
+ gfp |= GFP_DMA;
+ }
+
+ if (!is_device_dma_capable(dev))
+ return NULL;
+
+ if (!ops->alloc_coherent)
+ return NULL;
+
+ return ops->alloc_coherent(dev, size, dma_handle,
+ dma_alloc_coherent_gfp_flags(dev, gfp));
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t bus)
+{
+ struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+ WARN_ON(irqs_disabled()); /* for portability */
+
+ if (dma_release_from_coherent(dev, get_order(size), vaddr))
+ return;
+
+ if (ops->free_coherent)
+ ops->free_coherent(dev, size, vaddr, bus);
+}
+
+#endif /* ASM_X86__DMA_MAPPING_H */
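dma_alloc_coherent() and dma_free_coherent() are no longer out-of-line: they live here now, dispatch through dma_mapping_ops, and substitute x86_dma_fallback_dev when no device is given. A hedged driver-side usage sketch (pdev and the 4096-byte size are illustrative):

	dma_addr_t bus;
	void *ring;

	ring = dma_alloc_coherent(&pdev->dev, 4096, &bus, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;
	/* the CPU uses 'ring'; the device is programmed with 'bus' */
	dma_free_coherent(&pdev->dev, 4096, ring, bus);

Note the WARN_ON(irqs_disabled()) in the free path: both calls are meant for process context.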
diff --git a/include/asm-x86/elf.h b/include/asm-x86/elf.h
index cd678b2d6a74..5c4745bec906 100644
--- a/include/asm-x86/elf.h
+++ b/include/asm-x86/elf.h
@@ -148,8 +148,9 @@ do { \
static inline void start_ia32_thread(struct pt_regs *regs, u32 ip, u32 sp)
{
- asm volatile("movl %0,%%fs" :: "r" (0));
- asm volatile("movl %0,%%es; movl %0,%%ds" : : "r" (__USER32_DS));
+ loadsegment(fs, 0);
+ loadsegment(ds, __USER32_DS);
+ loadsegment(es, __USER32_DS);
load_gs_index(0);
regs->ip = ip;
regs->sp = sp;
diff --git a/include/asm-x86/futex.h b/include/asm-x86/futex.h
index 45dc24d84186..06b924ef6fa5 100644
--- a/include/asm-x86/futex.h
+++ b/include/asm-x86/futex.h
@@ -25,7 +25,7 @@
asm volatile("1:\tmovl %2, %0\n" \
"\tmovl\t%0, %3\n" \
"\t" insn "\n" \
- "2:\tlock; cmpxchgl %3, %2\n" \
+ "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t1b\n" \
"3:\t.section .fixup,\"ax\"\n" \
"4:\tmov\t%5, %1\n" \
@@ -64,7 +64,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op1("lock; xaddl %0, %2", ret, oldval,
+ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
uaddr, oparg);
break;
case FUTEX_OP_OR:
@@ -122,7 +122,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;
- asm volatile("1:\tlock; cmpxchgl %3, %1\n"
+ asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
"2:\t.section .fixup, \"ax\"\n"
"3:\tmov %2, %0\n"
"\tjmp 2b\n"
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 07f445844146..605edb39ef9e 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -29,6 +29,8 @@ extern int fix_aperture;
#define AMD64_GARTCACHECTL 0x9c
#define AMD64_GARTEN (1<<0)
+extern int agp_amd64_init(void);
+
static inline void enable_gart_translation(struct pci_dev *dev, u64 addr)
{
u32 tmp, ctl;
@@ -52,15 +54,15 @@ static inline int aperture_valid(u64 aper_base, u32 aper_size, u32 min_size)
return 0;
if (aper_base + aper_size > 0x100000000ULL) {
- printk(KERN_ERR "Aperture beyond 4GB. Ignoring.\n");
+ printk(KERN_INFO "Aperture beyond 4GB. Ignoring.\n");
return 0;
}
if (e820_any_mapped(aper_base, aper_base + aper_size, E820_RAM)) {
- printk(KERN_ERR "Aperture pointing to e820 RAM. Ignoring.\n");
+ printk(KERN_INFO "Aperture pointing to e820 RAM. Ignoring.\n");
return 0;
}
if (aper_size < min_size) {
- printk(KERN_ERR "Aperture too small (%d MB) than (%d MB)\n",
+ printk(KERN_INFO "Aperture too small (%d MB) than (%d MB)\n",
aper_size>>20, min_size>>20);
return 0;
}
diff --git a/include/asm-x86/idle.h b/include/asm-x86/idle.h
index dc9c7944847b..baa3f783d27d 100644
--- a/include/asm-x86/idle.h
+++ b/include/asm-x86/idle.h
@@ -10,4 +10,6 @@ void idle_notifier_register(struct notifier_block *n);
void enter_idle(void);
void exit_idle(void);
+void c1e_remove_cpu(int cpu);
+
#endif /* ASM_X86__IDLE_H */
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index e86f44148c66..546ad3110fea 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -6,6 +6,7 @@ extern void no_iommu_init(void);
extern struct dma_mapping_ops nommu_dma_ops;
extern int force_iommu, no_iommu;
extern int iommu_detected;
+extern int dmar_disabled;
extern unsigned long iommu_num_pages(unsigned long addr, unsigned long len);
diff --git a/include/asm-x86/kgdb.h b/include/asm-x86/kgdb.h
index 83a7ee228ab1..d283863354de 100644
--- a/include/asm-x86/kgdb.h
+++ b/include/asm-x86/kgdb.h
@@ -39,12 +39,13 @@ enum regnames {
GDB_FS, /* 14 */
GDB_GS, /* 15 */
};
+#define NUMREGBYTES ((GDB_GS+1)*4)
#else /* ! CONFIG_X86_32 */
-enum regnames {
+enum regnames64 {
GDB_AX, /* 0 */
- GDB_DX, /* 1 */
+ GDB_BX, /* 1 */
GDB_CX, /* 2 */
- GDB_BX, /* 3 */
+ GDB_DX, /* 3 */
GDB_SI, /* 4 */
GDB_DI, /* 5 */
GDB_BP, /* 6 */
@@ -58,18 +59,15 @@ enum regnames {
GDB_R14, /* 14 */
GDB_R15, /* 15 */
GDB_PC, /* 16 */
- GDB_PS, /* 17 */
};
-#endif /* CONFIG_X86_32 */
-/*
- * Number of bytes of registers:
- */
-#ifdef CONFIG_X86_32
-# define NUMREGBYTES 64
-#else
-# define NUMREGBYTES ((GDB_PS+1)*8)
-#endif
+enum regnames32 {
+ GDB_PS = 34,
+ GDB_CS,
+ GDB_SS,
+};
+#define NUMREGBYTES ((GDB_SS+1)*4)
+#endif /* CONFIG_X86_32 */
static inline void arch_kgdb_breakpoint(void)
{
diff --git a/include/asm-x86/mach-rdc321x/gpio.h b/include/asm-x86/mach-rdc321x/gpio.h
index 6184561980f2..94b6cdf532e2 100644
--- a/include/asm-x86/mach-rdc321x/gpio.h
+++ b/include/asm-x86/mach-rdc321x/gpio.h
@@ -1,6 +1,8 @@
#ifndef ASM_X86__MACH_RDC321X__GPIO_H
#define ASM_X86__MACH_RDC321X__GPIO_H
+#include <linux/kernel.h>
+
extern int rdc_gpio_get_value(unsigned gpio);
extern void rdc_gpio_set_value(unsigned gpio, int value);
extern int rdc_gpio_direction_input(unsigned gpio);
@@ -18,6 +20,7 @@ static inline int gpio_request(unsigned gpio, const char *label)
static inline void gpio_free(unsigned gpio)
{
+ might_sleep();
rdc_gpio_free(gpio);
}
diff --git a/include/asm-x86/mmu.h b/include/asm-x86/mmu.h
index a30d7a9c8297..9d5aff14334a 100644
--- a/include/asm-x86/mmu.h
+++ b/include/asm-x86/mmu.h
@@ -7,14 +7,9 @@
/*
* The x86 doesn't have a mmu context, but
* we put the segment information here.
- *
- * cpu_vm_mask is used to optimize ldt flushing.
*/
typedef struct {
void *ldt;
-#ifdef CONFIG_X86_64
- rwlock_t ldtlock;
-#endif
int size;
struct mutex lock;
void *vdso;
diff --git a/include/asm-x86/mpspec.h b/include/asm-x86/mpspec.h
index 118da365e371..be2241a818f1 100644
--- a/include/asm-x86/mpspec.h
+++ b/include/asm-x86/mpspec.h
@@ -5,11 +5,12 @@
#include <asm/mpspec_def.h>
+extern int apic_version[MAX_APICS];
+
#ifdef CONFIG_X86_32
#include <mach_mpspec.h>
extern unsigned int def_to_bigsmp;
-extern int apic_version[MAX_APICS];
extern u8 apicid_2_node[];
extern int pic_mode;
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index f8b76f383904..d5e715f024dc 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -34,6 +34,7 @@ extern void stop_apic_nmi_watchdog(void *);
extern void disable_timer_nmi_watchdog(void);
extern void enable_timer_nmi_watchdog(void);
extern int nmi_watchdog_tick(struct pt_regs *regs, unsigned reason);
+extern void cpu_nmi_set_wd_enabled(void);
extern atomic_t nmi_active;
extern unsigned int nmi_watchdog;
diff --git a/include/asm-x86/page.h b/include/asm-x86/page.h
index 79544e6ffb8b..c91574776751 100644
--- a/include/asm-x86/page.h
+++ b/include/asm-x86/page.h
@@ -57,6 +57,7 @@ typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
+extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);
diff --git a/include/asm-x86/page_32.h b/include/asm-x86/page_32.h
index f32062a821c5..72f7305682c6 100644
--- a/include/asm-x86/page_32.h
+++ b/include/asm-x86/page_32.h
@@ -89,9 +89,6 @@ extern int nx_enabled;
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
-#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
-#define MAXMEM (-__PAGE_OFFSET - __VMALLOC_RESERVE)
-
extern void find_low_pfn_range(void);
extern unsigned long init_memory_mapping(unsigned long start,
unsigned long end);
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index cba612c89e06..d7d358a43996 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -252,13 +252,13 @@ struct pv_mmu_ops {
* Hooks for allocating/releasing pagetable pages when they're
* attached to a pagetable
*/
- void (*alloc_pte)(struct mm_struct *mm, u32 pfn);
- void (*alloc_pmd)(struct mm_struct *mm, u32 pfn);
- void (*alloc_pmd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
- void (*alloc_pud)(struct mm_struct *mm, u32 pfn);
- void (*release_pte)(u32 pfn);
- void (*release_pmd)(u32 pfn);
- void (*release_pud)(u32 pfn);
+ void (*alloc_pte)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd)(struct mm_struct *mm, unsigned long pfn);
+ void (*alloc_pmd_clone)(unsigned long pfn, unsigned long clonepfn, unsigned long start, unsigned long count);
+ void (*alloc_pud)(struct mm_struct *mm, unsigned long pfn);
+ void (*release_pte)(unsigned long pfn);
+ void (*release_pmd)(unsigned long pfn);
+ void (*release_pud)(unsigned long pfn);
/* Pagetable manipulation functions */
void (*set_pte)(pte_t *ptep, pte_t pteval);
@@ -986,35 +986,35 @@ static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}
-static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
-static inline void paravirt_release_pte(unsigned pfn)
+static inline void paravirt_release_pte(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}
-static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}
-static inline void paravirt_alloc_pmd_clone(unsigned pfn, unsigned clonepfn,
- unsigned start, unsigned count)
+static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
+ unsigned long start, unsigned long count)
{
PVOP_VCALL4(pv_mmu_ops.alloc_pmd_clone, pfn, clonepfn, start, count);
}
-static inline void paravirt_release_pmd(unsigned pfn)
+static inline void paravirt_release_pmd(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}
-static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned pfn)
+static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
-static inline void paravirt_release_pud(unsigned pfn)
+static inline void paravirt_release_pud(unsigned long pfn)
{
PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}
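Widening the pfn arguments matters on 64-bit hosts: with 4 KiB pages, a 32-bit page-frame number can only name the first 16 TiB of physical memory. As a worked check:

	/* largest physical address reachable through a u32 pfn */
	u64 max_phys = ((u64)1 << 32) << PAGE_SHIFT;	/* 1ULL << 44 = 16 TiB */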
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index 60440b191626..81762081dcd8 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -53,9 +53,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
#define native_ptep_get_and_clear(xp) native_local_ptep_get_and_clear(xp)
#endif
-#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pte_none(x) (!(x).pte_low)
-#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
/*
* Bits 0, 6 and 7 are taken, split up the 29 bits of offset
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index e713bd5f39a6..75f4276b5ddb 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -151,18 +151,11 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
}
-#define pte_page(x) pfn_to_page(pte_pfn(x))
-
static inline int pte_none(pte_t pte)
{
return !pte.pte_low && !pte.pte_high;
}
-static inline unsigned long pte_pfn(pte_t pte)
-{
- return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
-}
-
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 57d919a2d79d..ed932453ef26 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -19,6 +19,7 @@
#define _PAGE_BIT_UNUSED3 11
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
@@ -36,6 +37,7 @@
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
@@ -130,6 +132,17 @@
#define __S110 PAGE_SHARED_EXEC
#define __S111 PAGE_SHARED_EXEC
+/*
+ * early identity mapping pte attrib macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -186,6 +199,13 @@ static inline int pte_special(pte_t pte)
return pte_val(pte) & _PAGE_SPECIAL;
}
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
+}
+
+#define pte_page(pte) pfn_to_page(pte_pfn(pte))
+
static inline int pmd_large(pmd_t pte)
{
return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
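pte_pfn() and pte_page() are now defined once here, replacing the per-level copies removed from pgtable-2level.h, pgtable-3level.h, and pgtable_64.h. A short sketch of the invariants they encode (ptep is assumed to point at a present pte):

	pte_t pte = *ptep;
	unsigned long pfn = pte_pfn(pte);	/* page frame number */
	u64 phys = (u64)pfn << PAGE_SHIFT;	/* physical address */
	struct page *page = pte_page(pte);	/* == pfn_to_page(pfn) */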
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 45c8235400fe..8de702dc7d62 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -57,8 +57,7 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
* area for the same reason. ;)
*/
#define VMALLOC_OFFSET (8 * 1024 * 1024)
-#define VMALLOC_START (((unsigned long)high_memory + 2 * VMALLOC_OFFSET - 1) \
- & ~(VMALLOC_OFFSET - 1))
+#define VMALLOC_START ((unsigned long)high_memory + VMALLOC_OFFSET)
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
@@ -74,6 +73,8 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
#endif
+#define MAXMEM (VMALLOC_END - PAGE_OFFSET - __VMALLOC_RESERVE)
+
/*
* Define this if things work differently on an i386 and an i486:
* it will (on an i486) warn about kernel memory accesses that are
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index e3dcf7a08a0b..fde9770e53d1 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -175,8 +175,6 @@ static inline int pmd_bad(pmd_t pmd)
#define pte_present(x) (pte_val((x)) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT)) /* FIXME: is this right? */
-#define pte_page(x) pfn_to_page(pte_pfn((x)))
-#define pte_pfn(x) ((pte_val((x)) & __PHYSICAL_MASK) >> PAGE_SHIFT)
/*
* Macro to mark a page protection value as "uncacheable".
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 33a034be8b5c..d64a61097165 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -250,6 +250,11 @@ static inline unsigned long frame_pointer(struct pt_regs *regs)
return regs->bp;
}
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
/*
* These are defined as per linux/ptrace.h, which see.
*/
diff --git a/include/asm-x86/resume-trace.h b/include/asm-x86/resume-trace.h
index 519a8ecbfc95..e39376d7de50 100644
--- a/include/asm-x86/resume-trace.h
+++ b/include/asm-x86/resume-trace.h
@@ -7,7 +7,7 @@
do { \
if (pm_trace_enabled) { \
const void *tracedata; \
- asm volatile(_ASM_MOV_UL " $1f,%0\n" \
+ asm volatile(_ASM_MOV " $1f,%0\n" \
".section .tracedata,\"a\"\n" \
"1:\t.word %c1\n\t" \
_ASM_PTR " %c2\n" \
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index 5d08fa280fdf..93adae338ac6 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -97,7 +97,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
"jne 1f\n\t"
"movw %w0,%w1\n\t"
"incb %h1\n\t"
- "lock ; cmpxchgw %w1,%2\n\t"
+ LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
@@ -135,7 +135,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
int inc = 0x00010000;
int tmp;
- asm volatile("lock ; xaddl %0, %1\n"
+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
"movzwl %w0, %2\n\t"
"shrl $16, %0\n\t"
"1:\t"
@@ -162,7 +162,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
"cmpl %0,%1\n\t"
"jne 1f\n\t"
"addl $0x00010000, %1\n\t"
- "lock ; cmpxchgl %1,%2\n\t"
+ LOCK_PREFIX "cmpxchgl %1,%2\n\t"
"1:"
"sete %b1\n\t"
"movzbl %b1,%0\n\t"
diff --git a/include/asm-x86/syscall.h b/include/asm-x86/syscall.h
new file mode 100644
index 000000000000..04c47dc5597c
--- /dev/null
+++ b/include/asm-x86/syscall.h
@@ -0,0 +1,211 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+#include <linux/sched.h>
+#include <linux/err.h>
+
+static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ /*
+ * We always sign-extend a -1 value being set here,
+ * so this is always either -1L or a syscall number.
+ */
+ return regs->orig_ax;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->ax = regs->orig_ax;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->ax;
+#ifdef CONFIG_IA32_EMULATION
+ /*
+ * TS_COMPAT is set for 32-bit syscall entries and then
+ * remains set until we return to user mode.
+ */
+ if (task_thread_info(task)->status & TS_COMPAT)
+ /*
+ * Sign-extend the value so (int)-EFOO becomes (long)-EFOO
+ * and will match correctly in comparisons.
+ */
+ error = (long) (int) error;
+#endif
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->ax;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->ax = (long) error ?: val;
+}
+
+#ifdef CONFIG_X86_32
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(args, &regs->bx + i, n * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+ BUG_ON(i + n > 6);
+ memcpy(&regs->bx + i, args, n * sizeof(args[0]));
+}
+
+#else /* CONFIG_X86_64 */
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+ if (task_thread_info(task)->status & TS_COMPAT)
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->bx;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->cx;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->dx;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->si;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->di;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->bp;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+ else
+# endif
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ *args++ = regs->di;
+ case 1:
+ if (!n--) break;
+ *args++ = regs->si;
+ case 2:
+ if (!n--) break;
+ *args++ = regs->dx;
+ case 3:
+ if (!n--) break;
+ *args++ = regs->r10;
+ case 4:
+ if (!n--) break;
+ *args++ = regs->r8;
+ case 5:
+ if (!n--) break;
+ *args++ = regs->r9;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ break;
+ }
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args)
+{
+# ifdef CONFIG_IA32_EMULATION
+ if (task_thread_info(task)->status & TS_COMPAT)
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->bx = *args++;
+ case 1:
+ if (!n--) break;
+ regs->cx = *args++;
+ case 2:
+ if (!n--) break;
+ regs->dx = *args++;
+ case 3:
+ if (!n--) break;
+ regs->si = *args++;
+ case 4:
+ if (!n--) break;
+ regs->di = *args++;
+ case 5:
+ if (!n--) break;
+ regs->bp = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ }
+ else
+# endif
+ switch (i) {
+ case 0:
+ if (!n--) break;
+ regs->di = *args++;
+ case 1:
+ if (!n--) break;
+ regs->si = *args++;
+ case 2:
+ if (!n--) break;
+ regs->dx = *args++;
+ case 3:
+ if (!n--) break;
+ regs->r10 = *args++;
+ case 4:
+ if (!n--) break;
+ regs->r8 = *args++;
+ case 5:
+ if (!n--) break;
+ regs->r9 = *args++;
+ case 6:
+ if (!n--) break;
+ default:
+ BUG();
+ }
+}
+
+#endif /* CONFIG_X86_32 */
+
+#endif /* _ASM_SYSCALL_H */
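A hedged sketch of how a tracing path might use the new accessors at syscall entry (trace_syscall_entry() and its call site are illustrative, not kernel API):

static void trace_syscall_entry(struct task_struct *task, struct pt_regs *regs)
{
	unsigned long args[6];
	long nr = syscall_get_nr(task, regs);

	if (nr == -1L)
		return;		/* not inside a system call */

	syscall_get_arguments(task, regs, 0, 6, args);
	printk(KERN_DEBUG "syscall %ld(%lx, %lx, %lx, ...)\n",
	       nr, args[0], args[1], args[2]);
}

Because syscall_get_error() sign-extends compat return values, the same consumer works unchanged for 32-bit tasks under CONFIG_IA32_EMULATION.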
diff --git a/include/asm-x86/thread_info.h b/include/asm-x86/thread_info.h
index 30586f2ee558..3f4e52bb77f5 100644
--- a/include/asm-x86/thread_info.h
+++ b/include/asm-x86/thread_info.h
@@ -71,6 +71,7 @@ struct thread_info {
* Warning: layout of LSW is hardcoded in entry.S
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/
@@ -93,6 +94,7 @@ struct thread_info {
#define TIF_BTS_TRACE_TS 27 /* record scheduling event timestamps */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
@@ -133,7 +135,7 @@ struct thread_info {
/* Only used for 64 bit */
#define _TIF_DO_NOTIFY_MASK \
- (_TIF_SIGPENDING|_TIF_MCE_NOTIFY)
+ (_TIF_SIGPENDING|_TIF_MCE_NOTIFY|_TIF_NOTIFY_RESUME)
/* flags to check in __switch_to() */
#define _TIF_WORK_CTXSW \
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index 5cfd2951c9e7..c96c1f5d07a2 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -7,6 +7,7 @@
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/prefetch.h>
+#include <linux/lockdep.h>
#include <asm/page.h>
/*