author | Justin Waters <justin.waters@timesys.com> | 2008-02-26 13:07:02 -0500
---|---|---
committer | Justin Waters <justin.waters@timesys.com> | 2008-02-26 13:07:02 -0500
commit | b80a32b9cc634adfa8eaef33ec981e7febf2ade2 | (patch)
tree | f256bce13ba11f514a388160df84e1410bedbe2b | /include/asm-generic
parent | 594133ef22fae0d737bd1b57352cf3f48a192c63 | (diff)
Update the i.MX31 Kernel to 2.6.23 (tags: 2.6.23-mx31ads-200802261807, 2.6.23-mx31-200802261807)
This is the result of a brute-force attempt to update the kernel to 2.6.23.
Now that we have a git tree, our effort will be a little nicer in the future.
Signed-off-by: Justin Waters <justin.waters@timesys.com>
Diffstat (limited to 'include/asm-generic')
-rw-r--r-- | include/asm-generic/bitops/sched.h | 21
-rw-r--r-- | include/asm-generic/bug.h | 8
-rw-r--r-- | include/asm-generic/dma-mapping-broken.h | 82
-rw-r--r-- | include/asm-generic/fcntl.h | 3
-rw-r--r-- | include/asm-generic/percpu.h | 8
-rw-r--r-- | include/asm-generic/pgtable.h | 117
-rw-r--r-- | include/asm-generic/termios.h | 2
-rw-r--r-- | include/asm-generic/unaligned.h | 26
-rw-r--r-- | include/asm-generic/vmlinux.lds.h | 14
9 files changed, 160 insertions, 121 deletions
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h
index 815bb0148060..604fab7031a6 100644
--- a/include/asm-generic/bitops/sched.h
+++ b/include/asm-generic/bitops/sched.h
@@ -6,28 +6,23 @@
 
 /*
  * Every architecture must define this function. It's the fastest
- * way of searching a 140-bit bitmap where the first 100 bits are
- * unlikely to be set. It's guaranteed that at least one of the 140
- * bits is cleared.
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is cleared.
  */
 static inline int sched_find_first_bit(const unsigned long *b)
 {
 #if BITS_PER_LONG == 64
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (likely(b[1]))
-		return __ffs(b[1]) + 64;
-	return __ffs(b[2]) + 128;
+	return __ffs(b[1]) + 64;
 #elif BITS_PER_LONG == 32
-	if (unlikely(b[0]))
+	if (b[0])
 		return __ffs(b[0]);
-	if (unlikely(b[1]))
+	if (b[1])
 		return __ffs(b[1]) + 32;
-	if (unlikely(b[2]))
+	if (b[2])
 		return __ffs(b[2]) + 64;
-	if (b[3])
-		return __ffs(b[3]) + 96;
-	return __ffs(b[4]) + 128;
+	return __ffs(b[3]) + 96;
 #else
 #error BITS_PER_LONG not defined
 #endif
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 7f30cce52857..d56fedbb457a 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -28,12 +28,12 @@ struct bug_entry {
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 #endif
 
 #ifndef HAVE_ARCH_WARN_ON
 #define WARN_ON(condition) ({						\
-	typeof(condition) __ret_warn_on = (condition);			\
+	int __ret_warn_on = !!(condition);				\
 	if (unlikely(__ret_warn_on)) {					\
 		printk("WARNING: at %s:%d %s()\n", __FILE__,		\
 			__LINE__, __FUNCTION__);			\
@@ -54,7 +54,7 @@ struct bug_entry {
 
 #ifndef HAVE_ARCH_WARN_ON
 #define WARN_ON(condition) ({						\
-	typeof(condition) __ret_warn_on = (condition);			\
+	int __ret_warn_on = !!(condition);				\
 	unlikely(__ret_warn_on);					\
 })
 #endif
@@ -62,7 +62,7 @@ struct bug_entry {
 
 #define WARN_ON_ONCE(condition)	({				\
 	static int __warned;					\
-	typeof(condition) __ret_warn_once = (condition);	\
+	int __ret_warn_once = !!(condition);			\
 								\
 	if (unlikely(__ret_warn_once))				\
 		if (WARN_ON(!__warned))				\
diff --git a/include/asm-generic/dma-mapping-broken.h b/include/asm-generic/dma-mapping-broken.h
index 29413d3d4605..e2468f894d2a 100644
--- a/include/asm-generic/dma-mapping-broken.h
+++ b/include/asm-generic/dma-mapping-broken.h
@@ -1,24 +1,82 @@
 #ifndef _ASM_GENERIC_DMA_MAPPING_H
 #define _ASM_GENERIC_DMA_MAPPING_H
 
-/* This is used for archs that do not support DMA */
+/* define the dma api to allow compilation but not linking of
+ * dma dependent code.  Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
 
-static inline void *
+struct scatterlist;
+
+extern void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t flag)
-{
-	BUG();
-	return NULL;
-}
+		   gfp_t flag);
 
-static inline void
+extern void
 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-		    dma_addr_t dma_handle)
-{
-	BUG();
-}
+		    dma_addr_t dma_handle);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+	       enum dma_data_direction direction);
+
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+		 enum dma_data_direction direction);
+
+extern int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	   enum dma_data_direction direction);
+
+extern void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+	     enum dma_data_direction direction);
+
+extern dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+	     size_t size, enum dma_data_direction direction);
+
+extern void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+	       enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction direction);
+
+extern void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+			      unsigned long offset, size_t size,
+			      enum dma_data_direction direction);
+
+extern void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+		    enum dma_data_direction direction);
+
+#define dma_sync_single_for_device dma_sync_single_for_cpu
+#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
+#define dma_sync_sg_for_device dma_sync_sg_for_cpu
+
+extern int
+dma_mapping_error(dma_addr_t dma_addr);
+
+extern int
+dma_supported(struct device *dev, u64 mask);
+
+extern int
+dma_set_mask(struct device *dev, u64 mask);
+
+extern int
+dma_get_cache_alignment(void);
+
+extern int
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
+
+extern void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+	       enum dma_data_direction direction);
+
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index c154b9d6e7e5..b8477414c5c8 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -48,6 +48,9 @@
 #ifndef O_NOATIME
 #define O_NOATIME	01000000
 #endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC	02000000	/* set close_on_exec */
+#endif
 #ifndef O_NDELAY
 #define O_NDELAY	O_NONBLOCK
 #endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a9041436..d85172e9ed45 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned")))	\
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -34,6 +39,9 @@ do {								\
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7d7bcf990e99..5f0d797d33fd 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -2,25 +2,7 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
-
-#ifndef __HAVE_ARCH_PTEP_ESTABLISH
-/*
- * Establish a new mapping:
- *  - flush the old one
- *  - update the page tables
- *  - inform the TLB about the new one
- *
- * We hold the mm semaphore for reading, and the pte lock.
- *
- * Note: the old pte is known to not be writable, so we don't need to
- * worry about dirty bits etc getting lost.
- */
-#define ptep_establish(__vma, __address, __ptep, __entry)		\
-do {									\
-	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry);	\
-	flush_tlb_page(__vma, __address);				\
-} while (0)
-#endif
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -68,31 +50,6 @@ do {									\
 })
 #endif
 
-#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-#define ptep_test_and_clear_dirty(__vma, __address, __ptep)		\
-({									\
-	pte_t __pte = *__ptep;						\
-	int r = 1;							\
-	if (!pte_dirty(__pte))						\
-		r = 0;							\
-	else								\
-		set_pte_at((__vma)->vm_mm, (__address), (__ptep),	\
-			   pte_mkclean(__pte));				\
-	r;								\
-})
-#endif
-
-#ifndef __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
-#define ptep_clear_flush_dirty(__vma, __address, __ptep)		\
-({									\
-	int __dirty;							\
-	__dirty = ptep_test_and_clear_dirty(__vma, __address, __ptep);	\
-	if (__dirty)							\
-		flush_tlb_page(__vma, __address);			\
-	__dirty;							\
-})
-#endif
-
 #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define ptep_get_and_clear(__mm, __address, __ptep)			\
 ({									\
@@ -177,41 +134,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
- * A facility to provide lazy MMU batching.  This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued.  Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window.  Note that using this
- * interface requires that read hazards be removed from the code.  A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date.  This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified.  In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests.  By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired.  This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
-#endif
-
-/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
@@ -277,6 +199,43 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching.  This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued.  Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window.  Note that using this
+ * interface requires that read hazards be removed from the code.  A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.  This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified.  In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests.  By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired.  This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index 3769e6bd63b1..33dca30a3c45 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -63,6 +63,8 @@ static inline int kernel_termios_to_user_termio(struct termio __user *termio,
 
 #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
 #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
 
 #endif	/* __ARCH_TERMIO_GETPUT */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 09ec447fe2af..2fe1b2e67f01 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -18,7 +18,8 @@
 #define get_unaligned(ptr) \
 	__get_unaligned((ptr), sizeof(*(ptr)))
 #define put_unaligned(x,ptr) \
-	__put_unaligned((__u64)(x), (ptr), sizeof(*(ptr)))
+	((void)sizeof(*(ptr)=(x)),\
+	__put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
 
 /*
  * This function doesn't actually exist.  The idea is that when
@@ -78,38 +79,38 @@ static inline void __ustw(__u16 val, __u16 *addr)
 
 #define __get_unaligned(ptr, size) ({		\
 	const void *__gu_p = ptr;		\
-	__u64 val;				\
+	__u64 __val;				\
 	switch (size) {				\
 	case 1:					\
-		val = *(const __u8 *)__gu_p;	\
+		__val = *(const __u8 *)__gu_p;	\
 		break;				\
 	case 2:					\
-		val = __uldw(__gu_p);		\
+		__val = __uldw(__gu_p);		\
 		break;				\
 	case 4:					\
-		val = __uldl(__gu_p);		\
+		__val = __uldl(__gu_p);		\
 		break;				\
 	case 8:					\
-		val = __uldq(__gu_p);		\
+		__val = __uldq(__gu_p);		\
 		break;				\
 	default:				\
 		bad_unaligned_access_length();	\
 	};					\
-	(__typeof__(*(ptr)))val;		\
+	(__force __typeof__(*(ptr)))__val;	\
 })
 
 #define __put_unaligned(val, ptr, size)		\
-do {						\
+({						\
 	void *__gu_p = ptr;			\
 	switch (size) {				\
 	case 1:					\
-		*(__u8 *)__gu_p = val;		\
+		*(__u8 *)__gu_p = (__force __u8)val;	\
 		break;				\
 	case 2:					\
-		__ustw(val, __gu_p);		\
+		__ustw((__force __u16)val, __gu_p);	\
 		break;				\
 	case 4:					\
-		__ustl(val, __gu_p);		\
+		__ustl((__force __u32)val, __gu_p);	\
 		break;				\
 	case 8:					\
 		__ustq(val, __gu_p);		\
@@ -117,6 +118,7 @@ do {						\
 	default:				\
 		bad_unaligned_access_length();	\
 	};					\
-} while(0)
+	(void)0;				\
+})
 
 #endif /* _ASM_GENERIC_UNALIGNED_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1d..0240e0506a07 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -224,7 +224,11 @@
 	}
 
 #define NOTES								\
-	.notes : { *(.note.*) } :note
+	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
+		VMLINUX_SYMBOL(__start_notes) = .;			\
+		*(.note.*)						\
+		VMLINUX_SYMBOL(__stop_notes) = .;			\
+	}
 
 #define INITCALLS							\
 	*(.initcall0.init)						\
@@ -245,3 +249,11 @@
 	*(.initcall7.init)						\
 	*(.initcall7s.init)
 
+#define PERCPU(align)							\
+	. = ALIGN(align);						\
+	__per_cpu_start = .;						\
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {		\
+		*(.data.percpu)						\
+		*(.data.percpu.shared_aligned)				\
+	}								\
+	__per_cpu_end = .;
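
A few of the hunks above repay a closer look. The bitops/sched.h change tracks the CFS merge in 2.6.23: only the 100 real-time priority levels still use a priority bitmap, so sched_find_first_bit() scans 100 bits instead of 140. A minimal userspace sketch of the 64-bit branch (my_ffs64 and find_first_bit100 are hypothetical stand-ins, not kernel symbols):

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's __ffs(): index of lowest set bit. */
static int my_ffs64(unsigned long long w)
{
	return __builtin_ctzll(w);
}

/* A 100-bit bitmap fits in two 64-bit words; as in the kernel version,
 * the caller must guarantee that at least one bit is set. */
static int find_first_bit100(const unsigned long long b[2])
{
	if (b[0])
		return my_ffs64(b[0]);
	return my_ffs64(b[1]) + 64;	/* bits 64..99 live in b[1] */
}

int main(void)
{
	unsigned long long bitmap[2] = { 0, 1ULL << 8 };	/* bit 72 set */
	printf("first set bit: %d\n", find_first_bit100(bitmap));	/* 72 */
	return 0;
}
```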
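The bug.h hunk replaces `typeof(condition) __ret_warn_on = (condition)` with `int __ret_warn_on = !!(condition)`. `typeof` cannot be applied to a bitfield, and a plain cast to a narrower type can truncate a wide nonzero value to 0; `!!` collapses any nonzero scalar to exactly 1 first. A small demonstration, assuming the usual 32-bit `int`:

```c
#include <stdio.h>

int main(void)
{
	/* Nonzero 64-bit value whose low 32 bits are all clear. */
	unsigned long long cond = 1ULL << 32;

	printf("(int)cond = %d\n", (int)cond);	/* 0: truncated, warning lost */
	printf("!!cond    = %d\n", !!cond);	/* 1: normalized first */
	return 0;
}
```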
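dma-mapping-broken.h switches from `static inline` stubs that `BUG()` at runtime to bare `extern` declarations that are defined nowhere, so DMA-dependent code on a no-DMA architecture now fails at link time instead of at runtime (in-tree, the header's comment says such code should declare `depends on HAS_DMA` in its Kconfig). A sketch of the pattern with hypothetical names; it compiles with `cc -c` but the full link fails by design:

```c
/* Declared but deliberately never defined, mirroring the header above. */
extern void *broken_dma_alloc(unsigned long size);

void *driver_probe(void)
{
	return broken_dma_alloc(4096);
}

int main(void)
{
	/* cc -c sketch.c  -> compiles cleanly
	 * cc sketch.c     -> "undefined reference to `broken_dma_alloc'" */
	return driver_probe() == 0;
}
```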
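The fcntl.h hunk introduces O_CLOEXEC, new in 2.6.23. Its point is atomicity: the close-on-exec flag is set at open() time, closing the window in which another thread could fork() and exec() between a plain open() and a separate fcntl(F_SETFD) call. A usage sketch (the path is arbitrary):

```c
#define _GNU_SOURCE		/* O_CLOEXEC needs this on older glibc */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY | O_CLOEXEC);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* fd survives fork() but is closed automatically across execve(). */
	close(fd);
	return 0;
}
```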
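Finally, the `(void)sizeof(*(ptr)=(x))` term added to put_unaligned() is a compile-time type check: `sizeof` never evaluates its operand, so the assignment emits no code, but the compiler must still verify that `x` is assignable to `*ptr`. A userspace sketch of the idiom (checked_put is hypothetical; unlike the real macro it performs an ordinary aligned store and evaluates its arguments twice):

```c
#include <stdio.h>

/* The sizeof() operand is unevaluated: it only forces the compiler to
 * type-check the assignment; the real store happens afterwards. */
#define checked_put(x, ptr) \
	((void)sizeof(*(ptr) = (x)), (void)(*(ptr) = (x)))

int main(void)
{
	int v;

	checked_put(42, &v);		/* fine: int into int */
	printf("%d\n", v);		/* prints 42 */
	/* checked_put(&v, &v); */	/* diagnosed: pointer into int */
	return 0;
}
```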