GCC Bugzilla – Attachment 31083 Details for
Bug 58854
[4.8 regression] "sub sp, fp, #40" hoisted above frame accesses
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
stripped from kernel 3.4 fs/dcache.c
test.c (text/plain), 702.13 KB, created by
bccheng
on 2013-10-23 17:42:00 UTC
(
hide
)
Description:
stripped from kernel 3.4 fs/dcache.c
Filename:
MIME Type:
Creator:
bccheng
Created:
2013-10-23 17:42:00 UTC
Size:
702.13 KB
patch
obsolete
>struct epoll_event; >struct iattr; >struct inode; >struct iocb; >struct io_event; >struct iovec; >struct itimerspec; >struct itimerval; >struct kexec_segment; >struct linux_dirent; >struct linux_dirent64; >struct list_head; >struct mmap_arg_struct; >struct msgbuf; >struct msghdr; >struct mmsghdr; >struct msqid_ds; >struct new_utsname; >struct nfsctl_arg; >struct __old_kernel_stat; >struct oldold_utsname; >struct old_utsname; >struct pollfd; >struct rlimit; >struct rlimit64; >struct rusage; >struct sched_param; >struct sel_arg_struct; >struct semaphore; >struct sembuf; >struct shmid_ds; >struct sockaddr; >struct stat; >struct stat64; >struct statfs; >struct statfs64; >struct __sysctl_args; >struct sysinfo; >struct timespec; >struct timeval; >struct timex; >struct timezone; >struct tms; >struct utimbuf; >struct mq_attr; >struct compat_stat; >struct compat_timeval; >struct robust_list_head; >struct getcpu_cache; >struct old_linux_dirent; >struct perf_event_attr; >struct file_handle; > >typedef __signed__ char __s8; >typedef unsigned char __u8; > >typedef __signed__ short __s16; >typedef unsigned short __u16; > >typedef __signed__ int __s32; >typedef unsigned int __u32; > > >__extension__ typedef __signed__ long long __s64; >__extension__ typedef unsigned long long __u64; > >typedef signed char s8; >typedef unsigned char u8; > >typedef signed short s16; >typedef unsigned short u16; > >typedef signed int s32; >typedef unsigned int u32; > >typedef signed long long s64; >typedef unsigned long long u64; > > > > > > > > > > > > > > > > > > > >struct ftrace_branch_data { > const char *func; > const char *file; > unsigned line; > union { > struct { > unsigned long correct; > unsigned long incorrect; > }; > struct { > unsigned long miss; > unsigned long hit; > }; > unsigned long miss_hit[2]; > }; >}; > > > > > > > >enum { > false = 0, > true = 1 >}; > > >typedef struct { > unsigned long fds_bits[1024 / (8 * sizeof(long))]; >} __kernel_fd_set; > > >typedef void (*__kernel_sighandler_t)(int); > > >typedef int __kernel_key_t; >typedef int __kernel_mqd_t; > > > >typedef unsigned short __kernel_mode_t; > > >typedef unsigned short __kernel_nlink_t; > > >typedef unsigned short __kernel_ipc_pid_t; > > >typedef unsigned short __kernel_uid_t; >typedef unsigned short __kernel_gid_t; > > >typedef unsigned short __kernel_old_dev_t; > > > > > > > > > >typedef long __kernel_long_t; >typedef unsigned long __kernel_ulong_t; > > > >typedef __kernel_ulong_t __kernel_ino_t; > >typedef int __kernel_pid_t; > >typedef __kernel_long_t __kernel_suseconds_t; > > > >typedef int __kernel_daddr_t; > > > >typedef unsigned int __kernel_uid32_t; >typedef unsigned int __kernel_gid32_t; > > > >typedef __kernel_uid_t __kernel_old_uid_t; >typedef __kernel_gid_t __kernel_old_gid_t; > >typedef unsigned int __kernel_size_t; >typedef int __kernel_ssize_t; >typedef int __kernel_ptrdiff_t; > >typedef struct { > int val[2]; >} __kernel_fsid_t; > > > > > >typedef __kernel_long_t __kernel_off_t; >typedef long long __kernel_loff_t; >typedef __kernel_long_t __kernel_time_t; >typedef __kernel_long_t __kernel_clock_t; >typedef int __kernel_timer_t; >typedef int __kernel_clockid_t; >typedef char * __kernel_caddr_t; >typedef unsigned short __kernel_uid16_t; >typedef unsigned short __kernel_gid16_t; > > > > > > >typedef __u32 __kernel_dev_t; > >typedef __kernel_fd_set fd_set; >typedef __kernel_dev_t dev_t; >typedef __kernel_ino_t ino_t; >typedef __kernel_mode_t mode_t; >typedef unsigned short umode_t; >typedef __kernel_nlink_t nlink_t; >typedef 
__kernel_off_t off_t; >typedef __kernel_pid_t pid_t; >typedef __kernel_daddr_t daddr_t; >typedef __kernel_key_t key_t; >typedef __kernel_suseconds_t suseconds_t; >typedef __kernel_timer_t timer_t; >typedef __kernel_clockid_t clockid_t; >typedef __kernel_mqd_t mqd_t; > >typedef _Bool bool; > >typedef __kernel_uid32_t uid_t; >typedef __kernel_gid32_t gid_t; >typedef __kernel_uid16_t uid16_t; >typedef __kernel_gid16_t gid16_t; > >typedef unsigned long uintptr_t; > > > >typedef __kernel_old_uid_t old_uid_t; >typedef __kernel_old_gid_t old_gid_t; > > > >typedef __kernel_loff_t loff_t; > >typedef __kernel_size_t size_t; > > > > >typedef __kernel_ssize_t ssize_t; > > > > >typedef __kernel_ptrdiff_t ptrdiff_t; > > > > >typedef __kernel_time_t time_t; > > > > >typedef __kernel_clock_t clock_t; > > > > >typedef __kernel_caddr_t caddr_t; > > > >typedef unsigned char u_char; >typedef unsigned short u_short; >typedef unsigned int u_int; >typedef unsigned long u_long; > > >typedef unsigned char unchar; >typedef unsigned short ushort; >typedef unsigned int uint; >typedef unsigned long ulong; > > > > >typedef __u8 u_int8_t; >typedef __s8 int8_t; >typedef __u16 u_int16_t; >typedef __s16 int16_t; >typedef __u32 u_int32_t; >typedef __s32 int32_t; > > > >typedef __u8 uint8_t; >typedef __u16 uint16_t; >typedef __u32 uint32_t; > > >typedef __u64 uint64_t; >typedef __u64 u_int64_t; >typedef __s64 int64_t; > >typedef u64 sector_t; >typedef u64 blkcnt_t; > >typedef u32 dma_addr_t; > >typedef __u16 __le16; >typedef __u16 __be16; >typedef __u32 __le32; >typedef __u32 __be32; >typedef __u64 __le64; >typedef __u64 __be64; > >typedef __u16 __sum16; >typedef __u32 __wsum; > >typedef unsigned gfp_t; >typedef unsigned fmode_t; > > > > >typedef u32 phys_addr_t; > > >typedef phys_addr_t resource_size_t; > > > > > >typedef unsigned long irq_hw_number_t; > >typedef struct { > int counter; >} atomic_t; > > > > > > > >struct list_head { > struct list_head *next, *prev; >}; > >struct hlist_head { > struct hlist_node *first; >}; > >struct hlist_node { > struct hlist_node *next, **pprev; >}; > >struct ustat { > __kernel_daddr_t f_tfree; > __kernel_ino_t f_tinode; > char f_fname[6]; > char f_fpack[6]; >}; > > > > > > >struct rcu_head { > struct rcu_head *next; > void (*func)(struct rcu_head *head); >}; > > > > > > > > > > > > > > > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swahb32(__u32 x) >{ > __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x)); > return x; >} > > > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __arch_swab32(__u32 x) >{ > __asm__ ("rev %0, %1" : "=r" (x) : "r" (x)); > return x; >} > > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u16 __fswab16(__u16 val) >{ > > return ((__u16)__arch_swahb32(val)); > > > >} > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswab32(__u32 val) >{ > > return __arch_swab32(val); > > > >} > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u64 __fswab64(__u64 val) >{ > > > > __u32 h = val >> 32; > __u32 l = val & ((1ULL << 32) - 1); > return (((__u64)__fswab32(l)) << 32) | ((__u64)(__fswab32(h))); > > > >} > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahw32(__u32 val) >{ > > > > return ((__u32)( (((__u32)(val) & (__u32)0x0000ffffUL) << 16) | (((__u32)(val) & (__u32)0xffff0000UL) >> 16))); > >} > >static inline __attribute__((always_inline)) __attribute__((__const__)) __u32 __fswahb32(__u32 
val) >{ > > return __arch_swahb32(val); > > > >} > >static inline __attribute__((always_inline)) __u16 __swab16p(const __u16 *p) >{ > > > > return (__builtin_constant_p((__u16)(*p)) ? ((__u16)( (((__u16)(*p) & (__u16)0x00ffU) << 8) | (((__u16)(*p) & (__u16)0xff00U) >> 8))) : __fswab16(*p)); > >} > > > > > >static inline __attribute__((always_inline)) __u32 __swab32p(const __u32 *p) >{ > > > > return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x000000ffUL) << 24) | (((__u32)(*p) & (__u32)0x0000ff00UL) << 8) | (((__u32)(*p) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(*p) & (__u32)0xff000000UL) >> 24))) : __fswab32(*p)); > >} > > > > > >static inline __attribute__((always_inline)) __u64 __swab64p(const __u64 *p) >{ > > > > return (__builtin_constant_p((__u64)(*p)) ? ((__u64)( (((__u64)(*p) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(*p) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(*p) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(*p) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(*p) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(*p) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(*p) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(*p) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(*p)); > >} > > > > > > > >static inline __attribute__((always_inline)) __u32 __swahw32p(const __u32 *p) >{ > > > > return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x0000ffffUL) << 16) | (((__u32)(*p) & (__u32)0xffff0000UL) >> 16))) : __fswahw32(*p)); > >} > > > > > > > >static inline __attribute__((always_inline)) __u32 __swahb32p(const __u32 *p) >{ > > > > return (__builtin_constant_p((__u32)(*p)) ? ((__u32)( (((__u32)(*p) & (__u32)0x00ff00ffUL) << 8) | (((__u32)(*p) & (__u32)0xff00ff00UL) >> 8))) : __fswahb32(*p)); > >} > > > > > >static inline __attribute__((always_inline)) void __swab16s(__u16 *p) >{ > > > > *p = __swab16p(p); > >} > > > > >static inline __attribute__((always_inline)) void __swab32s(__u32 *p) >{ > > > > *p = __swab32p(p); > >} > > > > > >static inline __attribute__((always_inline)) void __swab64s(__u64 *p) >{ > > > > *p = __swab64p(p); > >} > > > > > > > >static inline __attribute__((always_inline)) void __swahw32s(__u32 *p) >{ > > > > *p = __swahw32p(p); > >} > > > > > > > >static inline __attribute__((always_inline)) void __swahb32s(__u32 *p) >{ > > > > *p = __swahb32p(p); > >} > > >static inline __attribute__((always_inline)) __le64 __cpu_to_le64p(const __u64 *p) >{ > return ( __le64)*p; >} >static inline __attribute__((always_inline)) __u64 __le64_to_cpup(const __le64 *p) >{ > return ( __u64)*p; >} >static inline __attribute__((always_inline)) __le32 __cpu_to_le32p(const __u32 *p) >{ > return ( __le32)*p; >} >static inline __attribute__((always_inline)) __u32 __le32_to_cpup(const __le32 *p) >{ > return ( __u32)*p; >} >static inline __attribute__((always_inline)) __le16 __cpu_to_le16p(const __u16 *p) >{ > return ( __le16)*p; >} >static inline __attribute__((always_inline)) __u16 __le16_to_cpup(const __le16 *p) >{ > return ( __u16)*p; >} >static inline __attribute__((always_inline)) __be64 __cpu_to_be64p(const __u64 *p) >{ > return ( __be64)__swab64p(p); >} >static inline __attribute__((always_inline)) __u64 __be64_to_cpup(const __be64 *p) >{ > return __swab64p((__u64 *)p); >} >static inline __attribute__((always_inline)) __be32 __cpu_to_be32p(const __u32 *p) >{ > return ( __be32)__swab32p(p); >} >static inline __attribute__((always_inline)) __u32 __be32_to_cpup(const __be32 *p) >{ > return 
__swab32p((__u32 *)p); >} >static inline __attribute__((always_inline)) __be16 __cpu_to_be16p(const __u16 *p) >{ > return ( __be16)__swab16p(p); >} >static inline __attribute__((always_inline)) __u16 __be16_to_cpup(const __be16 *p) >{ > return __swab16p((__u16 *)p); >} > > > >static inline __attribute__((always_inline)) void le16_add_cpu(__le16 *var, u16 val) >{ > *var = (( __le16)(__u16)((( __u16)(__le16)(*var)) + val)); >} > >static inline __attribute__((always_inline)) void le32_add_cpu(__le32 *var, u32 val) >{ > *var = (( __le32)(__u32)((( __u32)(__le32)(*var)) + val)); >} > >static inline __attribute__((always_inline)) void le64_add_cpu(__le64 *var, u64 val) >{ > *var = (( __le64)(__u64)((( __u64)(__le64)(*var)) + val)); >} > >static inline __attribute__((always_inline)) void be16_add_cpu(__be16 *var, u16 val) >{ > *var = (( __be16)(__builtin_constant_p((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val))) ? ((__u16)( (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0x00ffU) << 8) | (((__u16)(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)) & (__u16)0xff00U) >> 8))) : __fswab16(((__builtin_constant_p((__u16)(( __u16)(__be16)(*var))) ? ((__u16)( (((__u16)(( __u16)(__be16)(*var)) & (__u16)0x00ffU) << 8) | (((__u16)(( __u16)(__be16)(*var)) & (__u16)0xff00U) >> 8))) : __fswab16(( __u16)(__be16)(*var))) + val)))); >} > >static inline __attribute__((always_inline)) void be32_add_cpu(__be32 *var, u32 val) >{ > *var = (( __be32)(__builtin_constant_p((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val))) ? ((__u32)( (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x000000ffUL) << 24) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? 
((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)) & (__u32)0xff000000UL) >> 24))) : __fswab32(((__builtin_constant_p((__u32)(( __u32)(__be32)(*var))) ? ((__u32)( (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x000000ffUL) << 24) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x0000ff00UL) << 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0x00ff0000UL) >> 8) | (((__u32)(( __u32)(__be32)(*var)) & (__u32)0xff000000UL) >> 24))) : __fswab32(( __u32)(__be32)(*var))) + val)))); >} > >static inline __attribute__((always_inline)) void be64_add_cpu(__be64 *var, u64 val) >{ > *var = (( __be64)(__builtin_constant_p((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val))) ? ((__u64)( (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? ((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(((__builtin_constant_p((__u64)(( __u64)(__be64)(*var))) ? 
((__u64)( (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000000000ffULL) << 56) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000000000ff00ULL) << 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000000000ff0000ULL) << 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00000000ff000000ULL) << 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x000000ff00000000ULL) >> 8) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x0000ff0000000000ULL) >> 24) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0x00ff000000000000ULL) >> 40) | (((__u64)(( __u64)(__be64)(*var)) & (__u64)0xff00000000000000ULL) >> 56))) : __fswab64(( __u64)(__be64)(*var))) + val)))); >} > > > > >typedef __kernel_ulong_t aio_context_t; > >enum { > IOCB_CMD_PREAD = 0, > IOCB_CMD_PWRITE = 1, > IOCB_CMD_FSYNC = 2, > IOCB_CMD_FDSYNC = 3, > > > > > IOCB_CMD_NOOP = 6, > IOCB_CMD_PREADV = 7, > IOCB_CMD_PWRITEV = 8, >}; > >struct io_event { > __u64 data; > __u64 obj; > __s64 res; > __s64 res2; >}; > >struct iocb { > > __u64 aio_data; > __u32 aio_key, aio_reserved1; > > > > __u16 aio_lio_opcode; > __s16 aio_reqprio; > __u32 aio_fildes; > > __u64 aio_buf; > __u64 aio_nbytes; > __s64 aio_offset; > > > __u64 aio_reserved2; > > > __u32 aio_flags; > > > > > > __u32 aio_resfd; >}; > > > >struct task_struct; > >typedef struct __user_cap_header_struct { > __u32 version; > int pid; >} *cap_user_header_t; > >typedef struct __user_cap_data_struct { > __u32 effective; > __u32 permitted; > __u32 inheritable; >} *cap_user_data_t; > >struct vfs_cap_data { > __le32 magic_etc; > struct { > __le32 permitted; > __le32 inheritable; > } data[2]; >}; > >extern int file_caps_enabled; > >typedef struct kernel_cap_struct { > __u32 cap[2]; >} kernel_cap_t; > > >struct cpu_vfs_cap_data { > __u32 magic_etc; > kernel_cap_t permitted; > kernel_cap_t inheritable; >}; > >struct dentry; >struct user_namespace; > >struct user_namespace *current_user_ns(void); > >extern const kernel_cap_t __cap_empty_set; >extern const kernel_cap_t __cap_init_eff_set; > >static inline __attribute__((always_inline)) kernel_cap_t cap_combine(const kernel_cap_t a, > const kernel_cap_t b) >{ > kernel_cap_t dest; > do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] | b.cap[__capi]; } } while (0); > return dest; >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_intersect(const kernel_cap_t a, > const kernel_cap_t b) >{ > kernel_cap_t dest; > do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] & b.cap[__capi]; } } while (0); > return dest; >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_drop(const kernel_cap_t a, > const kernel_cap_t drop) >{ > kernel_cap_t dest; > do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = a.cap[__capi] &~ drop.cap[__capi]; } } while (0); > return dest; >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_invert(const kernel_cap_t c) >{ > kernel_cap_t dest; > do { unsigned __capi; for (__capi = 0; __capi < 2; ++__capi) { dest.cap[__capi] = ~ c.cap[__capi]; } } while (0); > return dest; >} > >static inline __attribute__((always_inline)) int cap_isclear(const kernel_cap_t a) >{ > unsigned __capi; > for (__capi = 0; __capi < 2; ++__capi) { > if (a.cap[__capi] != 0) > return 0; > } > return 1; >} > >static inline __attribute__((always_inline)) int cap_issubset(const kernel_cap_t a, const kernel_cap_t set) >{ > kernel_cap_t dest; > dest = cap_drop(a, set); > return cap_isclear(dest); >} > > > >static 
inline __attribute__((always_inline)) int cap_is_fs_cap(int cap) >{ > const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } }); > return !!((1 << ((cap) & 31)) & __cap_fs_set.cap[((cap) >> 5)]); >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_drop_fs_set(const kernel_cap_t a) >{ > const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } }); > return cap_drop(a, __cap_fs_set); >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_raise_fs_set(const kernel_cap_t a, > const kernel_cap_t permitted) >{ > const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((9) & 31)), ((1 << ((32) & 31))) } }); > return cap_combine(a, > cap_intersect(permitted, __cap_fs_set)); >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_drop_nfsd_set(const kernel_cap_t a) >{ > const kernel_cap_t __cap_fs_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } }); > return cap_drop(a, __cap_fs_set); >} > >static inline __attribute__((always_inline)) kernel_cap_t cap_raise_nfsd_set(const kernel_cap_t a, > const kernel_cap_t permitted) >{ > const kernel_cap_t __cap_nfsd_set = ((kernel_cap_t){{ ((1 << ((0) & 31)) | (1 << ((27) & 31)) | (1 << ((1) & 31)) | (1 << ((2) & 31)) | (1 << ((3) & 31)) | (1 << ((4) & 31))) | (1 << ((24) & 31)), ((1 << ((32) & 31))) } }); > return cap_combine(a, > cap_intersect(permitted, __cap_nfsd_set)); >} > >extern bool has_capability(struct task_struct *t, int cap); >extern bool has_ns_capability(struct task_struct *t, > struct user_namespace *ns, int cap); >extern bool has_capability_noaudit(struct task_struct *t, int cap); >extern bool has_ns_capability_noaudit(struct task_struct *t, > struct user_namespace *ns, int cap); >extern bool capable(int cap); >extern bool ns_capable(struct user_namespace *ns, int cap); >extern bool nsown_capable(int cap); > > >extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps); > > > > > > > > > > > > >static inline __attribute__((always_inline)) void INIT_LIST_HEAD(struct list_head *list) >{ > list->next = list; > list->prev = list; >} > >static inline __attribute__((always_inline)) void __list_add(struct list_head *new, > struct list_head *prev, > struct list_head *next) >{ > next->prev = new; > new->next = next; > new->prev = prev; > prev->next = new; >} > >static inline __attribute__((always_inline)) void list_add(struct list_head *new, struct list_head *head) >{ > __list_add(new, head, head->next); >} > >static inline __attribute__((always_inline)) void list_add_tail(struct list_head *new, struct list_head *head) >{ > __list_add(new, head->prev, head); >} > >static inline __attribute__((always_inline)) void __list_del(struct list_head * prev, struct list_head * next) >{ > next->prev = prev; > prev->next = next; >} > >static inline __attribute__((always_inline)) void __list_del_entry(struct list_head *entry) >{ > __list_del(entry->prev, entry->next); >} > >static inline 
__attribute__((always_inline)) void list_del(struct list_head *entry) >{ > __list_del(entry->prev, entry->next); > entry->next = ((void *) 0x00100100 + 0); > entry->prev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void list_replace(struct list_head *old, > struct list_head *new) >{ > new->next = old->next; > new->next->prev = new; > new->prev = old->prev; > new->prev->next = new; >} > >static inline __attribute__((always_inline)) void list_replace_init(struct list_head *old, > struct list_head *new) >{ > list_replace(old, new); > INIT_LIST_HEAD(old); >} > > > > > >static inline __attribute__((always_inline)) void list_del_init(struct list_head *entry) >{ > __list_del_entry(entry); > INIT_LIST_HEAD(entry); >} > > > > > > >static inline __attribute__((always_inline)) void list_move(struct list_head *list, struct list_head *head) >{ > __list_del_entry(list); > list_add(list, head); >} > > > > > > >static inline __attribute__((always_inline)) void list_move_tail(struct list_head *list, > struct list_head *head) >{ > __list_del_entry(list); > list_add_tail(list, head); >} > > > > > > >static inline __attribute__((always_inline)) int list_is_last(const struct list_head *list, > const struct list_head *head) >{ > return list->next == head; >} > > > > > >static inline __attribute__((always_inline)) int list_empty(const struct list_head *head) >{ > return head->next == head; >} > >static inline __attribute__((always_inline)) int list_empty_careful(const struct list_head *head) >{ > struct list_head *next = head->next; > return (next == head) && (next == head->prev); >} > > > > > >static inline __attribute__((always_inline)) void list_rotate_left(struct list_head *head) >{ > struct list_head *first; > > if (!list_empty(head)) { > first = head->next; > list_move_tail(first, head); > } >} > > > > > >static inline __attribute__((always_inline)) int list_is_singular(const struct list_head *head) >{ > return !list_empty(head) && (head->next == head->prev); >} > >static inline __attribute__((always_inline)) void __list_cut_position(struct list_head *list, > struct list_head *head, struct list_head *entry) >{ > struct list_head *new_first = entry->next; > list->next = head->next; > list->next->prev = list; > list->prev = entry; > entry->next = list; > head->next = new_first; > new_first->prev = head; >} > >static inline __attribute__((always_inline)) void list_cut_position(struct list_head *list, > struct list_head *head, struct list_head *entry) >{ > if (list_empty(head)) > return; > if (list_is_singular(head) && > (head->next != entry && head != entry)) > return; > if (entry == head) > INIT_LIST_HEAD(list); > else > __list_cut_position(list, head, entry); >} > >static inline __attribute__((always_inline)) void __list_splice(const struct list_head *list, > struct list_head *prev, > struct list_head *next) >{ > struct list_head *first = list->next; > struct list_head *last = list->prev; > > first->prev = prev; > prev->next = first; > > last->next = next; > next->prev = last; >} > > > > > > >static inline __attribute__((always_inline)) void list_splice(const struct list_head *list, > struct list_head *head) >{ > if (!list_empty(list)) > __list_splice(list, head, head->next); >} > > > > > > >static inline __attribute__((always_inline)) void list_splice_tail(struct list_head *list, > struct list_head *head) >{ > if (!list_empty(list)) > __list_splice(list, head->prev, head); >} > >static inline __attribute__((always_inline)) void list_splice_init(struct list_head *list, > 
struct list_head *head) >{ > if (!list_empty(list)) { > __list_splice(list, head, head->next); > INIT_LIST_HEAD(list); > } >} > >static inline __attribute__((always_inline)) void list_splice_tail_init(struct list_head *list, > struct list_head *head) >{ > if (!list_empty(list)) { > __list_splice(list, head->prev, head); > INIT_LIST_HEAD(list); > } >} > >static inline __attribute__((always_inline)) void INIT_HLIST_NODE(struct hlist_node *h) >{ > h->next = ((void *)0); > h->pprev = ((void *)0); >} > >static inline __attribute__((always_inline)) int hlist_unhashed(const struct hlist_node *h) >{ > return !h->pprev; >} > >static inline __attribute__((always_inline)) int hlist_empty(const struct hlist_head *h) >{ > return !h->first; >} > >static inline __attribute__((always_inline)) void __hlist_del(struct hlist_node *n) >{ > struct hlist_node *next = n->next; > struct hlist_node **pprev = n->pprev; > *pprev = next; > if (next) > next->pprev = pprev; >} > >static inline __attribute__((always_inline)) void hlist_del(struct hlist_node *n) >{ > __hlist_del(n); > n->next = ((void *) 0x00100100 + 0); > n->pprev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_del_init(struct hlist_node *n) >{ > if (!hlist_unhashed(n)) { > __hlist_del(n); > INIT_HLIST_NODE(n); > } >} > >static inline __attribute__((always_inline)) void hlist_add_head(struct hlist_node *n, struct hlist_head *h) >{ > struct hlist_node *first = h->first; > n->next = first; > if (first) > first->pprev = &n->next; > h->first = n; > n->pprev = &h->first; >} > > >static inline __attribute__((always_inline)) void hlist_add_before(struct hlist_node *n, > struct hlist_node *next) >{ > n->pprev = next->pprev; > n->next = next; > next->pprev = &n->next; > *(n->pprev) = n; >} > >static inline __attribute__((always_inline)) void hlist_add_after(struct hlist_node *n, > struct hlist_node *next) >{ > next->next = n->next; > n->next = next; > next->pprev = &n->next; > > if(next->next) > next->next->pprev = &next->next; >} > > >static inline __attribute__((always_inline)) void hlist_add_fake(struct hlist_node *n) >{ > n->pprev = &n->next; >} > > > > > >static inline __attribute__((always_inline)) void hlist_move_list(struct hlist_head *old, > struct hlist_head *new) >{ > new->first = old->first; > if (new->first) > new->first->pprev = &new->first; > old->first = ((void *)0); >} > > > > > > > > > > > > > > > > > > > > >struct bug_entry { > > unsigned long bug_addr; > > > > > > const char *file; > > > > unsigned short line; > > unsigned short flags; >}; > >extern __attribute__((format(printf, 3, 4))) >void warn_slowpath_fmt(const char *file, const int line, > const char *fmt, ...); >extern __attribute__((format(printf, 4, 5))) >void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint, > const char *fmt, ...); >extern void warn_slowpath_null(const char *file, const int line); > > >struct pt_regs; >void die(const char *msg, struct pt_regs *regs, int err); > >struct siginfo; >void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, > unsigned long err, unsigned long trap); > >void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, > struct pt_regs *), > int sig, int code, const char *name); > >void hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, > struct pt_regs *), > int sig, int code, const char *name); > >extern void c_backtrace(unsigned long fp, int pmode); > >struct mm_struct; >extern void show_pte(struct mm_struct *mm, unsigned long addr); 
>extern void __show_regs(struct pt_regs *); > > >enum bug_trap_type { > BUG_TRAP_TYPE_NONE = 0, > BUG_TRAP_TYPE_WARN = 1, > BUG_TRAP_TYPE_BUG = 2, >}; > >struct pt_regs; > >extern int __build_bug_on_failed; > >static inline __attribute__((always_inline)) int is_warning_bug(const struct bug_entry *bug) >{ > return bug->flags & (1 << 0); >} > >const struct bug_entry *find_bug(unsigned long bugaddr); > >enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs); > > >int is_valid_bugaddr(unsigned long addr); > > > > > > > >struct ipc_perm >{ > __kernel_key_t key; > __kernel_uid_t uid; > __kernel_gid_t gid; > __kernel_uid_t cuid; > __kernel_gid_t cgid; > __kernel_mode_t mode; > unsigned short seq; >}; > > > > > >struct ipc64_perm { > __kernel_key_t key; > __kernel_uid32_t uid; > __kernel_gid32_t gid; > __kernel_uid32_t cuid; > __kernel_gid32_t cgid; > __kernel_mode_t mode; > > unsigned char __pad1[4 - sizeof(__kernel_mode_t)]; > unsigned short seq; > unsigned short __pad2; > unsigned long __unused1; > unsigned long __unused2; >}; > > > >struct ipc_kludge { > struct msgbuf *msgp; > long msgtyp; >}; > > > > > > > > > >struct timespec; >struct compat_timespec; > > > > >struct restart_block { > long (*fn)(struct restart_block *); > union { > > struct { > u32 *uaddr; > u32 val; > u32 flags; > u32 bitset; > u64 time; > u32 *uaddr2; > } futex; > > struct { > clockid_t clockid; > struct timespec *rmtp; > > > > u64 expires; > } nanosleep; > > struct { > struct pollfd *ufds; > int nfds; > int has_timeout; > unsigned long tv_sec; > unsigned long tv_nsec; > } poll; > }; >}; > >extern long do_no_restart_syscall(struct restart_block *parm); > > > >extern unsigned int __sw_hweight8(unsigned int w); >extern unsigned int __sw_hweight16(unsigned int w); >extern unsigned int __sw_hweight32(unsigned int w); >extern unsigned long __sw_hweight64(__u64 w); > > > > > > > > > > > > > > > > > > > >extern unsigned int elf_hwcap; > > >struct pt_regs { > unsigned long uregs[18]; >}; > >static inline __attribute__((always_inline)) int valid_user_regs(struct pt_regs *regs) >{ > unsigned long mode = regs->uregs[16] & 0x0000001f; > > > > > regs->uregs[16] &= ~(0x00000040 | 0x00000100); > > if ((regs->uregs[16] & 0x00000080) == 0) { > if (mode == 0x00000010) > return 1; > if (elf_hwcap & (1 << 3) && mode == 0x00000000) > return 1; > } > > > > > regs->uregs[16] &= 0xff000000 | 0x00ff0000 | 0x0000ff00 | 0x00000020 | 0x00000010; > if (!(elf_hwcap & (1 << 3))) > regs->uregs[16] |= 0x00000010; > > return 0; >} > >static inline __attribute__((always_inline)) long regs_return_value(struct pt_regs *regs) >{ > return regs->uregs[0]; >} > > > > >extern unsigned long profile_pc(struct pt_regs *regs); > >extern int regs_query_register_offset(const char *name); >extern const char *regs_query_register_name(unsigned int offset); >extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); >extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, > unsigned int n); > >static inline __attribute__((always_inline)) unsigned long regs_get_register(struct pt_regs *regs, > unsigned int offset) >{ > if (__builtin_expect(!!(offset > (__builtin_offsetof(struct pt_regs,uregs[17]))), 0)) > return 0; > return *(unsigned long *)((unsigned long)regs + offset); >} > > >static inline __attribute__((always_inline)) unsigned long kernel_stack_pointer(struct pt_regs *regs) >{ > return regs->uregs[13]; >} > > > > > > > >static inline __attribute__((always_inline)) unsigned long arch_local_irq_save(void) >{ > 
unsigned long flags; > > asm volatile( > " mrs %0, cpsr @ arch_local_irq_save\n" > " cpsid i" > : "=r" (flags) : : "memory", "cc"); > return flags; >} > >static inline __attribute__((always_inline)) void arch_local_irq_enable(void) >{ > asm volatile( > " cpsie i @ arch_local_irq_enable" > : > : > : "memory", "cc"); >} > >static inline __attribute__((always_inline)) void arch_local_irq_disable(void) >{ > asm volatile( > " cpsid i @ arch_local_irq_disable" > : > : > : "memory", "cc"); >} > >static inline __attribute__((always_inline)) unsigned long arch_local_save_flags(void) >{ > unsigned long flags; > asm volatile( > " mrs %0, cpsr @ local_save_flags" > : "=r" (flags) : : "memory", "cc"); > return flags; >} > > > > >static inline __attribute__((always_inline)) void arch_local_irq_restore(unsigned long flags) >{ > asm volatile( > " msr cpsr_c, %0 @ local_irq_restore" > : > : "r" (flags) > : "memory", "cc"); >} > >static inline __attribute__((always_inline)) int arch_irqs_disabled_flags(unsigned long flags) >{ > return flags & 0x00000080; >} > > > >static inline __attribute__((always_inline)) void ____atomic_set_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > *p |= mask; > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); >} > >static inline __attribute__((always_inline)) void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > *p &= ~mask; > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); >} > >static inline __attribute__((always_inline)) void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > *p ^= mask; > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); >} > >static inline __attribute__((always_inline)) int >____atomic_test_and_set_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned int res; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > res = *p; > *p = res | mask; > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); > > return (res & mask) != 0; >} > >static inline __attribute__((always_inline)) int >____atomic_test_and_clear_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned int res; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > res = *p; > *p = res & ~mask; 
> do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); > > return (res & mask) != 0; >} > >static inline __attribute__((always_inline)) int >____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) >{ > unsigned long flags; > unsigned int res; > unsigned long mask = 1UL << (bit & 31); > > p += bit >> 5; > > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); > res = *p; > *p = res ^ mask; > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); > > return (res & mask) != 0; >} > > > >static inline __attribute__((always_inline)) void __set_bit(int nr, volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > > *p |= mask; >} > >static inline __attribute__((always_inline)) void __clear_bit(int nr, volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > > *p &= ~mask; >} > >static inline __attribute__((always_inline)) void __change_bit(int nr, volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > > *p ^= mask; >} > >static inline __attribute__((always_inline)) int __test_and_set_bit(int nr, volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > unsigned long old = *p; > > *p = old | mask; > return (old & mask) != 0; >} > >static inline __attribute__((always_inline)) int __test_and_clear_bit(int nr, volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > unsigned long old = *p; > > *p = old & ~mask; > return (old & mask) != 0; >} > > >static inline __attribute__((always_inline)) int __test_and_change_bit(int nr, > volatile unsigned long *addr) >{ > unsigned long mask = (1UL << ((nr) % 32)); > unsigned long *p = ((unsigned long *)addr) + ((nr) / 32); > unsigned long old = *p; > > *p = old ^ mask; > return (old & mask) != 0; >} > > > > > > >static inline __attribute__((always_inline)) int test_bit(int nr, const volatile unsigned long *addr) >{ > return 1UL & (addr[((nr) / 32)] >> (nr & (32 -1))); >} > > >extern void _set_bit(int nr, volatile unsigned long * p); >extern void _clear_bit(int nr, volatile unsigned long * p); >extern void _change_bit(int nr, volatile unsigned long * p); >extern int _test_and_set_bit(int nr, volatile unsigned long * p); >extern int _test_and_clear_bit(int nr, volatile unsigned long * p); >extern int _test_and_change_bit(int nr, volatile unsigned long * p); > > > > >extern int _find_first_zero_bit_le(const void * p, unsigned size); >extern int _find_next_zero_bit_le(const void * p, int size, int offset); >extern int _find_first_bit_le(const unsigned long *p, unsigned size); >extern int _find_next_bit_le(const unsigned long *p, int size, int offset); > > > > >extern int _find_first_zero_bit_be(const void * p, unsigned size); >extern int _find_next_zero_bit_be(const void * p, int size, int offset); >extern int _find_first_bit_be(const unsigned long *p, unsigned size); >extern int _find_next_bit_be(const unsigned long *p, int size, int offset); > >static inline __attribute__((always_inline)) 
int constant_fls(int x) >{ > int r = 32; > > if (!x) > return 0; > if (!(x & 0xffff0000u)) { > x <<= 16; > r -= 16; > } > if (!(x & 0xff000000u)) { > x <<= 8; > r -= 8; > } > if (!(x & 0xf0000000u)) { > x <<= 4; > r -= 4; > } > if (!(x & 0xc0000000u)) { > x <<= 2; > r -= 2; > } > if (!(x & 0x80000000u)) { > x <<= 1; > r -= 1; > } > return r; >} > > > > > > >static inline __attribute__((always_inline)) int fls(int x) >{ > int ret; > > if (__builtin_constant_p(x)) > return constant_fls(x); > > asm("clz\t%0, %1" : "=r" (ret) : "r" (x)); > ret = 32 - ret; > return ret; >} > > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) int fls64(__u64 x) >{ > __u32 h = x >> 32; > if (h) > return fls(h) + 32; > return fls(x); >} > > > > >static inline __attribute__((always_inline)) int sched_find_first_bit(const unsigned long *b) >{ > > > > > > if (b[0]) > return (({ unsigned long __t = (b[0]); fls(__t & -__t); }) - 1); > if (b[1]) > return (({ unsigned long __t = (b[1]); fls(__t & -__t); }) - 1) + 32; > if (b[2]) > return (({ unsigned long __t = (b[2]); fls(__t & -__t); }) - 1) + 64; > return (({ unsigned long __t = (b[3]); fls(__t & -__t); }) - 1) + 96; > > > >} > > > > > > > > > > > >static inline __attribute__((always_inline)) unsigned int __arch_hweight32(unsigned int w) >{ > return __sw_hweight32(w); >} > >static inline __attribute__((always_inline)) unsigned int __arch_hweight16(unsigned int w) >{ > return __sw_hweight16(w); >} > >static inline __attribute__((always_inline)) unsigned int __arch_hweight8(unsigned int w) >{ > return __sw_hweight8(w); >} > >static inline __attribute__((always_inline)) unsigned long __arch_hweight64(__u64 w) >{ > return __sw_hweight64(w); >} > > > > > > > > > >static inline __attribute__((always_inline)) unsigned long find_next_zero_bit_le(const void *addr, > unsigned long size, unsigned long offset) >{ > return _find_next_zero_bit_le(addr,size,offset); >} > >static inline __attribute__((always_inline)) unsigned long find_next_bit_le(const void *addr, > unsigned long size, unsigned long offset) >{ > return _find_next_bit_le(addr,size,offset); >} > >static inline __attribute__((always_inline)) unsigned long find_first_zero_bit_le(const void *addr, > unsigned long size) >{ > return _find_first_zero_bit_le(addr,size); >} > >static inline __attribute__((always_inline)) int test_bit_le(int nr, const void *addr) >{ > return test_bit(nr ^ 0, addr); >} > >static inline __attribute__((always_inline)) void __set_bit_le(int nr, void *addr) >{ > __set_bit(nr ^ 0, addr); >} > >static inline __attribute__((always_inline)) void __clear_bit_le(int nr, void *addr) >{ > __clear_bit(nr ^ 0, addr); >} > >static inline __attribute__((always_inline)) int test_and_set_bit_le(int nr, void *addr) >{ > return _test_and_set_bit(nr ^ 0,addr); >} > >static inline __attribute__((always_inline)) int test_and_clear_bit_le(int nr, void *addr) >{ > return _test_and_clear_bit(nr ^ 0,addr); >} > >static inline __attribute__((always_inline)) int __test_and_set_bit_le(int nr, void *addr) >{ > return __test_and_set_bit(nr ^ 0, addr); >} > >static inline __attribute__((always_inline)) int __test_and_clear_bit_le(int nr, void *addr) >{ > return __test_and_clear_bit(nr ^ 0, addr); >} > > > > > > > > > >static __inline__ __attribute__((always_inline)) int get_bitmask_order(unsigned int count) >{ > int order; > > order = fls(count); > return order; >} > >static __inline__ __attribute__((always_inline)) int get_count_order(unsigned int count) >{ > int order; > > order = fls(count) - 
1; > if (count & (count - 1)) > order++; > return order; >} > >static inline __attribute__((always_inline)) unsigned long hweight_long(unsigned long w) >{ > return sizeof(w) == 4 ? (__builtin_constant_p(w) ? ((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) : __arch_hweight32(w)) : (__builtin_constant_p(w) ? (((( (!!((w) & (1ULL << 0))) + (!!((w) & (1ULL << 1))) + (!!((w) & (1ULL << 2))) + (!!((w) & (1ULL << 3))) + (!!((w) & (1ULL << 4))) + (!!((w) & (1ULL << 5))) + (!!((w) & (1ULL << 6))) + (!!((w) & (1ULL << 7))) ) + ( (!!(((w) >> 8) & (1ULL << 0))) + (!!(((w) >> 8) & (1ULL << 1))) + (!!(((w) >> 8) & (1ULL << 2))) + (!!(((w) >> 8) & (1ULL << 3))) + (!!(((w) >> 8) & (1ULL << 4))) + (!!(((w) >> 8) & (1ULL << 5))) + (!!(((w) >> 8) & (1ULL << 6))) + (!!(((w) >> 8) & (1ULL << 7))) )) + (( (!!(((w) >> 16) & (1ULL << 0))) + (!!(((w) >> 16) & (1ULL << 1))) + (!!(((w) >> 16) & (1ULL << 2))) + (!!(((w) >> 16) & (1ULL << 3))) + (!!(((w) >> 16) & (1ULL << 4))) + (!!(((w) >> 16) & (1ULL << 5))) + (!!(((w) >> 16) & (1ULL << 6))) + (!!(((w) >> 16) & (1ULL << 7))) ) + ( (!!((((w) >> 16) >> 8) & (1ULL << 0))) + (!!((((w) >> 16) >> 8) & (1ULL << 1))) + (!!((((w) >> 16) >> 8) & (1ULL << 2))) + (!!((((w) >> 16) >> 8) & (1ULL << 3))) + (!!((((w) >> 16) >> 8) & (1ULL << 4))) + (!!((((w) >> 16) >> 8) & (1ULL << 5))) + (!!((((w) >> 16) >> 8) & (1ULL << 6))) + (!!((((w) >> 16) >> 8) & (1ULL << 7))) ))) + ((( (!!(((w) >> 32) & (1ULL << 0))) + (!!(((w) >> 32) & (1ULL << 1))) + (!!(((w) >> 32) & (1ULL << 2))) + (!!(((w) >> 32) & (1ULL << 3))) + (!!(((w) >> 32) & (1ULL << 4))) + (!!(((w) >> 32) & (1ULL << 5))) + (!!(((w) >> 32) & (1ULL << 6))) + (!!(((w) >> 32) & (1ULL << 7))) ) + ( (!!((((w) >> 32) >> 8) & (1ULL << 0))) + (!!((((w) >> 32) >> 8) & (1ULL << 1))) + (!!((((w) >> 32) >> 8) & (1ULL << 2))) + (!!((((w) >> 32) >> 8) & (1ULL << 3))) + (!!((((w) >> 32) >> 8) & (1ULL << 4))) + (!!((((w) >> 32) >> 8) & (1ULL << 5))) + (!!((((w) >> 32) >> 8) & (1ULL << 6))) + (!!((((w) >> 32) >> 8) & (1ULL << 7))) )) + (( (!!((((w) >> 32) >> 16) & (1ULL << 0))) + (!!((((w) >> 32) >> 16) & (1ULL << 1))) + (!!((((w) >> 32) >> 16) & (1ULL << 2))) + (!!((((w) >> 32) >> 16) & (1ULL << 3))) + (!!((((w) >> 32) >> 16) & (1ULL << 4))) + (!!((((w) >> 32) >> 16) & (1ULL << 5))) + (!!((((w) >> 32) >> 16) & (1ULL << 6))) + (!!((((w) >> 32) >> 16) & (1ULL << 7))) ) + ( (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 0))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 1))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 2))) + 
(!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 3))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 4))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 5))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 6))) + (!!(((((w) >> 32) >> 16) >> 8) & (1ULL << 7))) )))) : __arch_hweight64(w)); >} > > > > > > >static inline __attribute__((always_inline)) __u64 rol64(__u64 word, unsigned int shift) >{ > return (word << shift) | (word >> (64 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u64 ror64(__u64 word, unsigned int shift) >{ > return (word >> shift) | (word << (64 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u32 rol32(__u32 word, unsigned int shift) >{ > return (word << shift) | (word >> (32 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u32 ror32(__u32 word, unsigned int shift) >{ > return (word >> shift) | (word << (32 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u16 rol16(__u16 word, unsigned int shift) >{ > return (word << shift) | (word >> (16 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u16 ror16(__u16 word, unsigned int shift) >{ > return (word >> shift) | (word << (16 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u8 rol8(__u8 word, unsigned int shift) >{ > return (word << shift) | (word >> (8 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __u8 ror8(__u8 word, unsigned int shift) >{ > return (word >> shift) | (word << (8 - shift)); >} > > > > > > >static inline __attribute__((always_inline)) __s32 sign_extend32(__u32 value, int index) >{ > __u8 shift = 31 - index; > return (__s32)(value << shift) >> shift; >} > >static inline __attribute__((always_inline)) unsigned fls_long(unsigned long l) >{ > if (sizeof(l) == 4) > return fls(l); > return fls64(l); >} > >static inline __attribute__((always_inline)) unsigned long __ffs64(u64 word) >{ > > if (((u32)word) == 0UL) > return (({ unsigned long __t = ((u32)(word >> 32)); fls(__t & -__t); }) - 1) + 32; > > > > return (({ unsigned long __t = ((unsigned long)word); fls(__t & -__t); }) - 1); >} > >extern unsigned long find_last_bit(const unsigned long *addr, > unsigned long size); > > > > > >struct vfp_hard_struct { > > __u64 fpregs[32]; > > > > > > > __u32 fpexc; > __u32 fpscr; > > > > __u32 fpinst; > __u32 fpinst2; > > > __u32 cpu; > >}; > >union vfp_state { > struct vfp_hard_struct hard; >}; > >extern void vfp_flush_thread(union vfp_state *); >extern void vfp_release_thread(union vfp_state *); > > > >struct fp_hard_struct { > unsigned int save[35]; >}; > > > >struct fp_soft_struct { > unsigned int save[35]; >}; > > > >struct iwmmxt_struct { > unsigned int save[0x98 / sizeof(unsigned int)]; >}; > >union fp_state { > struct fp_hard_struct hard; > struct fp_soft_struct soft; > > > >}; > > > >struct crunch_state { > unsigned int mvdx[16][2]; > unsigned int mvax[4][3]; > unsigned int dspsc[2]; >}; > > > > > > > > >struct task_struct; >struct exec_domain; > > > > > > > > > > > >struct outer_cache_fns { > void (*inv_range)(unsigned long, unsigned long); > void (*clean_range)(unsigned long, unsigned long); > void (*flush_range)(unsigned long, unsigned long); > void (*flush_all)(void); > void (*inv_all)(void); > void (*disable)(void); > > > > void (*set_debug)(unsigned long); > void (*resume)(void); >}; > >static inline __attribute__((always_inline)) void outer_inv_range(phys_addr_t start, phys_addr_t end) >{ } >static inline __attribute__((always_inline)) void 
outer_clean_range(phys_addr_t start, phys_addr_t end) >{ } >static inline __attribute__((always_inline)) void outer_flush_range(phys_addr_t start, phys_addr_t end) >{ } >static inline __attribute__((always_inline)) void outer_flush_all(void) { } >static inline __attribute__((always_inline)) void outer_inv_all(void) { } >static inline __attribute__((always_inline)) void outer_disable(void) { } > >static inline __attribute__((always_inline)) void outer_sync(void) >{ } > > > > >typedef unsigned long mm_segment_t; > >struct cpu_context_save { > __u32 r4; > __u32 r5; > __u32 r6; > __u32 r7; > __u32 r8; > __u32 r9; > __u32 sl; > __u32 fp; > __u32 sp; > __u32 pc; > __u32 extra[2]; >}; > > > > > >struct thread_info { > unsigned long flags; > int preempt_count; > mm_segment_t addr_limit; > struct task_struct *task; > struct exec_domain *exec_domain; > __u32 cpu; > __u32 cpu_domain; > struct cpu_context_save cpu_context; > __u32 syscall; > __u8 used_cp[16]; > unsigned long tp_value; > struct crunch_state crunchstate; > union fp_state fpstate __attribute__((aligned(8))); > union vfp_state vfpstate; > > > > struct restart_block restart_block; >}; > >static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void) __attribute__((__const__)); > >static inline __attribute__((always_inline)) struct thread_info *current_thread_info(void) >{ > register unsigned long sp asm ("sp"); > return (struct thread_info *)(sp & ~(8192 - 1)); >} > >extern void crunch_task_disable(struct thread_info *); >extern void crunch_task_copy(struct thread_info *, void *); >extern void crunch_task_restore(struct thread_info *, void *); >extern void crunch_task_release(struct thread_info *); > >extern void iwmmxt_task_disable(struct thread_info *); >extern void iwmmxt_task_copy(struct thread_info *, void *); >extern void iwmmxt_task_restore(struct thread_info *, void *); >extern void iwmmxt_task_release(struct thread_info *); >extern void iwmmxt_task_switch(struct thread_info *); > >extern void vfp_sync_hwstate(struct thread_info *); >extern void vfp_flush_hwstate(struct thread_info *); > >struct user_vfp; >struct user_vfp_exc; > >extern int vfp_preserve_user_clear_hwstate(struct user_vfp *, > struct user_vfp_exc *); >extern int vfp_restore_user_hwstate(struct user_vfp *, > struct user_vfp_exc *); > > >static inline __attribute__((always_inline)) void set_ti_thread_flag(struct thread_info *ti, int flag) >{ > _set_bit(flag,(unsigned long *)&ti->flags); >} > >static inline __attribute__((always_inline)) void clear_ti_thread_flag(struct thread_info *ti, int flag) >{ > _clear_bit(flag,(unsigned long *)&ti->flags); >} > >static inline __attribute__((always_inline)) int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) >{ > return _test_and_set_bit(flag,(unsigned long *)&ti->flags); >} > >static inline __attribute__((always_inline)) int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) >{ > return _test_and_clear_bit(flag,(unsigned long *)&ti->flags); >} > >static inline __attribute__((always_inline)) int test_ti_thread_flag(struct thread_info *ti, int flag) >{ > return test_bit(flag, (unsigned long *)&ti->flags); >} > >static inline __attribute__((always_inline)) void set_restore_sigmask(void) >{ > set_ti_thread_flag(current_thread_info(), 20); > set_ti_thread_flag(current_thread_info(), 0); >} > > > void preempt_schedule(void); > > > > > > > > > > > > > > > > >struct sysinfo { > __kernel_long_t uptime; > __kernel_ulong_t loads[3]; > __kernel_ulong_t totalram; > __kernel_ulong_t 
freeram; > __kernel_ulong_t sharedram; > __kernel_ulong_t bufferram; > __kernel_ulong_t totalswap; > __kernel_ulong_t freeswap; > __u16 procs; > __u16 pad; > __kernel_ulong_t totalhigh; > __kernel_ulong_t freehigh; > __u32 mem_unit; > char _f[20-2*sizeof(__kernel_ulong_t)-sizeof(__u32)]; >}; > > > > >typedef __builtin_va_list __gnuc_va_list; > >typedef __gnuc_va_list va_list; > > > > > > > > >extern __attribute__((const, noreturn)) >int ____ilog2_NaN(void); > >static inline __attribute__((always_inline)) __attribute__((const)) >int __ilog2_u32(u32 n) >{ > return fls(n) - 1; >} > > > >static inline __attribute__((always_inline)) __attribute__((const)) >int __ilog2_u64(u64 n) >{ > return fls64(n) - 1; >} > > > > > > > >static inline __attribute__((always_inline)) __attribute__((const)) >bool is_power_of_2(unsigned long n) >{ > return (n != 0 && ((n & (n - 1)) == 0)); >} > > > > >static inline __attribute__((always_inline)) __attribute__((const)) >unsigned long __roundup_pow_of_two(unsigned long n) >{ > return 1UL << fls_long(n - 1); >} > > > > >static inline __attribute__((always_inline)) __attribute__((const)) >unsigned long __rounddown_pow_of_two(unsigned long n) >{ > return 1UL << (fls_long(n) - 1); >} > > > > > > > > >typedef int (*initcall_t)(void); >typedef void (*exitcall_t)(void); > >extern initcall_t __con_initcall_start[], __con_initcall_end[]; >extern initcall_t __security_initcall_start[], __security_initcall_end[]; > > >typedef void (*ctor_fn_t)(void); > > >extern int do_one_initcall(initcall_t fn); >extern char __attribute__ ((__section__(".init.data"))) boot_command_line[]; >extern char *saved_command_line; >extern unsigned int reset_devices; > > >void setup_arch(char **); >void prepare_namespace(void); > >extern void (*late_time_init)(void); > >extern bool initcall_debug; > >struct obs_kernel_param { > const char *str; > int (*setup_func)(char *); > int early; >}; > >void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) parse_early_param(void); >void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) parse_early_options(char *cmdline); > > >extern const char linux_banner[]; >extern const char linux_proc_banner[]; > >extern int console_printk[]; > > > > > > >static inline __attribute__((always_inline)) void console_silent(void) >{ > (console_printk[0]) = 0; >} > >static inline __attribute__((always_inline)) void console_verbose(void) >{ > if ((console_printk[0])) > (console_printk[0]) = 15; >} > >struct va_format { > const char *fmt; > va_list *va; >}; > >static inline __attribute__((always_inline)) __attribute__((format(printf, 1, 2))) >int no_printk(const char *fmt, ...) 
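>/*
> * Editorial note (not in the original attachment): no_printk() discards
> * its arguments and always returns 0, but the format(printf, 1, 2)
> * attribute above preserves compile-time checking of the format string.
> */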
>{ > return 0; >} > >extern __attribute__((format(printf, 1, 2))) >void early_printk(const char *fmt, ...); > >extern int printk_needs_cpu(int cpu); >extern void printk_tick(void); > > > __attribute__((format(printf, 1, 0))) >int vprintk(const char *fmt, va_list args); > __attribute__((format(printf, 1, 2))) __attribute__((__cold__)) >int printk(const char *fmt, ...); > > > > >__attribute__((format(printf, 1, 2))) __attribute__((__cold__)) int printk_sched(const char *fmt, ...); > > > > > > >extern int __printk_ratelimit(const char *func); > >extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, > unsigned int interval_msec); > >extern int printk_delay_msec; >extern int dmesg_restrict; >extern int kptr_restrict; > >void log_buf_kexec_setup(void); >void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_log_buf(int early); > >extern void dump_stack(void) __attribute__((__cold__)); > >enum { > DUMP_PREFIX_NONE, > DUMP_PREFIX_ADDRESS, > DUMP_PREFIX_OFFSET >}; >extern void hex_dump_to_buffer(const void *buf, size_t len, > int rowsize, int groupsize, > char *linebuf, size_t linebuflen, bool ascii); > >extern void print_hex_dump(const char *level, const char *prefix_str, > int prefix_type, int rowsize, int groupsize, > const void *buf, size_t len, bool ascii); >extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type, > const void *buf, size_t len); > > > >struct _ddebug { > > > > > const char *modname; > const char *function; > const char *filename; > const char *format; > unsigned int lineno:18; > > unsigned int flags:8; >} __attribute__((aligned(8))); > > >int ddebug_add_module(struct _ddebug *tab, unsigned int n, > const char *modname); > > >extern int ddebug_remove_module(const char *mod_name); >extern __attribute__((format(printf, 2, 3))) >int __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); > >struct device; > >extern __attribute__((format(printf, 3, 4))) >int __dynamic_dev_dbg(struct _ddebug *descriptor, const struct device *dev, > const char *fmt, ...); > >struct net_device; > >extern __attribute__((format(printf, 3, 4))) >int __dynamic_netdev_dbg(struct _ddebug *descriptor, > const struct net_device *dev, > const char *fmt, ...); > > > > > > > > > > > >struct completion; >struct pt_regs; >struct user; > > void __might_sleep(const char *file, int line, int preempt_offset); > >static inline __attribute__((always_inline)) void might_fault(void) >{ > do { __might_sleep("include/linux/kernel.h", 196, 0); do { } while (0); } while (0); >} > > >extern struct atomic_notifier_head panic_notifier_list; >extern long (*panic_blink)(int state); >__attribute__((format(printf, 1, 2))) >void panic(const char *fmt, ...) 
> __attribute__((noreturn)) __attribute__((__cold__)); >extern void oops_enter(void); >extern void oops_exit(void); >void print_oops_end_marker(void); >extern int oops_may_print(void); >void do_exit(long error_code) > __attribute__((noreturn)); >void complete_and_exit(struct completion *, long) > __attribute__((noreturn)); > > >int __attribute__((warn_unused_result)) _kstrtoul(const char *s, unsigned int base, unsigned long *res); >int __attribute__((warn_unused_result)) _kstrtol(const char *s, unsigned int base, long *res); > >int __attribute__((warn_unused_result)) kstrtoull(const char *s, unsigned int base, unsigned long long *res); >int __attribute__((warn_unused_result)) kstrtoll(const char *s, unsigned int base, long long *res); >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtoul(const char *s, unsigned int base, unsigned long *res) >{ > > > > > if (sizeof(unsigned long) == sizeof(unsigned long long) && > __alignof__(unsigned long) == __alignof__(unsigned long long)) > return kstrtoull(s, base, (unsigned long long *)res); > else > return _kstrtoul(s, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtol(const char *s, unsigned int base, long *res) >{ > > > > > if (sizeof(long) == sizeof(long long) && > __alignof__(long) == __alignof__(long long)) > return kstrtoll(s, base, (long long *)res); > else > return _kstrtol(s, base, res); >} > >int __attribute__((warn_unused_result)) kstrtouint(const char *s, unsigned int base, unsigned int *res); >int __attribute__((warn_unused_result)) kstrtoint(const char *s, unsigned int base, int *res); > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64(const char *s, unsigned int base, u64 *res) >{ > return kstrtoull(s, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64(const char *s, unsigned int base, s64 *res) >{ > return kstrtoll(s, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32(const char *s, unsigned int base, u32 *res) >{ > return kstrtouint(s, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32(const char *s, unsigned int base, s32 *res) >{ > return kstrtoint(s, base, res); >} > >int __attribute__((warn_unused_result)) kstrtou16(const char *s, unsigned int base, u16 *res); >int __attribute__((warn_unused_result)) kstrtos16(const char *s, unsigned int base, s16 *res); >int __attribute__((warn_unused_result)) kstrtou8(const char *s, unsigned int base, u8 *res); >int __attribute__((warn_unused_result)) kstrtos8(const char *s, unsigned int base, s8 *res); > >int __attribute__((warn_unused_result)) kstrtoull_from_user(const char *s, size_t count, unsigned int base, unsigned long long *res); >int __attribute__((warn_unused_result)) kstrtoll_from_user(const char *s, size_t count, unsigned int base, long long *res); >int __attribute__((warn_unused_result)) kstrtoul_from_user(const char *s, size_t count, unsigned int base, unsigned long *res); >int __attribute__((warn_unused_result)) kstrtol_from_user(const char *s, size_t count, unsigned int base, long *res); >int __attribute__((warn_unused_result)) kstrtouint_from_user(const char *s, size_t count, unsigned int base, unsigned int *res); >int __attribute__((warn_unused_result)) kstrtoint_from_user(const char *s, size_t count, unsigned int base, int *res); >int 
__attribute__((warn_unused_result)) kstrtou16_from_user(const char *s, size_t count, unsigned int base, u16 *res); >int __attribute__((warn_unused_result)) kstrtos16_from_user(const char *s, size_t count, unsigned int base, s16 *res); >int __attribute__((warn_unused_result)) kstrtou8_from_user(const char *s, size_t count, unsigned int base, u8 *res); >int __attribute__((warn_unused_result)) kstrtos8_from_user(const char *s, size_t count, unsigned int base, s8 *res); > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou64_from_user(const char *s, size_t count, unsigned int base, u64 *res) >{ > return kstrtoull_from_user(s, count, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos64_from_user(const char *s, size_t count, unsigned int base, s64 *res) >{ > return kstrtoll_from_user(s, count, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtou32_from_user(const char *s, size_t count, unsigned int base, u32 *res) >{ > return kstrtouint_from_user(s, count, base, res); >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) kstrtos32_from_user(const char *s, size_t count, unsigned int base, s32 *res) >{ > return kstrtoint_from_user(s, count, base, res); >} > > > >extern unsigned long simple_strtoul(const char *,char **,unsigned int); >extern long simple_strtol(const char *,char **,unsigned int); >extern unsigned long long simple_strtoull(const char *,char **,unsigned int); >extern long long simple_strtoll(const char *,char **,unsigned int); > > > > > >extern int num_to_str(char *buf, int size, unsigned long long num); > > > >extern __attribute__((format(printf, 2, 3))) int sprintf(char *buf, const char * fmt, ...); >extern __attribute__((format(printf, 2, 0))) int vsprintf(char *buf, const char *, va_list); >extern __attribute__((format(printf, 3, 4))) >int snprintf(char *buf, size_t size, const char *fmt, ...); >extern __attribute__((format(printf, 3, 0))) >int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); >extern __attribute__((format(printf, 3, 4))) >int scnprintf(char *buf, size_t size, const char *fmt, ...); >extern __attribute__((format(printf, 3, 0))) >int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); >extern __attribute__((format(printf, 2, 3))) >char *kasprintf(gfp_t gfp, const char *fmt, ...); >extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args); > >extern __attribute__((format(scanf, 2, 3))) >int sscanf(const char *, const char *, ...); >extern __attribute__((format(scanf, 2, 0))) >int vsscanf(const char *, const char *, va_list); > >extern int get_option(char **str, int *pint); >extern char *get_options(const char *str, int nints, int *ints); >extern unsigned long long memparse(const char *ptr, char **retptr); > >extern int core_kernel_text(unsigned long addr); >extern int core_kernel_data(unsigned long addr); >extern int __kernel_text_address(unsigned long addr); >extern int kernel_text_address(unsigned long addr); >extern int func_ptr_is_kernel_text(void *ptr); > >struct pid; >extern struct pid *session_of_pgrp(struct pid *pgrp); > >unsigned long int_sqrt(unsigned long); > >extern void bust_spinlocks(int yes); >extern void wake_up_klogd(void); >extern int oops_in_progress; >extern int panic_timeout; >extern int panic_on_oops; >extern int panic_on_unrecovered_nmi; >extern int panic_on_io_nmi; >extern int sysctl_panic_on_stackoverflow; 
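>/*
> * Illustrative sketch (editorial addition, not part of the stripped
> * kernel source): how the checked kstrto* converters declared above are
> * typically used. demo_parse_ul() is a hypothetical helper; kstrtoul()
> * returns 0 on success and a negative errno on a malformed or
> * out-of-range string, and warn_unused_result forces callers to check
> * that return value rather than ignore it.
> */
>static inline __attribute__((always_inline)) __attribute__((unused)) int demo_parse_ul(const char *s, unsigned long *out)
>{
> return kstrtoul(s, 10, out);
>}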
>extern const char *print_tainted(void); >extern void add_taint(unsigned flag); >extern int test_taint(unsigned flag); >extern unsigned long get_taint(void); >extern int root_mountflags; > >extern bool early_boot_irqs_disabled; > > >extern enum system_states { > SYSTEM_BOOTING, > SYSTEM_RUNNING, > SYSTEM_HALT, > SYSTEM_POWER_OFF, > SYSTEM_RESTART, > SYSTEM_SUSPEND_DISK, >} system_state; > >extern const char hex_asc[]; > > > >static inline __attribute__((always_inline)) char *hex_byte_pack(char *buf, u8 byte) >{ > *buf++ = hex_asc[((byte) & 0xf0) >> 4]; > *buf++ = hex_asc[((byte) & 0x0f)]; > return buf; >} > >static inline __attribute__((always_inline)) char * __attribute__((deprecated)) pack_hex_byte(char *buf, u8 byte) >{ > return hex_byte_pack(buf, byte); >} > >extern int hex_to_bin(char ch); >extern int __attribute__((warn_unused_result)) hex2bin(u8 *dst, const char *src, size_t count); > >void tracing_off_permanent(void); > > > > >enum ftrace_dump_mode { > DUMP_NONE, > DUMP_ALL, > DUMP_ORIG, >}; > > >void tracing_on(void); >void tracing_off(void); >int tracing_is_on(void); > >extern void tracing_start(void); >extern void tracing_stop(void); >extern void ftrace_off_permanent(void); > >static inline __attribute__((always_inline)) __attribute__((format(printf, 1, 2))) >void ____trace_printk_check_format(const char *fmt, ...) >{ >} > >extern __attribute__((format(printf, 2, 3))) >int __trace_bprintk(unsigned long ip, const char *fmt, ...); > >extern __attribute__((format(printf, 2, 3))) >int __trace_printk(unsigned long ip, const char *fmt, ...); > >extern void trace_dump_stack(void); > >extern int >__ftrace_vbprintk(unsigned long ip, const char *fmt, va_list ap); > >extern int >__ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap); > >extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); > >extern int do_sysinfo(struct sysinfo *info); > > >extern char *mach_panic_string; > > > > > > > >extern void local_bh_disable(void); >extern void _local_bh_enable(void); >extern void local_bh_enable(void); >extern void local_bh_enable_ip(unsigned long ip); > > > > > > > > > > > > >typedef struct { > volatile unsigned int lock; >} arch_spinlock_t; > > > >typedef struct { > volatile unsigned int lock; >} arch_rwlock_t; > > > > > > > >struct task_struct; >struct lockdep_map; > > >extern int prove_locking; >extern int lock_stat; > >static inline __attribute__((always_inline)) void lockdep_off(void) >{ >} > >static inline __attribute__((always_inline)) void lockdep_on(void) >{ >} > >struct lock_class_key { }; > >static inline __attribute__((always_inline)) void print_irqtrace_events(struct task_struct *curr) >{ >} > > >typedef struct raw_spinlock { > arch_spinlock_t raw_lock; > > unsigned int break_lock; > >} raw_spinlock_t; > >typedef struct spinlock { > union { > struct raw_spinlock rlock; > > }; >} spinlock_t; > > > >typedef struct { > arch_rwlock_t raw_lock; > > unsigned int break_lock; > >} rwlock_t; > > > > > > > > > > > > > > > > > > > > > > > >struct task_struct; > > > >struct arch_hw_breakpoint_ctrl { > u32 __reserved : 9, > mismatch : 1, > : 9, > len : 8, > type : 2, > privilege : 2, > enabled : 1; >}; > >struct arch_hw_breakpoint { > u32 address; > u32 trigger; > struct arch_hw_breakpoint_ctrl step_ctrl; > struct arch_hw_breakpoint_ctrl ctrl; >}; > >static inline __attribute__((always_inline)) u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl) >{ > return (ctrl.mismatch << 22) | (ctrl.len << 5) | (ctrl.type << 3) | > (ctrl.privilege << 1) | ctrl.enabled; >} > >static 
inline __attribute__((always_inline)) void decode_ctrl_reg(u32 reg, > struct arch_hw_breakpoint_ctrl *ctrl) >{ > ctrl->enabled = reg & 0x1; > reg >>= 1; > ctrl->privilege = reg & 0x3; > reg >>= 2; > ctrl->type = reg & 0x3; > reg >>= 2; > ctrl->len = reg & 0xff; > reg >>= 17; > ctrl->mismatch = reg & 0x1; >} > >struct notifier_block; >struct perf_event; >struct pmu; > >extern struct pmu perf_ops_bp; >extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, > int *gen_len, int *gen_type); >extern int arch_check_bp_in_kernelspace(struct perf_event *bp); >extern int arch_validate_hwbkpt_settings(struct perf_event *bp); >extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused, > unsigned long val, void *data); > >extern u8 arch_get_debug_arch(void); >extern u8 arch_get_max_wp_len(void); >extern void clear_ptrace_hw_breakpoint(struct task_struct *tsk); > >int arch_install_hw_breakpoint(struct perf_event *bp); >void arch_uninstall_hw_breakpoint(struct perf_event *bp); >void hw_breakpoint_pmu_read(struct perf_event *bp); >int hw_breakpoint_slots(int type); > > >struct debug_info { > > struct perf_event *hbp[(16 + 16)]; > >}; > >struct thread_struct { > > unsigned long address; > unsigned long trap_no; > unsigned long error_code; > > struct debug_info debug; >}; > >struct task_struct; > > >extern void release_thread(struct task_struct *); > > > > >unsigned long get_wchan(struct task_struct *p); > > > > > > > >void cpu_idle_wait(void); > > > > >extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); > >static inline __attribute__((always_inline)) void prefetch(const void *ptr) >{ > __asm__ __volatile__( > "pld\t%a0" > : > : "p" (ptr) > : "cc"); >} > > >static inline __attribute__((always_inline)) void dsb_sev(void) >{ > > __asm__ __volatile__ ( > "dsb\n" > "9998: " "sev" "\n" " .pushsection \".alt.smp.init\", \"a\"\n" " .long 9998b\n" " " "nop" "\n" " .popsection\n" > ); > > > > > > > >} > >static inline __attribute__((always_inline)) void arch_spin_lock(arch_spinlock_t *lock) >{ > unsigned long tmp; > > __asm__ __volatile__( >"1: ldrex %0, [%1]\n" >" teq %0, #0\n" > "9998: " "wfe" "ne" "\n" " .pushsection \".alt.smp.init\", \"a\"\n" " .long 9998b\n" " " "nop" "\n" " .popsection\n" >" strexeq %0, %2, [%1]\n" >" teqeq %0, #0\n" >" bne 1b" > : "=&r" (tmp) > : "r" (&lock->lock), "r" (1) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); >} > >static inline __attribute__((always_inline)) int arch_spin_trylock(arch_spinlock_t *lock) >{ > unsigned long tmp; > > __asm__ __volatile__( >" ldrex %0, [%1]\n" >" teq %0, #0\n" >" strexeq %0, %2, [%1]" > : "=&r" (tmp) > : "r" (&lock->lock), "r" (1) > : "cc"); > > if (tmp == 0) { > __asm__ __volatile__ ("dmb" : : : "memory"); > return 1; > } else { > return 0; > } >} > >static inline __attribute__((always_inline)) void arch_spin_unlock(arch_spinlock_t *lock) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__( >" str %1, [%0]\n" > : > : "r" (&lock->lock), "r" (0) > : "cc"); > > dsb_sev(); >} > >static inline __attribute__((always_inline)) void arch_write_lock(arch_rwlock_t *rw) >{ > unsigned long tmp; > > __asm__ __volatile__( >"1: ldrex %0, [%1]\n" >" teq %0, #0\n" > "9998: " "wfe" "ne" "\n" " .pushsection \".alt.smp.init\", \"a\"\n" " .long 9998b\n" " " "nop" "\n" " .popsection\n" >" strexeq %0, %2, [%1]\n" >" teq %0, #0\n" >" bne 1b" > : "=&r" (tmp) > : "r" (&rw->lock), "r" (0x80000000) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); >} > >static inline 
__attribute__((always_inline)) int arch_write_trylock(arch_rwlock_t *rw) >{ > unsigned long tmp; > > __asm__ __volatile__( >"1: ldrex %0, [%1]\n" >" teq %0, #0\n" >" strexeq %0, %2, [%1]" > : "=&r" (tmp) > : "r" (&rw->lock), "r" (0x80000000) > : "cc"); > > if (tmp == 0) { > __asm__ __volatile__ ("dmb" : : : "memory"); > return 1; > } else { > return 0; > } >} > >static inline __attribute__((always_inline)) void arch_write_unlock(arch_rwlock_t *rw) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__( > "str %1, [%0]\n" > : > : "r" (&rw->lock), "r" (0) > : "cc"); > > dsb_sev(); >} > >static inline __attribute__((always_inline)) void arch_read_lock(arch_rwlock_t *rw) >{ > unsigned long tmp, tmp2; > > __asm__ __volatile__( >"1: ldrex %0, [%2]\n" >" adds %0, %0, #1\n" >" strexpl %1, %0, [%2]\n" > "9998: " "wfe" "mi" "\n" " .pushsection \".alt.smp.init\", \"a\"\n" " .long 9998b\n" " " "nop" "\n" " .popsection\n" >" rsbpls %0, %1, #0\n" >" bmi 1b" > : "=&r" (tmp), "=&r" (tmp2) > : "r" (&rw->lock) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); >} > >static inline __attribute__((always_inline)) void arch_read_unlock(arch_rwlock_t *rw) >{ > unsigned long tmp, tmp2; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__( >"1: ldrex %0, [%2]\n" >" sub %0, %0, #1\n" >" strex %1, %0, [%2]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (tmp), "=&r" (tmp2) > : "r" (&rw->lock) > : "cc"); > > if (tmp == 0) > dsb_sev(); >} > >static inline __attribute__((always_inline)) int arch_read_trylock(arch_rwlock_t *rw) >{ > unsigned long tmp, tmp2 = 1; > > __asm__ __volatile__( >"1: ldrex %0, [%2]\n" >" adds %0, %0, #1\n" >" strexpl %1, %0, [%2]\n" > : "=&r" (tmp), "+r" (tmp2) > : "r" (&rw->lock) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > return tmp2 == 0; >} > > >static inline __attribute__((always_inline)) void smp_mb__after_lock(void) { __asm__ __volatile__ ("dmb" : : : "memory"); } > >static inline __attribute__((always_inline)) void do_raw_spin_lock(raw_spinlock_t *lock) >{ > (void)0; > arch_spin_lock(&lock->raw_lock); >} > >static inline __attribute__((always_inline)) void >do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) >{ > (void)0; > arch_spin_lock(&lock->raw_lock); >} > >static inline __attribute__((always_inline)) int do_raw_spin_trylock(raw_spinlock_t *lock) >{ > return arch_spin_trylock(&(lock)->raw_lock); >} > >static inline __attribute__((always_inline)) void do_raw_spin_unlock(raw_spinlock_t *lock) >{ > arch_spin_unlock(&lock->raw_lock); > (void)0; >} > > > > > > > > > > >int in_lock_functions(unsigned long addr); > > > >void __attribute__((section(".spinlock.text"))) _raw_spin_lock(raw_spinlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) > ; >void __attribute__((section(".spinlock.text"))) >_raw_spin_lock_nest_lock(raw_spinlock_t *lock, struct lockdep_map *map) > ; >void __attribute__((section(".spinlock.text"))) _raw_spin_lock_bh(raw_spinlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_spin_lock_irq(raw_spinlock_t *lock) > ; > >unsigned long __attribute__((section(".spinlock.text"))) _raw_spin_lock_irqsave(raw_spinlock_t *lock) > ; >unsigned long __attribute__((section(".spinlock.text"))) >_raw_spin_lock_irqsave_nested(raw_spinlock_t *lock, int subclass) > ; >int __attribute__((section(".spinlock.text"))) _raw_spin_trylock(raw_spinlock_t *lock); >int __attribute__((section(".spinlock.text"))) 
_raw_spin_trylock_bh(raw_spinlock_t *lock); >void __attribute__((section(".spinlock.text"))) _raw_spin_unlock(raw_spinlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_bh(raw_spinlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_spin_unlock_irq(raw_spinlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) >_raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags) > ; > >static inline __attribute__((always_inline)) int __raw_spin_trylock(raw_spinlock_t *lock) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > if (do_raw_spin_trylock(lock)) { > do { } while (0); > return 1; > } > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > return 0; >} > >static inline __attribute__((always_inline)) void __raw_spin_unlock(raw_spinlock_t *lock) >{ > do { } while (0); > do_raw_spin_unlock(lock); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock, > unsigned long flags) >{ > do { } while (0); > do_raw_spin_unlock(lock); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_spin_unlock_irq(raw_spinlock_t *lock) >{ > do { } while (0); > do_raw_spin_unlock(lock); > do { do { } while (0); arch_local_irq_enable(); } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_spin_unlock_bh(raw_spinlock_t *lock) >{ > do { } while (0); > do_raw_spin_unlock(lock); > do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); > local_bh_enable_ip((unsigned long)__builtin_return_address(0)); >} > >static inline __attribute__((always_inline)) int __raw_spin_trylock_bh(raw_spinlock_t *lock) >{ > local_bh_disable(); > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > if 
(do_raw_spin_trylock(lock)) { > do { } while (0); > return 1; > } > do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); > local_bh_enable_ip((unsigned long)__builtin_return_address(0)); > return 0; >} > > > >void __attribute__((section(".spinlock.text"))) _raw_read_lock(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_lock(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_read_lock_bh(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_lock_bh(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_read_lock_irq(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_lock_irq(rwlock_t *lock) ; >unsigned long __attribute__((section(".spinlock.text"))) _raw_read_lock_irqsave(rwlock_t *lock) > ; >unsigned long __attribute__((section(".spinlock.text"))) _raw_write_lock_irqsave(rwlock_t *lock) > ; >int __attribute__((section(".spinlock.text"))) _raw_read_trylock(rwlock_t *lock); >int __attribute__((section(".spinlock.text"))) _raw_write_trylock(rwlock_t *lock); >void __attribute__((section(".spinlock.text"))) _raw_read_unlock(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_unlock(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_read_unlock_bh(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_unlock_bh(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_read_unlock_irq(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) _raw_write_unlock_irq(rwlock_t *lock) ; >void __attribute__((section(".spinlock.text"))) >_raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) > ; >void __attribute__((section(".spinlock.text"))) >_raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags) > ; > >static inline __attribute__((always_inline)) int __raw_read_trylock(rwlock_t *lock) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > if (arch_read_trylock(&(lock)->raw_lock)) { > do { } while (0); > return 1; > } > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > return 0; >} > >static inline __attribute__((always_inline)) int __raw_write_trylock(rwlock_t *lock) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > if (arch_write_trylock(&(lock)->raw_lock)) { > do { } while (0); > return 1; > } > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > return 0; >} > >static inline __attribute__((always_inline)) void __raw_write_unlock(rwlock_t *lock) >{ > do { } while (0); > do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 
0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_read_unlock(rwlock_t *lock) >{ > do { } while (0); > do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void >__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags) >{ > do { } while (0); > do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_read_unlock_irq(rwlock_t *lock) >{ > do { } while (0); > do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { do { } while (0); arch_local_irq_enable(); } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_read_unlock_bh(rwlock_t *lock) >{ > do { } while (0); > do {arch_read_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); > local_bh_enable_ip((unsigned long)__builtin_return_address(0)); >} > >static inline __attribute__((always_inline)) void __raw_write_unlock_irqrestore(rwlock_t *lock, > unsigned long flags) >{ > do { } while (0); > do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_write_unlock_irq(rwlock_t *lock) >{ > do { } while (0); > do 
{arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { do { } while (0); arch_local_irq_enable(); } while (0); > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) void __raw_write_unlock_bh(rwlock_t *lock) >{ > do { } while (0); > do {arch_write_unlock(&(lock)->raw_lock); (void)0; } while (0); > do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); > local_bh_enable_ip((unsigned long)__builtin_return_address(0)); >} > > > >static inline __attribute__((always_inline)) raw_spinlock_t *spinlock_check(spinlock_t *lock) >{ > return &lock->rlock; >} > > > > > > > >static inline __attribute__((always_inline)) void spin_lock(spinlock_t *lock) >{ > _raw_spin_lock(&lock->rlock); >} > >static inline __attribute__((always_inline)) void spin_lock_bh(spinlock_t *lock) >{ > _raw_spin_lock_bh(&lock->rlock); >} > >static inline __attribute__((always_inline)) int spin_trylock(spinlock_t *lock) >{ > return (_raw_spin_trylock(&lock->rlock)); >} > >static inline __attribute__((always_inline)) void spin_lock_irq(spinlock_t *lock) >{ > _raw_spin_lock_irq(&lock->rlock); >} > >static inline __attribute__((always_inline)) void spin_unlock(spinlock_t *lock) >{ > _raw_spin_unlock(&lock->rlock); >} > >static inline __attribute__((always_inline)) void spin_unlock_bh(spinlock_t *lock) >{ > _raw_spin_unlock_bh(&lock->rlock); >} > >static inline __attribute__((always_inline)) void spin_unlock_irq(spinlock_t *lock) >{ > _raw_spin_unlock_irq(&lock->rlock); >} > >static inline __attribute__((always_inline)) void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags) >{ > do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _raw_spin_unlock_irqrestore(&lock->rlock, flags); } while (0); >} > >static inline __attribute__((always_inline)) int spin_trylock_bh(spinlock_t *lock) >{ > return (_raw_spin_trylock_bh(&lock->rlock)); >} > >static inline __attribute__((always_inline)) int spin_trylock_irq(spinlock_t *lock) >{ > return ({ do { arch_local_irq_disable(); do { } while (0); } while (0); (_raw_spin_trylock(&lock->rlock)) ? 
1 : ({ do { do { } while (0); arch_local_irq_enable(); } while (0); 0; }); }); >} > > > > > > >static inline __attribute__((always_inline)) void spin_unlock_wait(spinlock_t *lock) >{ > do { while (((&(&lock->rlock)->raw_lock)->lock != 0)) __asm__ __volatile__("": : :"memory"); } while (0); >} > >static inline __attribute__((always_inline)) int spin_is_locked(spinlock_t *lock) >{ > return ((&(&lock->rlock)->raw_lock)->lock != 0); >} > >static inline __attribute__((always_inline)) int spin_is_contended(spinlock_t *lock) >{ > return ((&lock->rlock)->break_lock); >} > >static inline __attribute__((always_inline)) int spin_can_lock(spinlock_t *lock) >{ > return (!((&(&lock->rlock)->raw_lock)->lock != 0)); >} > > > > > > > > > > > > > > > >static inline __attribute__((always_inline)) unsigned long __xchg(unsigned long x, volatile void *ptr, int size) >{ > extern void __bad_xchg(volatile void *, int); > unsigned long ret; > > > > > unsigned int tmp; > > > __asm__ __volatile__ ("dmb" : : : "memory"); > > switch (size) { > > case 1: > asm volatile("@ __xchg1\n" > "1: ldrexb %0, [%3]\n" > " strexb %1, %2, [%3]\n" > " teq %1, #0\n" > " bne 1b" > : "=&r" (ret), "=&r" (tmp) > : "r" (x), "r" (ptr) > : "memory", "cc"); > break; > case 4: > asm volatile("@ __xchg4\n" > "1: ldrex %0, [%3]\n" > " strex %1, %2, [%3]\n" > " teq %1, #0\n" > " bne 1b" > : "=&r" (ret), "=&r" (tmp) > : "r" (x), "r" (ptr) > : "memory", "cc"); > break; > > default: > __bad_xchg(ptr, size), ret = 0; > break; > } > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > > > > > > > > > > > >extern unsigned long wrong_size_cmpxchg(volatile void *ptr); > > > > > >static inline __attribute__((always_inline)) unsigned long __cmpxchg_local_generic(volatile void *ptr, > unsigned long old, unsigned long new, int size) >{ > unsigned long flags, prev; > > > > > if (size == 8 && sizeof(unsigned long) != 8) > wrong_size_cmpxchg(ptr); > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > switch (size) { > case 1: prev = *(u8 *)ptr; > if (prev == old) > *(u8 *)ptr = (u8)new; > break; > case 2: prev = *(u16 *)ptr; > if (prev == old) > *(u16 *)ptr = (u16)new; > break; > case 4: prev = *(u32 *)ptr; > if (prev == old) > *(u32 *)ptr = (u32)new; > break; > case 8: prev = *(u64 *)ptr; > if (prev == old) > *(u64 *)ptr = (u64)new; > break; > default: > wrong_size_cmpxchg(ptr); > } > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > return prev; >} > > > > >static inline __attribute__((always_inline)) u64 __cmpxchg64_local_generic(volatile void *ptr, > u64 old, u64 new) >{ > u64 prev; > unsigned long flags; > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > prev = *(u64 *)ptr; > if (prev == old) > *(u64 *)ptr = new; > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long 
__dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > return prev; >} > > >extern void __bad_cmpxchg(volatile void *ptr, int size); > > > > > >static inline __attribute__((always_inline)) unsigned long __cmpxchg(volatile void *ptr, unsigned long old, > unsigned long new, int size) >{ > unsigned long oldval, res; > > switch (size) { > > case 1: > do { > asm volatile("@ __cmpxchg1\n" > " ldrexb %1, [%2]\n" > " mov %0, #0\n" > " teq %1, %3\n" > " strexbeq %0, %4, [%2]\n" > : "=&r" (res), "=&r" (oldval) > : "r" (ptr), "Ir" (old), "r" (new) > : "memory", "cc"); > } while (res); > break; > case 2: > do { > asm volatile("@ __cmpxchg1\n" > " ldrexh %1, [%2]\n" > " mov %0, #0\n" > " teq %1, %3\n" > " strexheq %0, %4, [%2]\n" > : "=&r" (res), "=&r" (oldval) > : "r" (ptr), "Ir" (old), "r" (new) > : "memory", "cc"); > } while (res); > break; > > case 4: > do { > asm volatile("@ __cmpxchg4\n" > " ldrex %1, [%2]\n" > " mov %0, #0\n" > " teq %1, %3\n" > " strexeq %0, %4, [%2]\n" > : "=&r" (res), "=&r" (oldval) > : "r" (ptr), "Ir" (old), "r" (new) > : "memory", "cc"); > } while (res); > break; > default: > __bad_cmpxchg(ptr, size); > oldval = 0; > } > > return oldval; >} > >static inline __attribute__((always_inline)) unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old, > unsigned long new, int size) >{ > unsigned long ret; > > __asm__ __volatile__ ("dmb" : : : "memory"); > ret = __cmpxchg(ptr, old, new, size); > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > > > > > > > >static inline __attribute__((always_inline)) unsigned long __cmpxchg_local(volatile void *ptr, > unsigned long old, > unsigned long new, int size) >{ > unsigned long ret; > > switch (size) { > > > > > > > default: > ret = __cmpxchg(ptr, old, new, size); > } > > return ret; >} > >static inline __attribute__((always_inline)) unsigned long long __cmpxchg64(volatile void *ptr, > unsigned long long old, > unsigned long long new) >{ > register unsigned long long oldval asm("r0"); > register unsigned long long __old asm("r2") = old; > register unsigned long long __new asm("r4") = new; > unsigned long res; > > do { > asm volatile( > " @ __cmpxchg8\n" > " ldrexd %1, %H1, [%2]\n" > " mov %0, #0\n" > " teq %1, %3\n" > " teqeq %H1, %H3\n" > " strexdeq %0, %4, %H4, [%2]\n" > : "=&r" (res), "=&r" (oldval) > : "r" (ptr), "Ir" (__old), "r" (__new) > : "memory", "cc"); > } while (res); > > return oldval; >} > >static inline __attribute__((always_inline)) unsigned long long __cmpxchg64_mb(volatile void *ptr, > unsigned long long old, > unsigned long long new) >{ > unsigned long long ret; > > __asm__ __volatile__ ("dmb" : : : "memory"); > ret = __cmpxchg64(ptr, old, new); > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > > >static inline __attribute__((always_inline)) void atomic_add(int i, atomic_t *v) >{ > unsigned long tmp; > int result; > > __asm__ __volatile__("@ atomic_add\n" >"1: ldrex %0, [%3]\n" >" add %0, %0, %4\n" >" strex %1, %0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "Ir" (i) > : "cc"); >} > >static inline __attribute__((always_inline)) int atomic_add_return(int i, atomic_t *v) >{ > unsigned long tmp; > int result; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > 
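> /*
>  * Editorial note (not in the original attachment): the dmb above and the
>  * matching dmb after the loop below bracket the ldrex/strex retry loop,
>  * giving atomic_add_return() full-barrier semantics. strex writes a
>  * nonzero status into tmp if the exclusive monitor was lost to another
>  * observer, and the teq/bne pair restarts the update in that case.
>  */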
__asm__ __volatile__("@ atomic_add_return\n" >"1: ldrex %0, [%3]\n" >" add %0, %0, %4\n" >" strex %1, %0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "Ir" (i) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) void atomic_sub(int i, atomic_t *v) >{ > unsigned long tmp; > int result; > > __asm__ __volatile__("@ atomic_sub\n" >"1: ldrex %0, [%3]\n" >" sub %0, %0, %4\n" >" strex %1, %0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "Ir" (i) > : "cc"); >} > >static inline __attribute__((always_inline)) int atomic_sub_return(int i, atomic_t *v) >{ > unsigned long tmp; > int result; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__("@ atomic_sub_return\n" >"1: ldrex %0, [%3]\n" >" sub %0, %0, %4\n" >" strex %1, %0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "Ir" (i) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) int atomic_cmpxchg(atomic_t *ptr, int old, int new) >{ > unsigned long oldval, res; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > do { > __asm__ __volatile__("@ atomic_cmpxchg\n" > "ldrex %1, [%3]\n" > "mov %0, #0\n" > "teq %1, %4\n" > "strexeq %0, %5, [%3]\n" > : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) > : "r" (&ptr->counter), "Ir" (old), "r" (new) > : "cc"); > } while (res); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return oldval; >} > >static inline __attribute__((always_inline)) void atomic_clear_mask(unsigned long mask, unsigned long *addr) >{ > unsigned long tmp, tmp2; > > __asm__ __volatile__("@ atomic_clear_mask\n" >"1: ldrex %0, [%3]\n" >" bic %0, %0, %4\n" >" strex %1, %0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr) > : "r" (addr), "Ir" (mask) > : "cc"); >} > >static inline __attribute__((always_inline)) int __atomic_add_unless(atomic_t *v, int a, int u) >{ > int c, old; > > c = (*(volatile int *)&(v)->counter); > while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c) > c = old; > return c; >} > >typedef struct { > u64 __attribute__((aligned(8))) counter; >} atomic64_t; > > > >static inline __attribute__((always_inline)) u64 atomic64_read(atomic64_t *v) >{ > u64 result; > > __asm__ __volatile__("@ atomic64_read\n" >" ldrexd %0, %H0, [%1]" > : "=&r" (result) > : "r" (&v->counter), "Qo" (v->counter) > ); > > return result; >} > >static inline __attribute__((always_inline)) void atomic64_set(atomic64_t *v, u64 i) >{ > u64 tmp; > > __asm__ __volatile__("@ atomic64_set\n" >"1: ldrexd %0, %H0, [%2]\n" >" strexd %0, %3, %H3, [%2]\n" >" teq %0, #0\n" >" bne 1b" > : "=&r" (tmp), "=Qo" (v->counter) > : "r" (&v->counter), "r" (i) > : "cc"); >} > >static inline __attribute__((always_inline)) void atomic64_add(u64 i, atomic64_t *v) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__("@ atomic64_add\n" >"1: ldrexd %0, %H0, [%3]\n" >" adds %0, %0, %4\n" >" adc %H0, %H0, %H4\n" >" strexd %1, %0, %H0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "r" (i) > : "cc"); >} > >static inline __attribute__((always_inline)) u64 atomic64_add_return(u64 i, atomic64_t *v) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ 
__volatile__("@ atomic64_add_return\n" >"1: ldrexd %0, %H0, [%3]\n" >" adds %0, %0, %4\n" >" adc %H0, %H0, %H4\n" >" strexd %1, %0, %H0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "r" (i) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) void atomic64_sub(u64 i, atomic64_t *v) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__("@ atomic64_sub\n" >"1: ldrexd %0, %H0, [%3]\n" >" subs %0, %0, %4\n" >" sbc %H0, %H0, %H4\n" >" strexd %1, %0, %H0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "r" (i) > : "cc"); >} > >static inline __attribute__((always_inline)) u64 atomic64_sub_return(u64 i, atomic64_t *v) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__("@ atomic64_sub_return\n" >"1: ldrexd %0, %H0, [%3]\n" >" subs %0, %0, %4\n" >" sbc %H0, %H0, %H4\n" >" strexd %1, %0, %H0, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "r" (i) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) >{ > u64 oldval; > unsigned long res; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > do { > __asm__ __volatile__("@ atomic64_cmpxchg\n" > "ldrexd %1, %H1, [%3]\n" > "mov %0, #0\n" > "teq %1, %4\n" > "teqeq %H1, %H4\n" > "strexdeq %0, %5, %H5, [%3]" > : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter) > : "r" (&ptr->counter), "r" (old), "r" (new) > : "cc"); > } while (res); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return oldval; >} > >static inline __attribute__((always_inline)) u64 atomic64_xchg(atomic64_t *ptr, u64 new) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__("@ atomic64_xchg\n" >"1: ldrexd %0, %H0, [%3]\n" >" strexd %1, %4, %H4, [%3]\n" >" teq %1, #0\n" >" bne 1b" > : "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter) > : "r" (&ptr->counter), "r" (new) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) u64 atomic64_dec_if_positive(atomic64_t *v) >{ > u64 result; > unsigned long tmp; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__("@ atomic64_dec_if_positive\n" >"1: ldrexd %0, %H0, [%3]\n" >" subs %0, %0, #1\n" >" sbc %H0, %H0, #0\n" >" teq %H0, #0\n" >" bmi 2f\n" >" strexd %1, %0, %H0, [%3]\n" >" teq %1, #0\n" >" bne 1b\n" >"2:" > : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter) > : "cc"); > > __asm__ __volatile__ ("dmb" : : : "memory"); > > return result; >} > >static inline __attribute__((always_inline)) int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) >{ > u64 val; > unsigned long tmp; > int ret = 1; > > __asm__ __volatile__ ("dmb" : : : "memory"); > > __asm__ __volatile__("@ atomic64_add_unless\n" >"1: ldrexd %0, %H0, [%4]\n" >" teq %0, %5\n" >" teqeq %H0, %H5\n" >" moveq %1, #0\n" >" beq 2f\n" >" adds %0, %0, %6\n" >" adc %H0, %H0, %H6\n" >" strexd %2, %0, %H0, [%4]\n" >" teq %2, #0\n" >" bne 1b\n" >"2:" > : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter) > : "r" (&v->counter), "r" (u), "r" (a) > : "cc"); > > if (ret) > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > > >static inline 
__attribute__((always_inline)) int atomic_add_unless(atomic_t *v, int a, int u) >{ > return __atomic_add_unless(v, a, u) != u; >} > >static inline __attribute__((always_inline)) int atomic_inc_not_zero_hint(atomic_t *v, int hint) >{ > int val, c = hint; > > > if (!hint) > return atomic_add_unless((v), 1, 0); > > do { > val = atomic_cmpxchg(v, c, c + 1); > if (val == c) > return 1; > c = val; > } while (c); > > return 0; >} > > > >static inline __attribute__((always_inline)) int atomic_inc_unless_negative(atomic_t *p) >{ > int v, v1; > for (v = 0; v >= 0; v = v1) { > v1 = atomic_cmpxchg(p, v, v + 1); > if (__builtin_expect(!!(v1 == v), 1)) > return 1; > } > return 0; >} > > > >static inline __attribute__((always_inline)) int atomic_dec_unless_positive(atomic_t *p) >{ > int v, v1; > for (v = 0; v <= 0; v = v1) { > v1 = atomic_cmpxchg(p, v, v - 1); > if (__builtin_expect(!!(v1 == v), 1)) > return 1; > } > return 0; >} > > > >static inline __attribute__((always_inline)) void atomic_or(int i, atomic_t *v) >{ > int old; > int new; > > do { > old = (*(volatile int *)&(v)->counter); > new = old | i; > } while (atomic_cmpxchg(v, old, new) != old); >} > > > > >typedef atomic_t atomic_long_t; > > >static inline __attribute__((always_inline)) long atomic_long_read(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (long)(*(volatile int *)&(v)->counter); >} > >static inline __attribute__((always_inline)) void atomic_long_set(atomic_long_t *l, long i) >{ > atomic_t *v = (atomic_t *)l; > > (((v)->counter) = (i)); >} > >static inline __attribute__((always_inline)) void atomic_long_inc(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > atomic_add(1, v); >} > >static inline __attribute__((always_inline)) void atomic_long_dec(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > atomic_sub(1, v); >} > >static inline __attribute__((always_inline)) void atomic_long_add(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > atomic_add(i, v); >} > >static inline __attribute__((always_inline)) void atomic_long_sub(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > atomic_sub(i, v); >} > >static inline __attribute__((always_inline)) int atomic_long_sub_and_test(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (atomic_sub_return(i, v) == 0); >} > >static inline __attribute__((always_inline)) int atomic_long_dec_and_test(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (atomic_sub_return(1, v) == 0); >} > >static inline __attribute__((always_inline)) int atomic_long_inc_and_test(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (atomic_add_return(1, v) == 0); >} > >static inline __attribute__((always_inline)) int atomic_long_add_negative(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (atomic_add_return(i, v) < 0); >} > >static inline __attribute__((always_inline)) long atomic_long_add_return(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (long)atomic_add_return(i, v); >} > >static inline __attribute__((always_inline)) long atomic_long_sub_return(long i, atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (long)atomic_sub_return(i, v); >} > >static inline __attribute__((always_inline)) long atomic_long_inc_return(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return (long)(atomic_add_return(1, v)); >} > >static inline __attribute__((always_inline)) long atomic_long_dec_return(atomic_long_t *l) >{ > atomic_t *v = (atomic_t *)l; > > return 
(long)(atomic_sub_return(1, v)); >} > >static inline __attribute__((always_inline)) long atomic_long_add_unless(atomic_long_t *l, long a, long u) >{ > atomic_t *v = (atomic_t *)l; > > return (long)atomic_add_unless(v, a, u); >} > > > >extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); > > > > > >struct kern_ipc_perm >{ > spinlock_t lock; > int deleted; > int id; > key_t key; > uid_t uid; > gid_t gid; > uid_t cuid; > gid_t cgid; > umode_t mode; > unsigned long seq; > void *security; >}; > > >struct semid_ds { > struct ipc_perm sem_perm; > __kernel_time_t sem_otime; > __kernel_time_t sem_ctime; > struct sem *sem_base; > struct sem_queue *sem_pending; > struct sem_queue **sem_pending_last; > struct sem_undo *undo; > unsigned short sem_nsems; >}; > > > > >struct semid64_ds { > struct ipc64_perm sem_perm; > __kernel_time_t sem_otime; > unsigned long __unused1; > __kernel_time_t sem_ctime; > unsigned long __unused2; > unsigned long sem_nsems; > unsigned long __unused3; > unsigned long __unused4; >}; > > > >struct sembuf { > unsigned short sem_num; > short sem_op; > short sem_flg; >}; > > >union semun { > int val; > struct semid_ds *buf; > unsigned short *array; > struct seminfo *__buf; > void *__pad; >}; > >struct seminfo { > int semmap; > int semmni; > int semmns; > int semmnu; > int semmsl; > int semopm; > int semume; > int semusz; > int semvmx; > int semaem; >}; > > > > > > > > > > > > > > > > > > > > > > > > > > >extern char *strndup_user(const char *, long); >extern void *memdup_user(const void *, size_t); > > > > > > >extern char * strrchr(const char * s, int c); > > >extern char * strchr(const char * s, int c); > > >extern void * memcpy(void *, const void *, __kernel_size_t); > > >extern void * memmove(void *, const void *, __kernel_size_t); > > >extern void * memchr(const void *, int, __kernel_size_t); > > >extern void * memset(void *, int, __kernel_size_t); > >extern void __memzero(void *ptr, __kernel_size_t n); > > > >extern char * strcpy(char *,const char *); > > >extern char * strncpy(char *,const char *, __kernel_size_t); > > >size_t strlcpy(char *, const char *, size_t); > > >extern char * strcat(char *, const char *); > > >extern char * strncat(char *, const char *, __kernel_size_t); > > >extern size_t strlcat(char *, const char *, __kernel_size_t); > > >extern int strcmp(const char *,const char *); > > >extern int strncmp(const char *,const char *,__kernel_size_t); > > >extern int strnicmp(const char *, const char *, __kernel_size_t); > > >extern int strcasecmp(const char *s1, const char *s2); > > >extern int strncasecmp(const char *s1, const char *s2, size_t n); > > > > > >extern char * strnchr(const char *, size_t, int); > > > > >extern char * __attribute__((warn_unused_result)) skip_spaces(const char *); > >extern char *strim(char *); > >static inline __attribute__((always_inline)) __attribute__((warn_unused_result)) char *strstrip(char *str) >{ > return strim(str); >} > > >extern char * strstr(const char *, const char *); > > >extern char * strnstr(const char *, const char *, size_t); > > >extern __kernel_size_t strlen(const char *); > > >extern __kernel_size_t strnlen(const char *,__kernel_size_t); > > >extern char * strpbrk(const char *,const char *); > > >extern char * strsep(char **,const char *); > > >extern __kernel_size_t strspn(const char *,const char *); > > >extern __kernel_size_t strcspn(const char *,const char *); > >extern void * memscan(void *,int,__kernel_size_t); > > >extern int memcmp(const void *,const void *,__kernel_size_t); > > > > >void 
*memchr_inv(const void *s, int c, size_t n); > >extern char *kstrdup(const char *s, gfp_t gfp); >extern char *kstrndup(const char *s, size_t len, gfp_t gfp); >extern void *kmemdup(const void *src, size_t len, gfp_t gfp); > >extern char **argv_split(gfp_t gfp, const char *str, int *argcp); >extern void argv_free(char **argv); > >extern bool sysfs_streq(const char *s1, const char *s2); >extern int strtobool(const char *s, bool *res); > > >int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args); >int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf); >int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __attribute__((format(printf, 3, 4))); > > >extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, > const void *from, size_t available); > > > > > > >static inline __attribute__((always_inline)) bool strstarts(const char *str, const char *prefix) >{ > return strncmp(str, prefix, strlen(prefix)) == 0; >} > > >extern int __bitmap_empty(const unsigned long *bitmap, int bits); >extern int __bitmap_full(const unsigned long *bitmap, int bits); >extern int __bitmap_equal(const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern void __bitmap_complement(unsigned long *dst, const unsigned long *src, > int bits); >extern void __bitmap_shift_right(unsigned long *dst, > const unsigned long *src, int shift, int bits); >extern void __bitmap_shift_left(unsigned long *dst, > const unsigned long *src, int shift, int bits); >extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern int __bitmap_intersects(const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern int __bitmap_subset(const unsigned long *bitmap1, > const unsigned long *bitmap2, int bits); >extern int __bitmap_weight(const unsigned long *bitmap, int bits); > >extern void bitmap_set(unsigned long *map, int i, int len); >extern void bitmap_clear(unsigned long *map, int start, int nr); >extern unsigned long bitmap_find_next_zero_area(unsigned long *map, > unsigned long size, > unsigned long start, > unsigned int nr, > unsigned long align_mask); > >extern int bitmap_scnprintf(char *buf, unsigned int len, > const unsigned long *src, int nbits); >extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user, > unsigned long *dst, int nbits); >extern int bitmap_parse_user(const char *ubuf, unsigned int ulen, > unsigned long *dst, int nbits); >extern int bitmap_scnlistprintf(char *buf, unsigned int len, > const unsigned long *src, int nbits); >extern int bitmap_parselist(const char *buf, unsigned long *maskp, > int nmaskbits); >extern int bitmap_parselist_user(const char *ubuf, unsigned int ulen, > unsigned long *dst, int nbits); >extern void bitmap_remap(unsigned long *dst, const unsigned long *src, > const unsigned long *old, const unsigned long *new, int bits); >extern int bitmap_bitremap(int oldbit, > const unsigned long *old, const unsigned long *new, int bits); >extern void bitmap_onto(unsigned long *dst, const unsigned long *orig, > const unsigned long *relmap, int bits); >extern void 
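/*
 * Among the string helpers above, strstarts() is the only one defined
 * inline here: prefix matching built from strncmp()/strlen().  A small
 * usage sketch; the path literal and function name are made up for
 * illustration, compiled out:
 */
#if 0
static int sketch_is_dev_path(const char *path)
{
	return strstarts(path, "/dev/");	/* true for e.g. "/dev/null" */
}
#endif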
bitmap_fold(unsigned long *dst, const unsigned long *orig, > int sz, int bits); >extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order); >extern void bitmap_release_region(unsigned long *bitmap, int pos, int order); >extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order); >extern void bitmap_copy_le(void *dst, const unsigned long *src, int nbits); >extern int bitmap_ord_to_pos(const unsigned long *bitmap, int n, int bits); > >static inline __attribute__((always_inline)) void bitmap_zero(unsigned long *dst, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = 0UL; > else { > int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long); > ({ void *__p = (dst); size_t __n = len; if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > } >} > >static inline __attribute__((always_inline)) void bitmap_fill(unsigned long *dst, int nbits) >{ > size_t nlongs = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))); > if (!(__builtin_constant_p(nbits) && (nbits) <= 32)) { > int len = (nlongs - 1) * sizeof(unsigned long); > ({ void *__p = (dst); size_t __n = len; if ((__n) != 0) { if (__builtin_constant_p((0xff)) && (0xff) == 0) __memzero((__p),(__n)); else memset((__p),(0xff),(__n)); } (__p); }); > } > dst[nlongs - 1] = ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ); >} > >static inline __attribute__((always_inline)) void bitmap_copy(unsigned long *dst, const unsigned long *src, > int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = *src; > else { > int len = (((nbits) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(unsigned long); > memcpy(dst, src, len); > } >} > >static inline __attribute__((always_inline)) int bitmap_and(unsigned long *dst, const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return (*dst = *src1 & *src2) != 0; > return __bitmap_and(dst, src1, src2, nbits); >} > >static inline __attribute__((always_inline)) void bitmap_or(unsigned long *dst, const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = *src1 | *src2; > else > __bitmap_or(dst, src1, src2, nbits); >} > >static inline __attribute__((always_inline)) void bitmap_xor(unsigned long *dst, const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = *src1 ^ *src2; > else > __bitmap_xor(dst, src1, src2, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_andnot(unsigned long *dst, const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return (*dst = *src1 & ~(*src2)) != 0; > return __bitmap_andnot(dst, src1, src2, nbits); >} > >static inline __attribute__((always_inline)) void bitmap_complement(unsigned long *dst, const unsigned long *src, > int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = ~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL ); > else > __bitmap_complement(dst, src, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_equal(const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return ! ((*src1 ^ *src2) & ( ((nbits) % 32) ? 
(1UL<<((nbits) % 32))-1 : ~0UL )); > else > return __bitmap_equal(src1, src2, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_intersects(const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return ((*src1 & *src2) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )) != 0; > else > return __bitmap_intersects(src1, src2, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_subset(const unsigned long *src1, > const unsigned long *src2, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return ! ((*src1 & ~(*src2)) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); > else > return __bitmap_subset(src1, src2, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_empty(const unsigned long *src, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return ! (*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); > else > return __bitmap_empty(src, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_full(const unsigned long *src, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return ! (~(*src) & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); > else > return __bitmap_full(src, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_weight(const unsigned long *src, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > return hweight_long(*src & ( ((nbits) % 32) ? (1UL<<((nbits) % 32))-1 : ~0UL )); > return __bitmap_weight(src, nbits); >} > >static inline __attribute__((always_inline)) void bitmap_shift_right(unsigned long *dst, > const unsigned long *src, int n, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = *src >> n; > else > __bitmap_shift_right(dst, src, n, nbits); >} > >static inline __attribute__((always_inline)) void bitmap_shift_left(unsigned long *dst, > const unsigned long *src, int n, int nbits) >{ > if ((__builtin_constant_p(nbits) && (nbits) <= 32)) > *dst = (*src << n) & ( ((nbits) % 32) ? 
(1UL<<((nbits) % 32))-1 : ~0UL ); > else > __bitmap_shift_left(dst, src, n, nbits); >} > >static inline __attribute__((always_inline)) int bitmap_parse(const char *buf, unsigned int buflen, > unsigned long *maskp, int nmaskbits) >{ > return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); >} > > > >typedef struct cpumask { unsigned long bits[(((2) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } cpumask_t; > >extern int nr_cpu_ids; > >extern const struct cpumask *const cpu_possible_mask; >extern const struct cpumask *const cpu_online_mask; >extern const struct cpumask *const cpu_present_mask; >extern const struct cpumask *const cpu_active_mask; > >static inline __attribute__((always_inline)) unsigned int cpumask_check(unsigned int cpu) >{ > > > > return cpu; >} > >static inline __attribute__((always_inline)) unsigned int cpumask_first(const struct cpumask *srcp) >{ > return _find_first_bit_le(((srcp)->bits),2); >} > >static inline __attribute__((always_inline)) unsigned int cpumask_next(int n, const struct cpumask *srcp) >{ > > if (n != -1) > cpumask_check(n); > return _find_next_bit_le(((srcp)->bits),2,n+1); >} > >static inline __attribute__((always_inline)) unsigned int cpumask_next_zero(int n, const struct cpumask *srcp) >{ > > if (n != -1) > cpumask_check(n); > return _find_next_zero_bit_le(((srcp)->bits),2,n+1); >} > >int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *); >int cpumask_any_but(const struct cpumask *mask, unsigned int cpu); > >static inline __attribute__((always_inline)) void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) >{ > _set_bit(cpumask_check(cpu),((dstp)->bits)); >} > > > > > > >static inline __attribute__((always_inline)) void cpumask_clear_cpu(int cpu, struct cpumask *dstp) >{ > _clear_bit(cpumask_check(cpu),((dstp)->bits)); >} > >static inline __attribute__((always_inline)) int cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask) >{ > return _test_and_set_bit(cpumask_check(cpu),((cpumask)->bits)); >} > >static inline __attribute__((always_inline)) int cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask) >{ > return _test_and_clear_bit(cpumask_check(cpu),((cpumask)->bits)); >} > > > > > >static inline __attribute__((always_inline)) void cpumask_setall(struct cpumask *dstp) >{ > bitmap_fill(((dstp)->bits), 2); >} > > > > > >static inline __attribute__((always_inline)) void cpumask_clear(struct cpumask *dstp) >{ > bitmap_zero(((dstp)->bits), 2); >} > > > > > > > >static inline __attribute__((always_inline)) int cpumask_and(struct cpumask *dstp, > const struct cpumask *src1p, > const struct cpumask *src2p) >{ > return bitmap_and(((dstp)->bits), ((src1p)->bits), > ((src2p)->bits), 2); >} > > > > > > > >static inline __attribute__((always_inline)) void cpumask_or(struct cpumask *dstp, const struct cpumask *src1p, > const struct cpumask *src2p) >{ > bitmap_or(((dstp)->bits), ((src1p)->bits), > ((src2p)->bits), 2); >} > > > > > > > >static inline __attribute__((always_inline)) void cpumask_xor(struct cpumask *dstp, > const struct cpumask *src1p, > const struct cpumask *src2p) >{ > bitmap_xor(((dstp)->bits), ((src1p)->bits), > ((src2p)->bits), 2); >} > > > > > > > >static inline __attribute__((always_inline)) int cpumask_andnot(struct cpumask *dstp, > const struct cpumask *src1p, > const struct cpumask *src2p) >{ > return bitmap_andnot(((dstp)->bits), ((src1p)->bits), > ((src2p)->bits), 2); >} > > > > > > >static inline __attribute__((always_inline)) void cpumask_complement(struct cpumask *dstp, > const struct cpumask 
*srcp) >{ > bitmap_complement(((dstp)->bits), ((srcp)->bits), > 2); >} > > > > > > >static inline __attribute__((always_inline)) bool cpumask_equal(const struct cpumask *src1p, > const struct cpumask *src2p) >{ > return bitmap_equal(((src1p)->bits), ((src2p)->bits), > 2); >} > > > > > > >static inline __attribute__((always_inline)) bool cpumask_intersects(const struct cpumask *src1p, > const struct cpumask *src2p) >{ > return bitmap_intersects(((src1p)->bits), ((src2p)->bits), > 2); >} > > > > > > >static inline __attribute__((always_inline)) int cpumask_subset(const struct cpumask *src1p, > const struct cpumask *src2p) >{ > return bitmap_subset(((src1p)->bits), ((src2p)->bits), > 2); >} > > > > > >static inline __attribute__((always_inline)) bool cpumask_empty(const struct cpumask *srcp) >{ > return bitmap_empty(((srcp)->bits), 2); >} > > > > > >static inline __attribute__((always_inline)) bool cpumask_full(const struct cpumask *srcp) >{ > return bitmap_full(((srcp)->bits), 2); >} > > > > > >static inline __attribute__((always_inline)) unsigned int cpumask_weight(const struct cpumask *srcp) >{ > return bitmap_weight(((srcp)->bits), 2); >} > > > > > > > >static inline __attribute__((always_inline)) void cpumask_shift_right(struct cpumask *dstp, > const struct cpumask *srcp, int n) >{ > bitmap_shift_right(((dstp)->bits), ((srcp)->bits), n, > 2); >} > > > > > > > >static inline __attribute__((always_inline)) void cpumask_shift_left(struct cpumask *dstp, > const struct cpumask *srcp, int n) >{ > bitmap_shift_left(((dstp)->bits), ((srcp)->bits), n, > 2); >} > > > > > > >static inline __attribute__((always_inline)) void cpumask_copy(struct cpumask *dstp, > const struct cpumask *srcp) >{ > bitmap_copy(((dstp)->bits), ((srcp)->bits), 2); >} > >static inline __attribute__((always_inline)) int cpumask_scnprintf(char *buf, int len, > const struct cpumask *srcp) >{ > return bitmap_scnprintf(buf, len, ((srcp)->bits), 2); >} > >static inline __attribute__((always_inline)) int cpumask_parse_user(const char *buf, int len, > struct cpumask *dstp) >{ > return bitmap_parse_user(buf, len, ((dstp)->bits), 2); >} > >static inline __attribute__((always_inline)) int cpumask_parselist_user(const char *buf, int len, > struct cpumask *dstp) >{ > return bitmap_parselist_user(buf, len, ((dstp)->bits), > 2); >} > >static inline __attribute__((always_inline)) int cpulist_scnprintf(char *buf, int len, > const struct cpumask *srcp) >{ > return bitmap_scnlistprintf(buf, len, ((srcp)->bits), > 2); >} > >static inline __attribute__((always_inline)) int cpulist_parse(const char *buf, struct cpumask *dstp) >{ > return bitmap_parselist(buf, ((dstp)->bits), 2); >} > > > > > > >static inline __attribute__((always_inline)) size_t cpumask_size(void) >{ > > > return (((2) + (8 * sizeof(long)) - 1) / (8 * sizeof(long))) * sizeof(long); >} > >typedef struct cpumask cpumask_var_t[1]; > >static inline __attribute__((always_inline)) bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) >{ > return true; >} > >static inline __attribute__((always_inline)) bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, > int node) >{ > return true; >} > >static inline __attribute__((always_inline)) bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags) >{ > cpumask_clear(*mask); > return true; >} > >static inline __attribute__((always_inline)) bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, > int node) >{ > cpumask_clear(*mask); > return true; >} > >static inline __attribute__((always_inline)) void 
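/*
 * The cpumask layer above is a thin typed wrapper around the bitmap
 * helpers, with the bit count hard-wired to 2 (this configuration's CPU
 * count), so every operation reduces to the single-word bitmap fast
 * path.  A usage sketch calling only functions defined above; the
 * function name is made up, compiled out:
 */
#if 0
static unsigned int sketch_cpumask_use(void)
{
	struct cpumask mask;

	cpumask_clear(&mask);
	cpumask_set_cpu(0, &mask);

	/* Returns the old bit value: 0 here, since cpu 1 was clear. */
	if (!cpumask_test_and_set_cpu(1, &mask))
		return cpumask_weight(&mask);	/* now 2 */
	return 0;
}
#endif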
alloc_bootmem_cpumask_var(cpumask_var_t *mask) >{ >} > >static inline __attribute__((always_inline)) void free_cpumask_var(cpumask_var_t mask) >{ >} > >static inline __attribute__((always_inline)) void free_bootmem_cpumask_var(cpumask_var_t mask) >{ >} > > > > >extern const unsigned long cpu_all_bits[(((2) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; > >void set_cpu_possible(unsigned int cpu, bool possible); >void set_cpu_present(unsigned int cpu, bool present); >void set_cpu_online(unsigned int cpu, bool online); >void set_cpu_active(unsigned int cpu, bool active); >void init_cpu_present(const struct cpumask *src); >void init_cpu_possible(const struct cpumask *src); >void init_cpu_online(const struct cpumask *src); > >static inline __attribute__((always_inline)) int __check_is_bitmap(const unsigned long *bitmap) >{ > return 1; >} > >extern const unsigned long > cpu_bit_bitmap[32 +1][(((2) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; > >static inline __attribute__((always_inline)) const struct cpumask *get_cpu_mask(unsigned int cpu) >{ > const unsigned long *p = cpu_bit_bitmap[1 + cpu % 32]; > p -= cpu / 32; > return ((struct cpumask *)(1 ? (p) : (void *)sizeof(__check_is_bitmap(p)))); >} > >int __first_cpu(const cpumask_t *srcp); >int __next_cpu(int n, const cpumask_t *srcp); > >static inline __attribute__((always_inline)) void __cpu_set(int cpu, volatile cpumask_t *dstp) >{ > _set_bit(cpu,dstp->bits); >} > > >static inline __attribute__((always_inline)) void __cpu_clear(int cpu, volatile cpumask_t *dstp) >{ > _clear_bit(cpu,dstp->bits); >} > > >static inline __attribute__((always_inline)) void __cpus_setall(cpumask_t *dstp, int nbits) >{ > bitmap_fill(dstp->bits, nbits); >} > > >static inline __attribute__((always_inline)) void __cpus_clear(cpumask_t *dstp, int nbits) >{ > bitmap_zero(dstp->bits, nbits); >} > > > > > >static inline __attribute__((always_inline)) int __cpu_test_and_set(int cpu, cpumask_t *addr) >{ > return _test_and_set_bit(cpu,addr->bits); >} > > >static inline __attribute__((always_inline)) int __cpus_and(cpumask_t *dstp, const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > return bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) void __cpus_or(cpumask_t *dstp, const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) void __cpus_xor(cpumask_t *dstp, const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __cpus_andnot(cpumask_t *dstp, const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > return bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __cpus_equal(const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > return bitmap_equal(src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __cpus_intersects(const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > return bitmap_intersects(src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __cpus_subset(const cpumask_t *src1p, > const cpumask_t *src2p, int nbits) >{ > return bitmap_subset(src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __cpus_empty(const cpumask_t *srcp, int nbits) >{ > return 
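/*
 * get_cpu_mask() above relies on a space-saving trick: cpu_bit_bitmap is
 * a table of 33 rows in which row (k + 1) has only bit k of word 0 set.
 * Taking row (1 + cpu % 32) and stepping the pointer back by (cpu / 32)
 * words yields a read-only bitmap in which exactly bit "cpu" is set,
 * without storing one full mask per CPU.  The invariant written out;
 * illustrative, compiled out:
 */
#if 0
static int sketch_get_cpu_mask_invariant(unsigned int cpu)
{
	const struct cpumask *m = get_cpu_mask(cpu);

	/* Bit "cpu" is set, and it is the only bit set. */
	return test_bit(cpu, m->bits) && cpumask_weight(m) == 1;
}
#endif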
bitmap_empty(srcp->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __cpus_weight(const cpumask_t *srcp, int nbits) >{ > return bitmap_weight(srcp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __cpus_shift_left(cpumask_t *dstp, > const cpumask_t *srcp, int n, int nbits) >{ > bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); >} > > > >typedef struct { > unsigned sequence; > spinlock_t lock; >} seqlock_t; > >static inline __attribute__((always_inline)) void write_seqlock(seqlock_t *sl) >{ > spin_lock(&sl->lock); > ++sl->sequence; > __asm__ __volatile__ ("dmb" : : : "memory"); >} > >static inline __attribute__((always_inline)) void write_sequnlock(seqlock_t *sl) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > sl->sequence++; > spin_unlock(&sl->lock); >} > >static inline __attribute__((always_inline)) int write_tryseqlock(seqlock_t *sl) >{ > int ret = spin_trylock(&sl->lock); > > if (ret) { > ++sl->sequence; > __asm__ __volatile__ ("dmb" : : : "memory"); > } > return ret; >} > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned read_seqbegin(const seqlock_t *sl) >{ > unsigned ret; > >repeat: > ret = (*(volatile typeof(sl->sequence) *)&(sl->sequence)); > if (__builtin_expect(!!(ret & 1), 0)) { > __asm__ __volatile__("": : :"memory"); > goto repeat; > } > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > > > > > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) int read_seqretry(const seqlock_t *sl, unsigned start) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > > return __builtin_expect(!!(sl->sequence != start), 0); >} > >typedef struct seqcount { > unsigned sequence; >} seqcount_t; > >static inline __attribute__((always_inline)) unsigned __read_seqcount_begin(const seqcount_t *s) >{ > unsigned ret; > >repeat: > ret = (*(volatile typeof(s->sequence) *)&(s->sequence)); > if (__builtin_expect(!!(ret & 1), 0)) { > __asm__ __volatile__("": : :"memory"); > goto repeat; > } > return ret; >} > >static inline __attribute__((always_inline)) unsigned read_seqcount_begin(const seqcount_t *s) >{ > unsigned ret = __read_seqcount_begin(s); > __asm__ __volatile__ ("dmb" : : : "memory"); > return ret; >} > >static inline __attribute__((always_inline)) unsigned raw_seqcount_begin(const seqcount_t *s) >{ > unsigned ret = (*(volatile typeof(s->sequence) *)&(s->sequence)); > __asm__ __volatile__ ("dmb" : : : "memory"); > return ret & ~1; >} > >static inline __attribute__((always_inline)) int __read_seqcount_retry(const seqcount_t *s, unsigned start) >{ > return __builtin_expect(!!(s->sequence != start), 0); >} > >static inline __attribute__((always_inline)) int read_seqcount_retry(const seqcount_t *s, unsigned start) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > > return __read_seqcount_retry(s, start); >} > > > > > > >static inline __attribute__((always_inline)) void write_seqcount_begin(seqcount_t *s) >{ > s->sequence++; > __asm__ __volatile__ ("dmb" : : : "memory"); >} > >static inline __attribute__((always_inline)) void write_seqcount_end(seqcount_t *s) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > s->sequence++; >} > >static inline __attribute__((always_inline)) void write_seqcount_barrier(seqcount_t *s) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > s->sequence+=2; >} > > > > > > > > > > > > >static inline __attribute__((always_inline)) struct task_struct *get_current(void) __attribute__((__const__)); > >static inline __attribute__((always_inline)) 
struct task_struct *get_current(void) >{ > return current_thread_info()->task; >} > > >typedef struct __wait_queue wait_queue_t; >typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); >int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key); > >struct __wait_queue { > unsigned int flags; > > void *private; > wait_queue_func_t func; > struct list_head task_list; >}; > >struct wait_bit_key { > void *flags; > int bit_nr; >}; > >struct wait_bit_queue { > struct wait_bit_key key; > wait_queue_t wait; >}; > >struct __wait_queue_head { > spinlock_t lock; > struct list_head task_list; >}; >typedef struct __wait_queue_head wait_queue_head_t; > >struct task_struct; > >extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *); > >static inline __attribute__((always_inline)) void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) >{ > q->flags = 0; > q->private = p; > q->func = default_wake_function; >} > >static inline __attribute__((always_inline)) void init_waitqueue_func_entry(wait_queue_t *q, > wait_queue_func_t func) >{ > q->flags = 0; > q->private = ((void *)0); > q->func = func; >} > >static inline __attribute__((always_inline)) int waitqueue_active(wait_queue_head_t *q) >{ > return !list_empty(&q->task_list); >} > >extern void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); >extern void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait); >extern void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait); > >static inline __attribute__((always_inline)) void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new) >{ > list_add(&new->task_list, &head->task_list); >} > > > > >static inline __attribute__((always_inline)) void __add_wait_queue_exclusive(wait_queue_head_t *q, > wait_queue_t *wait) >{ > wait->flags |= 0x01; > __add_wait_queue(q, wait); >} > >static inline __attribute__((always_inline)) void __add_wait_queue_tail(wait_queue_head_t *head, > wait_queue_t *new) >{ > list_add_tail(&new->task_list, &head->task_list); >} > >static inline __attribute__((always_inline)) void __add_wait_queue_tail_exclusive(wait_queue_head_t *q, > wait_queue_t *wait) >{ > wait->flags |= 0x01; > __add_wait_queue_tail(q, wait); >} > >static inline __attribute__((always_inline)) void __remove_wait_queue(wait_queue_head_t *head, > wait_queue_t *old) >{ > list_del(&old->task_list); >} > >void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); >void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); >void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, > void *key); >void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); >void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); >void __wake_up_bit(wait_queue_head_t *, void *, int); >int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); >int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); >void wake_up_bit(void *, int); >int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); >int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); >wait_queue_head_t *bit_waitqueue(void *, int); > >extern void sleep_on(wait_queue_head_t *q); >extern long sleep_on_timeout(wait_queue_head_t *q, > signed long timeout); >extern void interruptible_sleep_on(wait_queue_head_t *q); >extern long 
interruptible_sleep_on_timeout(wait_queue_head_t *q, > signed long timeout); > > > > >void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state); >void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state); >void finish_wait(wait_queue_head_t *q, wait_queue_t *wait); >void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, > unsigned int mode, void *key); >int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key); >int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); > >static inline __attribute__((always_inline)) int wait_on_bit(void *word, int bit, > int (*action)(void *), unsigned mode) >{ > if (!test_bit(bit, word)) > return 0; > return out_of_line_wait_on_bit(word, bit, action, mode); >} > >static inline __attribute__((always_inline)) int wait_on_bit_lock(void *word, int bit, > int (*action)(void *), unsigned mode) >{ > if (!_test_and_set_bit(bit,word)) > return 0; > return out_of_line_wait_on_bit_lock(word, bit, action, mode); >} > > >struct completion { > unsigned int done; > wait_queue_head_t wait; >}; > >static inline __attribute__((always_inline)) void init_completion(struct completion *x) >{ > x->done = 0; > do { static struct lock_class_key __key; __init_waitqueue_head((&x->wait), "&x->wait", &__key); } while (0); >} > >extern void wait_for_completion(struct completion *); >extern int wait_for_completion_interruptible(struct completion *x); >extern int wait_for_completion_killable(struct completion *x); >extern unsigned long wait_for_completion_timeout(struct completion *x, > unsigned long timeout); >extern long wait_for_completion_interruptible_timeout( > struct completion *x, unsigned long timeout); >extern long wait_for_completion_killable_timeout( > struct completion *x, unsigned long timeout); >extern bool try_wait_for_completion(struct completion *x); >extern bool completion_done(struct completion *x); > >extern void complete(struct completion *); >extern void complete_all(struct completion *); > > > > > > > > >enum debug_obj_state { > ODEBUG_STATE_NONE, > ODEBUG_STATE_INIT, > ODEBUG_STATE_INACTIVE, > ODEBUG_STATE_ACTIVE, > ODEBUG_STATE_DESTROYED, > ODEBUG_STATE_NOTAVAILABLE, > ODEBUG_STATE_MAX, >}; > >struct debug_obj_descr; > >struct debug_obj { > struct hlist_node node; > enum debug_obj_state state; > unsigned int astate; > void *object; > struct debug_obj_descr *descr; >}; > >struct debug_obj_descr { > const char *name; > void *(*debug_hint) (void *addr); > int (*fixup_init) (void *addr, enum debug_obj_state state); > int (*fixup_activate) (void *addr, enum debug_obj_state state); > int (*fixup_destroy) (void *addr, enum debug_obj_state state); > int (*fixup_free) (void *addr, enum debug_obj_state state); > int (*fixup_assert_init)(void *addr, enum debug_obj_state state); >}; > >static inline __attribute__((always_inline)) void >debug_object_init (void *addr, struct debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_activate (void *addr, struct debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_destroy (void *addr, struct debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_free (void *addr, struct 
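/*
 * A completion above is just a "done" count plus one of the wait queue
 * heads defined earlier; it is the usual way one thread blocks until
 * another signals an event.  A minimal producer/consumer sketch with the
 * API declared above; names are made up, and the completion is assumed
 * initialized before either side runs, compiled out:
 */
#if 0
static struct completion sketch_done;

static void sketch_setup(void)
{
	init_completion(&sketch_done);
}

static void sketch_consumer(void)
{
	wait_for_completion(&sketch_done);	/* sleeps until complete() */
}

static void sketch_producer(void)
{
	complete(&sketch_done);			/* wakes one waiter */
}
#endif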
debug_obj_descr *descr) { } >static inline __attribute__((always_inline)) void >debug_object_assert_init(void *addr, struct debug_obj_descr *descr) { } > >static inline __attribute__((always_inline)) void debug_objects_early_init(void) { } >static inline __attribute__((always_inline)) void debug_objects_mem_init(void) { } > > > > > >static inline __attribute__((always_inline)) void >debug_check_no_obj_freed(const void *address, unsigned long size) { } > > >extern void rcutorture_record_test_transition(void); >extern void rcutorture_record_progress(unsigned long vernum); >extern void do_trace_rcu_torture_read(char *rcutorturename, > struct rcu_head *rhp); > >extern void call_rcu(struct rcu_head *head, > void (*func)(struct rcu_head *head)); > >extern void call_rcu_bh(struct rcu_head *head, > void (*func)(struct rcu_head *head)); > >extern void call_rcu_sched(struct rcu_head *head, > void (*func)(struct rcu_head *rcu)); > >extern void synchronize_sched(void); > > > >extern void __rcu_read_lock(void); >extern void __rcu_read_unlock(void); >void synchronize_rcu(void); > >extern void rcu_sched_qs(int cpu); >extern void rcu_bh_qs(int cpu); >extern void rcu_check_callbacks(int cpu, int user); >struct notifier_block; >extern void rcu_idle_enter(void); >extern void rcu_idle_exit(void); >extern void rcu_irq_enter(void); >extern void rcu_irq_exit(void); > >typedef void call_rcu_func_t(struct rcu_head *head, > void (*func)(struct rcu_head *head)); >void wait_rcu_gp(call_rcu_func_t crf); > > > > >extern void rcu_init(void); >extern void rcu_note_context_switch(int cpu); >extern int rcu_needs_cpu(int cpu); >extern void rcu_cpu_stall_reset(void); > > > > > > >static inline __attribute__((always_inline)) void rcu_virt_note_context_switch(int cpu) >{ > rcu_note_context_switch(cpu); >} > > > >extern void exit_rcu(void); > >extern void synchronize_rcu_bh(void); >extern void synchronize_sched_expedited(void); >extern void synchronize_rcu_expedited(void); > >void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); > >static inline __attribute__((always_inline)) void synchronize_rcu_bh_expedited(void) >{ > synchronize_sched_expedited(); >} > >extern void rcu_barrier(void); >extern void rcu_barrier_bh(void); >extern void rcu_barrier_sched(void); > >extern unsigned long rcutorture_testseq; >extern unsigned long rcutorture_vernum; >extern long rcu_batches_completed(void); >extern long rcu_batches_completed_bh(void); >extern long rcu_batches_completed_sched(void); > >extern void rcu_force_quiescent_state(void); >extern void rcu_bh_force_quiescent_state(void); >extern void rcu_sched_force_quiescent_state(void); > > >static inline __attribute__((always_inline)) int rcu_blocking_is_gp(void) >{ > do { __might_sleep("include/linux/rcutree.h", 104, 0); do { } while (0); } while (0); > return cpumask_weight(cpu_online_mask) == 1; >} > >extern void rcu_scheduler_starting(void); >extern int rcu_scheduler_active __attribute__((__section__(".data..read_mostly"))); > > >static inline __attribute__((always_inline)) void init_rcu_head_on_stack(struct rcu_head *head) >{ >} > >static inline __attribute__((always_inline)) void destroy_rcu_head_on_stack(struct rcu_head *head) >{ >} > > > > > >static inline __attribute__((always_inline)) bool rcu_lockdep_current_cpu_online(void) >{ > return 1; >} > >static inline __attribute__((always_inline)) int rcu_read_lock_held(void) >{ > return 1; >} > >static inline __attribute__((always_inline)) int rcu_read_lock_bh_held(void) >{ > return 1; >} > > >static inline 
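/*
 * The declarations above are the tree-RCU flavour of the RCU API:
 * readers are delimited by rcu_read_lock()/rcu_read_unlock() (defined
 * just below), and an updater frees old data only after a grace period,
 * either synchronously via synchronize_rcu() or by callback via
 * call_rcu().  A minimal updater sketch; struct sketch_obj, kfree() and
 * container_of() are assumed from the usual kernel environment,
 * compiled out:
 */
#if 0
struct sketch_obj {
	int payload;
	struct rcu_head rcu;
};

static void sketch_obj_free(struct rcu_head *head)
{
	kfree(container_of(head, struct sketch_obj, rcu));
}

static void sketch_retire(struct sketch_obj *old)
{
	/* Runs sketch_obj_free() once all pre-existing readers are done. */
	call_rcu(&old->rcu, sketch_obj_free);
}
#endif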
__attribute__((always_inline)) int rcu_read_lock_sched_held(void) >{ > return (current_thread_info()->preempt_count) != 0 || ({ unsigned long _flags; do { ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); _flags = arch_local_save_flags(); } while (0); ({ ({ unsigned long __dummy; typeof(_flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(_flags); }); }); >} > >static inline __attribute__((always_inline)) void rcu_read_lock(void) >{ > __rcu_read_lock(); > (void)0; > do { } while (0); > do { } while (0) > ; >} > >static inline __attribute__((always_inline)) void rcu_read_unlock(void) >{ > do { } while (0) > ; > do { } while (0); > (void)0; > __rcu_read_unlock(); >} > >static inline __attribute__((always_inline)) void rcu_read_lock_bh(void) >{ > local_bh_disable(); > (void)0; > do { } while (0); > do { } while (0) > ; >} > > > > > > >static inline __attribute__((always_inline)) void rcu_read_unlock_bh(void) >{ > do { } while (0) > ; > do { } while (0); > (void)0; > local_bh_enable(); >} > >static inline __attribute__((always_inline)) void rcu_read_lock_sched(void) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > (void)0; > do { } while (0); > do { } while (0) > ; >} > > >static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_lock_sched_notrace(void) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > (void)0; >} > > > > > > >static inline __attribute__((always_inline)) void rcu_read_unlock_sched(void) >{ > do { } while (0) > ; > do { } while (0); > (void)0; > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > > >static inline __attribute__((always_inline)) __attribute__((no_instrument_function)) void rcu_read_unlock_sched_notrace(void) >{ > (void)0; > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >static inline __attribute__((always_inline)) __attribute__((always_inline)) bool __is_kfree_rcu_offset(unsigned long offset) >{ > return offset < 4096; >} > >static inline __attribute__((always_inline)) __attribute__((always_inline)) >void __kfree_rcu(struct rcu_head *head, unsigned long offset) >{ > typedef void (*rcu_callback)(struct rcu_head *); > > do { ((void)sizeof(char[1 - 2*!!(!__builtin_constant_p(offset))])); if (!__builtin_constant_p(offset)) __build_bug_on_failed = 1; } while(0); > > > do { ((void)sizeof(char[1 - 2*!!(!__is_kfree_rcu_offset(offset))])); if (!__is_kfree_rcu_offset(offset)) __build_bug_on_failed = 1; } while(0); > > kfree_call_rcu(head, (rcu_callback)offset); >} > > > >struct task_struct; > > >struct sem_array { > struct kern_ipc_perm __attribute__((__aligned__((1 << 6)))) > sem_perm; > time_t sem_otime; > time_t sem_ctime; > struct sem *sem_base; > struct list_head sem_pending; > struct list_head list_id; > int sem_nsems; > int complex_count; >}; > > > >struct sysv_sem { > struct sem_undo_list *undo_list; >}; > >extern 
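/*
 * __kfree_rcu() above encodes the offset of the rcu_head within its
 * enclosing object as the callback "function pointer"; the build-time
 * assertions check that the offset is a constant below 4096, which is
 * how the RCU core later distinguishes such pseudo-pointers from real
 * callbacks and simply kfree()s the object.  Sketch of a caller, with
 * struct sketch_obj and offsetof() assumed, compiled out:
 */
#if 0
#include <stddef.h>

struct sketch_obj {
	int payload;
	struct rcu_head rcu;
};

static void sketch_kfree_rcu(struct sketch_obj *p)
{
	/* Equivalent to the kernel's kfree_rcu(p, rcu) shorthand. */
	__kfree_rcu(&p->rcu, offsetof(struct sketch_obj, rcu));
}
#endif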
int copy_semundo(unsigned long clone_flags, struct task_struct *tsk); >extern void exit_sem(struct task_struct *tsk); > > > > > > > > > >typedef union sigval { > int sival_int; > void *sival_ptr; >} sigval_t; > >typedef struct siginfo { > int si_signo; > int si_errno; > int si_code; > > union { > int _pad[((128 - (3 * sizeof(int))) / sizeof(int))]; > > > struct { > __kernel_pid_t _pid; > __kernel_uid32_t _uid; > } _kill; > > > struct { > __kernel_timer_t _tid; > int _overrun; > char _pad[sizeof( __kernel_uid32_t) - sizeof(int)]; > sigval_t _sigval; > int _sys_private; > } _timer; > > > struct { > __kernel_pid_t _pid; > __kernel_uid32_t _uid; > sigval_t _sigval; > } _rt; > > > struct { > __kernel_pid_t _pid; > __kernel_uid32_t _uid; > int _status; > __kernel_clock_t _utime; > __kernel_clock_t _stime; > } _sigchld; > > > struct { > void *_addr; > > > > short _addr_lsb; > } _sigfault; > > > struct { > long _band; > int _fd; > } _sigpoll; > > > struct { > void *_call_addr; > int _syscall; > unsigned int _arch; > } _sigsys; > } _sifields; >} siginfo_t; > >typedef struct sigevent { > sigval_t sigev_value; > int sigev_signo; > int sigev_notify; > union { > int _pad[((64 - (sizeof(int) * 2 + sizeof(sigval_t))) / sizeof(int))]; > int _tid; > > struct { > void (*_function)(sigval_t); > void *_attribute; > } _sigev_thread; > } _sigev_un; >} sigevent_t; > > > > > > > >struct siginfo; >void do_schedule_next_timer(struct siginfo *info); > > > > > >static inline __attribute__((always_inline)) void copy_siginfo(struct siginfo *to, struct siginfo *from) >{ > if (from->si_code < 0) > memcpy(to, from, sizeof(*to)); > else > > memcpy(to, from, (3 * sizeof(int)) + sizeof(from->_sifields._sigchld)); >} > > > >extern int copy_siginfo_to_user(struct siginfo *to, struct siginfo *from); > > > > > > > > > >struct siginfo; > >typedef unsigned long old_sigset_t; > >typedef struct { > unsigned long sig[(64 / 32)]; >} sigset_t; > > > >typedef void __signalfn_t(int); >typedef __signalfn_t *__sighandler_t; > >typedef void __restorefn_t(void); >typedef __restorefn_t *__sigrestore_t; > > > >struct old_sigaction { > __sighandler_t sa_handler; > old_sigset_t sa_mask; > unsigned long sa_flags; > __sigrestore_t sa_restorer; >}; > >struct sigaction { > __sighandler_t sa_handler; > unsigned long sa_flags; > __sigrestore_t sa_restorer; > sigset_t sa_mask; >}; > > >struct k_sigaction { > struct sigaction sa; >}; > >typedef struct sigaltstack { > void *ss_sp; > int ss_flags; > size_t ss_size; >} stack_t; > > > > >struct sigcontext { > unsigned long trap_no; > unsigned long error_code; > unsigned long oldmask; > unsigned long arm_r0; > unsigned long arm_r1; > unsigned long arm_r2; > unsigned long arm_r3; > unsigned long arm_r4; > unsigned long arm_r5; > unsigned long arm_r6; > unsigned long arm_r7; > unsigned long arm_r8; > unsigned long arm_r9; > unsigned long arm_r10; > unsigned long arm_fp; > unsigned long arm_ip; > unsigned long arm_sp; > unsigned long arm_lr; > unsigned long arm_pc; > unsigned long arm_cpsr; > unsigned long fault_address; >}; > > > > > > > > > > > > > > > > > > > > > > > > > > > > > >enum { > QIF_BLIMITS_B = 0, > QIF_SPACE_B, > QIF_ILIMITS_B, > QIF_INODES_B, > QIF_BTIME_B, > QIF_ITIME_B, >}; > >struct if_dqblk { > __u64 dqb_bhardlimit; > __u64 dqb_bsoftlimit; > __u64 dqb_curspace; > __u64 dqb_ihardlimit; > __u64 dqb_isoftlimit; > __u64 dqb_curinodes; > __u64 dqb_btime; > __u64 dqb_itime; > __u32 dqb_valid; >}; > >struct if_dqinfo { > __u64 dqi_bgrace; > __u64 dqi_igrace; > __u32 dqi_flags; > __u32 dqi_valid; >}; 
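/*
 * The QIF_*_B enum above gives bit positions for the dqb_valid mask in
 * struct if_dqblk: a caller marks exactly which fields it filled in (or
 * wants back).  Presumably the quotactl interface consumes it as in the
 * following sketch; the field usage shown is an assumption, compiled
 * out:
 */
#if 0
static void sketch_fill_space_limits(struct if_dqblk *q, __u64 hard, __u64 soft)
{
	q->dqb_bhardlimit = hard;
	q->dqb_bsoftlimit = soft;
	/* Only the block-limit fields are valid in this request. */
	q->dqb_valid = 1U << QIF_BLIMITS_B;
}
#endif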
> >enum { > QUOTA_NL_C_UNSPEC, > QUOTA_NL_C_WARNING, > __QUOTA_NL_C_MAX, >}; > > >enum { > QUOTA_NL_A_UNSPEC, > QUOTA_NL_A_QTYPE, > QUOTA_NL_A_EXCESS_ID, > QUOTA_NL_A_WARNING, > QUOTA_NL_A_DEV_MAJOR, > QUOTA_NL_A_DEV_MINOR, > QUOTA_NL_A_CAUSED_ID, > __QUOTA_NL_A_MAX, >}; > > > > > > > >struct mutex { > > atomic_t count; > spinlock_t wait_lock; > struct list_head wait_list; > > struct task_struct *owner; > >}; > > > > > >struct mutex_waiter { > struct list_head list; > struct task_struct *task; > > > >}; > >static inline __attribute__((always_inline)) void mutex_destroy(struct mutex *lock) {} > >extern void __mutex_init(struct mutex *lock, const char *name, > struct lock_class_key *key); > > > > > > > >static inline __attribute__((always_inline)) int mutex_is_locked(struct mutex *lock) >{ > return (*(volatile int *)&(&lock->count)->counter) != 1; >} > >extern void mutex_lock(struct mutex *lock); >extern int __attribute__((warn_unused_result)) mutex_lock_interruptible(struct mutex *lock); >extern int __attribute__((warn_unused_result)) mutex_lock_killable(struct mutex *lock); > >extern int mutex_trylock(struct mutex *lock); >extern void mutex_unlock(struct mutex *lock); >extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); > > > >struct rw_semaphore; > > > > >struct rw_semaphore { > __s32 activity; > raw_spinlock_t wait_lock; > struct list_head wait_list; > > > >}; > > > >extern void __down_read(struct rw_semaphore *sem); >extern int __down_read_trylock(struct rw_semaphore *sem); >extern void __down_write(struct rw_semaphore *sem); >extern void __down_write_nested(struct rw_semaphore *sem, int subclass); >extern int __down_write_trylock(struct rw_semaphore *sem); >extern void __up_read(struct rw_semaphore *sem); >extern void __up_write(struct rw_semaphore *sem); >extern void __downgrade_write(struct rw_semaphore *sem); >extern int rwsem_is_locked(struct rw_semaphore *sem); > > >extern void __init_rwsem(struct rw_semaphore *sem, const char *name, > struct lock_class_key *key); > >extern void down_read(struct rw_semaphore *sem); > > > > >extern int down_read_trylock(struct rw_semaphore *sem); > > > > >extern void down_write(struct rw_semaphore *sem); > > > > >extern int down_write_trylock(struct rw_semaphore *sem); > > > > >extern void up_read(struct rw_semaphore *sem); > > > > >extern void up_write(struct rw_semaphore *sem); > > > > >extern void downgrade_write(struct rw_semaphore *sem); > > > > > > > >extern void cpu_idle(void); > >typedef void (*smp_call_func_t)(void *info); >struct call_single_data { > struct list_head list; > smp_call_func_t func; > void *info; > u16 flags; > u16 priv; >}; > > >extern unsigned int total_cpus; > >int smp_call_function_single(int cpuid, smp_call_func_t func, void *info, > int wait); > > > > > > > > > >struct seq_file; > > > > >extern void show_ipi_list(struct seq_file *, int); > > > > > void do_IPI(int ipinr, struct pt_regs *regs); > > > > >void handle_IPI(int ipinr, struct pt_regs *regs); > > > > >extern void smp_init_cpus(void); > > > > > >extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int)); > > > > > >extern int boot_secondary(unsigned int cpu, struct task_struct *); > > > > > > void secondary_start_kernel(void); > > > > >extern void platform_secondary_init(unsigned int cpu); > > > > >extern void platform_smp_prepare_cpus(unsigned int); > > > > >struct secondary_data { > unsigned long pgdir; > unsigned long swapper_pg_dir; > void *stack; >}; >extern struct secondary_data secondary_data; > >extern int 
__cpu_disable(void); >extern int platform_cpu_disable(unsigned int cpu); > >extern void __cpu_die(unsigned int cpu); >extern void cpu_die(void); > >extern void platform_cpu_die(unsigned int cpu); >extern int platform_cpu_kill(unsigned int cpu); >extern void platform_cpu_enable(unsigned int cpu); > >extern void arch_send_call_function_single_ipi(int cpu); >extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); > >extern void smp_send_all_cpu_backtrace(void); > > >extern void smp_send_stop(void); > > > > >extern void smp_send_reschedule(int cpu); > > > > > >extern void smp_prepare_cpus(unsigned int max_cpus); > > > > >extern int __cpu_up(unsigned int cpunum); > > > > >extern void smp_cpus_done(unsigned int max_cpus); > > > > >int smp_call_function(smp_call_func_t func, void *info, int wait); >void smp_call_function_many(const struct cpumask *mask, > smp_call_func_t func, void *info, bool wait); > >void __smp_call_function_single(int cpuid, struct call_single_data *data, > int wait); > >int smp_call_function_any(const struct cpumask *mask, > smp_call_func_t func, void *info, int wait); > > > > > >void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) call_function_init(void); >void generic_smp_call_function_single_interrupt(void); >void generic_smp_call_function_interrupt(void); >void ipi_call_lock(void); >void ipi_call_unlock(void); >void ipi_call_lock_irq(void); >void ipi_call_unlock_irq(void); > > > > > > > >int on_each_cpu(smp_call_func_t func, void *info, int wait); > > > > > >void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, > void *info, bool wait); > > > > > > >void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), > smp_call_func_t func, void *info, bool wait, > gfp_t gfp_flags); > > > > > >void smp_prepare_boot_cpu(void); > >extern unsigned int setup_max_cpus; >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_nr_cpu_ids(void); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) smp_init(void); > >extern void arch_disable_smp_support(void); > >void smp_setup_processor_id(void); > > > > > > > > > > > > > > > > > > > > > > > > >extern unsigned long __per_cpu_offset[2]; > > > >extern void *pcpu_base_addr; >extern const unsigned long *pcpu_unit_offsets; > >struct pcpu_group_info { > int nr_units; > unsigned long base_offset; > unsigned int *cpu_map; > >}; > >struct pcpu_alloc_info { > size_t static_size; > size_t reserved_size; > size_t dyn_size; > size_t unit_size; > size_t atom_size; > size_t alloc_size; > size_t __ai_size; > int nr_groups; > struct pcpu_group_info groups[]; >}; > >enum pcpu_fc { > PCPU_FC_AUTO, > PCPU_FC_EMBED, > PCPU_FC_PAGE, > > PCPU_FC_NR, >}; >extern const char *pcpu_fc_names[PCPU_FC_NR]; > >extern enum pcpu_fc pcpu_chosen_fc; > >typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size, > size_t align); >typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size); >typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr); >typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to); > >extern struct pcpu_alloc_info * __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_alloc_alloc_info(int nr_groups, > int nr_units); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) 
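/*
 * The smp_call_function*() family above runs a function on other CPUs
 * via IPIs; on_each_cpu() additionally runs it on the calling CPU, and
 * the wait argument selects whether the caller blocks until every
 * invocation has finished.  Sketch; sketch_poke and its caller are made
 * up, compiled out:
 */
#if 0
static void sketch_poke(void *info)
{
	(void)info;		/* per-CPU work runs here, in IPI context */
}

static void sketch_broadcast(void)
{
	on_each_cpu(sketch_poke, ((void *)0), 1);	/* wait == 1: synchronous */
}
#endif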
__attribute__((no_instrument_function)) pcpu_free_alloc_info(struct pcpu_alloc_info *ai); > >extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, > void *base_addr); > >extern void *__alloc_reserved_percpu(size_t size, size_t align); >extern bool is_kernel_percpu_address(unsigned long addr); > > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) setup_per_cpu_areas(void); > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) percpu_init_late(void); > >extern void *__alloc_percpu(size_t size, size_t align); >extern void free_percpu(void *__pdata); >extern phys_addr_t per_cpu_ptr_to_phys(void *addr); > >extern void __bad_size_call_parameter(void); > > > > > >struct percpu_counter { > raw_spinlock_t lock; > s64 count; > > struct list_head list; > > s32 *counters; >}; > >extern int percpu_counter_batch; > >int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, > struct lock_class_key *key); > >void percpu_counter_destroy(struct percpu_counter *fbc); >void percpu_counter_set(struct percpu_counter *fbc, s64 amount); >void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch); >s64 __percpu_counter_sum(struct percpu_counter *fbc); >int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs); > >static inline __attribute__((always_inline)) void percpu_counter_add(struct percpu_counter *fbc, s64 amount) >{ > __percpu_counter_add(fbc, amount, percpu_counter_batch); >} > >static inline __attribute__((always_inline)) s64 percpu_counter_sum_positive(struct percpu_counter *fbc) >{ > s64 ret = __percpu_counter_sum(fbc); > return ret < 0 ? 
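/*
 * struct percpu_counter above trades accuracy for scalability: each CPU
 * accumulates into its own s32 and only folds batches of size
 * percpu_counter_batch into the shared s64, so percpu_counter_read() is
 * a cheap approximation and __percpu_counter_sum() the exact but slow
 * answer.  Sketch using only the declarations above; the static
 * lock_class_key mirrors what the kernel's init macro passes, compiled
 * out:
 */
#if 0
static struct percpu_counter sketch_pc;

static void sketch_percpu_counter(void)
{
	static struct lock_class_key key;

	__percpu_counter_init(&sketch_pc, 0, &key);
	percpu_counter_add(&sketch_pc, 16);	/* fast, mostly per-CPU */
	(void)__percpu_counter_sum(&sketch_pc);	/* exact: folds all CPUs */
	percpu_counter_destroy(&sketch_pc);
}
#endif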
0 : ret; >} > >static inline __attribute__((always_inline)) s64 percpu_counter_sum(struct percpu_counter *fbc) >{ > return __percpu_counter_sum(fbc); >} > >static inline __attribute__((always_inline)) s64 percpu_counter_read(struct percpu_counter *fbc) >{ > return fbc->count; >} > > > > > > >static inline __attribute__((always_inline)) s64 percpu_counter_read_positive(struct percpu_counter *fbc) >{ > s64 ret = fbc->count; > > __asm__ __volatile__("": : :"memory"); > if (ret >= 0) > return ret; > return 0; >} > >static inline __attribute__((always_inline)) int percpu_counter_initialized(struct percpu_counter *fbc) >{ > return (fbc->counters != ((void *)0)); >} > >static inline __attribute__((always_inline)) void percpu_counter_inc(struct percpu_counter *fbc) >{ > percpu_counter_add(fbc, 1); >} > >static inline __attribute__((always_inline)) void percpu_counter_dec(struct percpu_counter *fbc) >{ > percpu_counter_add(fbc, -1); >} > >static inline __attribute__((always_inline)) void percpu_counter_sub(struct percpu_counter *fbc, s64 amount) >{ > percpu_counter_add(fbc, -amount); >} > > > > >typedef struct fs_disk_quota { > __s8 d_version; > __s8 d_flags; > __u16 d_fieldmask; > __u32 d_id; > __u64 d_blk_hardlimit; > __u64 d_blk_softlimit; > __u64 d_ino_hardlimit; > __u64 d_ino_softlimit; > __u64 d_bcount; > __u64 d_icount; > __s32 d_itimer; > > __s32 d_btimer; > __u16 d_iwarns; > __u16 d_bwarns; > __s32 d_padding2; > __u64 d_rtb_hardlimit; > __u64 d_rtb_softlimit; > __u64 d_rtbcount; > __s32 d_rtbtimer; > __u16 d_rtbwarns; > __s16 d_padding3; > char d_padding4[8]; >} fs_disk_quota_t; > >typedef struct fs_qfilestat { > __u64 qfs_ino; > __u64 qfs_nblks; > __u32 qfs_nextents; >} fs_qfilestat_t; > >typedef struct fs_quota_stat { > __s8 qs_version; > __u16 qs_flags; > __s8 qs_pad; > fs_qfilestat_t qs_uquota; > fs_qfilestat_t qs_gquota; > __u32 qs_incoredqs; > __s32 qs_btimelimit; > __s32 qs_itimelimit; > __s32 qs_rtbtimelimit; > __u16 qs_bwarnlimit; > __u16 qs_iwarnlimit; >} fs_quota_stat_t; > > > > > > > > > > > > > >struct dquot; > > >struct qtree_fmt_operations { > void (*mem2disk_dqblk)(void *disk, struct dquot *dquot); > void (*disk2mem_dqblk)(struct dquot *dquot, void *disk); > int (*is_id)(void *disk, struct dquot *dquot); >}; > > >struct qtree_mem_dqinfo { > struct super_block *dqi_sb; > int dqi_type; > unsigned int dqi_blocks; > unsigned int dqi_free_blk; > unsigned int dqi_free_entry; > unsigned int dqi_blocksize_bits; > unsigned int dqi_entry_size; > unsigned int dqi_usable_bs; > unsigned int dqi_qtree_depth; > struct qtree_fmt_operations *dqi_ops; >}; > >int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); >int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); >int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); >int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot); >int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk); >static inline __attribute__((always_inline)) int qtree_depth(struct qtree_mem_dqinfo *info) >{ > unsigned int epb = info->dqi_usable_bs >> 2; > unsigned long long entries = epb; > int i; > > for (i = 1; entries < (1ULL << 32); i++) > entries *= epb; > return i; >} > > > > > >typedef __kernel_uid32_t qid_t; >typedef long long qsize_t; > >extern spinlock_t dq_data_lock; > >struct mem_dqblk { > qsize_t dqb_bhardlimit; > qsize_t dqb_bsoftlimit; > qsize_t dqb_curspace; > qsize_t dqb_rsvspace; > qsize_t dqb_ihardlimit; > qsize_t dqb_isoftlimit; > qsize_t dqb_curinodes; > 
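/*
 * qtree_depth() above sizes the radix tree used by the quota file
 * format: each tree block holds epb = dqi_usable_bs / 4 block
 * references, and the loop finds the smallest depth whose capacity
 * epb^depth covers the full 32-bit id space.  Worked example: with a
 * 1024-byte usable block size, epb = 256 and 256^4 = 2^32, so the
 * function returns 4.  As a check; illustrative, compiled out:
 */
#if 0
static int sketch_qtree_depth_1k(void)
{
	struct qtree_mem_dqinfo info = { .dqi_usable_bs = 1024 };

	return qtree_depth(&info);	/* yields 4 */
}
#endif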
time_t dqb_btime; > time_t dqb_itime; >}; > > > > >struct quota_format_type; > >struct mem_dqinfo { > struct quota_format_type *dqi_format; > int dqi_fmt_id; > > struct list_head dqi_dirty_list; > unsigned long dqi_flags; > unsigned int dqi_bgrace; > unsigned int dqi_igrace; > qsize_t dqi_maxblimit; > qsize_t dqi_maxilimit; > void *dqi_priv; >}; > >struct super_block; > >extern void mark_info_dirty(struct super_block *sb, int type); >static inline __attribute__((always_inline)) int info_dirty(struct mem_dqinfo *info) >{ > return test_bit(31, &info->dqi_flags); >} > >enum { > DQST_LOOKUPS, > DQST_DROPS, > DQST_READS, > DQST_WRITES, > DQST_CACHE_HITS, > DQST_ALLOC_DQUOTS, > DQST_FREE_DQUOTS, > DQST_SYNCS, > _DQST_DQSTAT_LAST >}; > >struct dqstats { > int stat[_DQST_DQSTAT_LAST]; > struct percpu_counter counter[_DQST_DQSTAT_LAST]; >}; > >extern struct dqstats *dqstats_pcpu; >extern struct dqstats dqstats; > >static inline __attribute__((always_inline)) void dqstats_inc(unsigned int type) >{ > percpu_counter_inc(&dqstats.counter[type]); >} > >static inline __attribute__((always_inline)) void dqstats_dec(unsigned int type) >{ > percpu_counter_dec(&dqstats.counter[type]); >} > >struct dquot { > struct hlist_node dq_hash; > struct list_head dq_inuse; > struct list_head dq_free; > struct list_head dq_dirty; > struct mutex dq_lock; > atomic_t dq_count; > wait_queue_head_t dq_wait_unused; > struct super_block *dq_sb; > unsigned int dq_id; > loff_t dq_off; > unsigned long dq_flags; > short dq_type; > struct mem_dqblk dq_dqb; >}; > > >struct quota_format_ops { > int (*check_quota_file)(struct super_block *sb, int type); > int (*read_file_info)(struct super_block *sb, int type); > int (*write_file_info)(struct super_block *sb, int type); > int (*free_file_info)(struct super_block *sb, int type); > int (*read_dqblk)(struct dquot *dquot); > int (*commit_dqblk)(struct dquot *dquot); > int (*release_dqblk)(struct dquot *dquot); >}; > > >struct dquot_operations { > int (*write_dquot) (struct dquot *); > struct dquot *(*alloc_dquot)(struct super_block *, int); > void (*destroy_dquot)(struct dquot *); > int (*acquire_dquot) (struct dquot *); > int (*release_dquot) (struct dquot *); > int (*mark_dirty) (struct dquot *); > int (*write_info) (struct super_block *, int); > > > qsize_t *(*get_reserved_space) (struct inode *); >}; > >struct path; > > >struct quotactl_ops { > int (*quota_on)(struct super_block *, int, int, struct path *); > int (*quota_on_meta)(struct super_block *, int, int); > int (*quota_off)(struct super_block *, int); > int (*quota_sync)(struct super_block *, int, int); > int (*get_info)(struct super_block *, int, struct if_dqinfo *); > int (*set_info)(struct super_block *, int, struct if_dqinfo *); > int (*get_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); > int (*set_dqblk)(struct super_block *, int, qid_t, struct fs_disk_quota *); > int (*get_xstate)(struct super_block *, struct fs_quota_stat *); > int (*set_xstate)(struct super_block *, unsigned int, int); >}; > >struct quota_format_type { > int qf_fmt_id; > const struct quota_format_ops *qf_ops; > struct module *qf_owner; > struct quota_format_type *qf_next; >}; > > >enum { > _DQUOT_USAGE_ENABLED = 0, > _DQUOT_LIMITS_ENABLED, > _DQUOT_SUSPENDED, > > > _DQUOT_STATE_FLAGS >}; > >static inline __attribute__((always_inline)) unsigned int dquot_state_flag(unsigned int flags, int type) >{ > return flags << _DQUOT_STATE_FLAGS * type; >} > >static inline __attribute__((always_inline)) unsigned int dquot_generic_flag(unsigned int 
flags, int type) >{ > return (flags >> _DQUOT_STATE_FLAGS * type) & ((1 << _DQUOT_USAGE_ENABLED) | (1 << _DQUOT_LIMITS_ENABLED) | (1 << _DQUOT_SUSPENDED)); >} > > > > > >static inline __attribute__((always_inline)) void quota_send_warning(short type, unsigned int id, dev_t dev, > const char warntype) >{ > return; >} > > >struct quota_info { > unsigned int flags; > struct mutex dqio_mutex; > struct mutex dqonoff_mutex; > struct rw_semaphore dqptr_sem; > struct inode *files[2]; > struct mem_dqinfo info[2]; > const struct quota_format_ops *ops[2]; >}; > >int register_quota_format(struct quota_format_type *fmt); >void unregister_quota_format(struct quota_format_type *fmt); > >struct quota_module_name { > int qm_fmt_id; > char *qm_mod_name; >}; > > > > > >struct rb_node >{ > unsigned long rb_parent_color; > > > struct rb_node *rb_right; > struct rb_node *rb_left; >} __attribute__((aligned(sizeof(long)))); > > >struct rb_root >{ > struct rb_node *rb_node; >}; > >static inline __attribute__((always_inline)) void rb_set_parent(struct rb_node *rb, struct rb_node *p) >{ > rb->rb_parent_color = (rb->rb_parent_color & 3) | (unsigned long)p; >} >static inline __attribute__((always_inline)) void rb_set_color(struct rb_node *rb, int color) >{ > rb->rb_parent_color = (rb->rb_parent_color & ~1) | color; >} > > > >static inline __attribute__((always_inline)) void rb_root_init(struct rb_root *root, struct rb_node *node) >{ > root->rb_node = node; > if (node) { > node->rb_parent_color = 1; > node->rb_left = ((void *)0); > node->rb_right = ((void *)0); > } >} > > > > > > > >static inline __attribute__((always_inline)) void rb_init_node(struct rb_node *rb) >{ > rb->rb_parent_color = 0; > rb->rb_right = ((void *)0); > rb->rb_left = ((void *)0); > (rb_set_parent(rb, rb)); >} > >extern void rb_insert_color(struct rb_node *, struct rb_root *); >extern void rb_erase(struct rb_node *, struct rb_root *); > >typedef void (*rb_augment_f)(struct rb_node *node, void *data); > >extern void rb_augment_insert(struct rb_node *node, > rb_augment_f func, void *data); >extern struct rb_node *rb_augment_erase_begin(struct rb_node *node); >extern void rb_augment_erase_end(struct rb_node *node, > rb_augment_f func, void *data); > > >extern struct rb_node *rb_next(const struct rb_node *); >extern struct rb_node *rb_prev(const struct rb_node *); >extern struct rb_node *rb_first(const struct rb_root *); >extern struct rb_node *rb_last(const struct rb_root *); > > >extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, > struct rb_root *root); > >static inline __attribute__((always_inline)) void rb_link_node(struct rb_node * node, struct rb_node * parent, > struct rb_node ** rb_link) >{ > node->rb_parent_color = (unsigned long )parent; > node->rb_left = node->rb_right = ((void *)0); > > *rb_link = node; >} > > > > >struct completion; > > > > > > >struct __sysctl_args { > int *name; > int nlen; > void *oldval; > size_t *oldlenp; > void *newval; > size_t newlen; > unsigned long __unused[4]; >}; > > > > > >enum >{ > CTL_KERN=1, > CTL_VM=2, > CTL_NET=3, > CTL_PROC=4, > CTL_FS=5, > CTL_DEBUG=6, > CTL_DEV=7, > CTL_BUS=8, > CTL_ABI=9, > CTL_CPU=10, > CTL_ARLAN=254, > CTL_S390DBF=5677, > CTL_SUNRPC=7249, > CTL_PM=9899, > CTL_FRV=9898, >}; > > >enum >{ > CTL_BUS_ISA=1 >}; > > >enum >{ > INOTIFY_MAX_USER_INSTANCES=1, > INOTIFY_MAX_USER_WATCHES=2, > INOTIFY_MAX_QUEUED_EVENTS=3 >}; > > >enum >{ > KERN_OSTYPE=1, > KERN_OSRELEASE=2, > KERN_OSREV=3, > KERN_VERSION=4, > KERN_SECUREMASK=5, > KERN_PROF=6, > KERN_NODENAME=7, > 
KERN_DOMAINNAME=8, > > KERN_PANIC=15, > KERN_REALROOTDEV=16, > > KERN_SPARC_REBOOT=21, > KERN_CTLALTDEL=22, > KERN_PRINTK=23, > KERN_NAMETRANS=24, > KERN_PPC_HTABRECLAIM=25, > KERN_PPC_ZEROPAGED=26, > KERN_PPC_POWERSAVE_NAP=27, > KERN_MODPROBE=28, > KERN_SG_BIG_BUFF=29, > KERN_ACCT=30, > KERN_PPC_L2CR=31, > > KERN_RTSIGNR=32, > KERN_RTSIGMAX=33, > > KERN_SHMMAX=34, > KERN_MSGMAX=35, > KERN_MSGMNB=36, > KERN_MSGPOOL=37, > KERN_SYSRQ=38, > KERN_MAX_THREADS=39, > KERN_RANDOM=40, > KERN_SHMALL=41, > KERN_MSGMNI=42, > KERN_SEM=43, > KERN_SPARC_STOP_A=44, > KERN_SHMMNI=45, > KERN_OVERFLOWUID=46, > KERN_OVERFLOWGID=47, > KERN_SHMPATH=48, > KERN_HOTPLUG=49, > KERN_IEEE_EMULATION_WARNINGS=50, > KERN_S390_USER_DEBUG_LOGGING=51, > KERN_CORE_USES_PID=52, > KERN_TAINTED=53, > KERN_CADPID=54, > KERN_PIDMAX=55, > KERN_CORE_PATTERN=56, > KERN_PANIC_ON_OOPS=57, > KERN_HPPA_PWRSW=58, > KERN_HPPA_UNALIGNED=59, > KERN_PRINTK_RATELIMIT=60, > KERN_PRINTK_RATELIMIT_BURST=61, > KERN_PTY=62, > KERN_NGROUPS_MAX=63, > KERN_SPARC_SCONS_PWROFF=64, > KERN_HZ_TIMER=65, > KERN_UNKNOWN_NMI_PANIC=66, > KERN_BOOTLOADER_TYPE=67, > KERN_RANDOMIZE=68, > KERN_SETUID_DUMPABLE=69, > KERN_SPIN_RETRY=70, > KERN_ACPI_VIDEO_FLAGS=71, > KERN_IA64_UNALIGNED=72, > KERN_COMPAT_LOG=73, > KERN_MAX_LOCK_DEPTH=74, > KERN_NMI_WATCHDOG=75, > KERN_PANIC_ON_NMI=76, >}; > > > > >enum >{ > VM_UNUSED1=1, > VM_UNUSED2=2, > VM_UNUSED3=3, > VM_UNUSED4=4, > VM_OVERCOMMIT_MEMORY=5, > VM_UNUSED5=6, > VM_UNUSED7=7, > VM_UNUSED8=8, > VM_UNUSED9=9, > VM_PAGE_CLUSTER=10, > VM_DIRTY_BACKGROUND=11, > VM_DIRTY_RATIO=12, > VM_DIRTY_WB_CS=13, > VM_DIRTY_EXPIRE_CS=14, > VM_NR_PDFLUSH_THREADS=15, > VM_OVERCOMMIT_RATIO=16, > VM_PAGEBUF=17, > VM_HUGETLB_PAGES=18, > VM_SWAPPINESS=19, > VM_LOWMEM_RESERVE_RATIO=20, > VM_MIN_FREE_KBYTES=21, > VM_MAX_MAP_COUNT=22, > VM_LAPTOP_MODE=23, > VM_BLOCK_DUMP=24, > VM_HUGETLB_GROUP=25, > VM_VFS_CACHE_PRESSURE=26, > VM_LEGACY_VA_LAYOUT=27, > VM_SWAP_TOKEN_TIMEOUT=28, > VM_DROP_PAGECACHE=29, > VM_PERCPU_PAGELIST_FRACTION=30, > VM_ZONE_RECLAIM_MODE=31, > VM_MIN_UNMAPPED=32, > VM_PANIC_ON_OOM=33, > VM_VDSO_ENABLED=34, > VM_MIN_SLAB=35, >}; > > > >enum >{ > NET_CORE=1, > NET_ETHER=2, > NET_802=3, > NET_UNIX=4, > NET_IPV4=5, > NET_IPX=6, > NET_ATALK=7, > NET_NETROM=8, > NET_AX25=9, > NET_BRIDGE=10, > NET_ROSE=11, > NET_IPV6=12, > NET_X25=13, > NET_TR=14, > NET_DECNET=15, > NET_ECONET=16, > NET_SCTP=17, > NET_LLC=18, > NET_NETFILTER=19, > NET_DCCP=20, > NET_IRDA=412, >}; > > >enum >{ > RANDOM_POOLSIZE=1, > RANDOM_ENTROPY_COUNT=2, > RANDOM_READ_THRESH=3, > RANDOM_WRITE_THRESH=4, > RANDOM_BOOT_ID=5, > RANDOM_UUID=6 >}; > > >enum >{ > PTY_MAX=1, > PTY_NR=2 >}; > > >enum >{ > BUS_ISA_MEM_BASE=1, > BUS_ISA_PORT_BASE=2, > BUS_ISA_PORT_SHIFT=3 >}; > > >enum >{ > NET_CORE_WMEM_MAX=1, > NET_CORE_RMEM_MAX=2, > NET_CORE_WMEM_DEFAULT=3, > NET_CORE_RMEM_DEFAULT=4, > > NET_CORE_MAX_BACKLOG=6, > NET_CORE_FASTROUTE=7, > NET_CORE_MSG_COST=8, > NET_CORE_MSG_BURST=9, > NET_CORE_OPTMEM_MAX=10, > NET_CORE_HOT_LIST_LENGTH=11, > NET_CORE_DIVERT_VERSION=12, > NET_CORE_NO_CONG_THRESH=13, > NET_CORE_NO_CONG=14, > NET_CORE_LO_CONG=15, > NET_CORE_MOD_CONG=16, > NET_CORE_DEV_WEIGHT=17, > NET_CORE_SOMAXCONN=18, > NET_CORE_BUDGET=19, > NET_CORE_AEVENT_ETIME=20, > NET_CORE_AEVENT_RSEQTH=21, > NET_CORE_WARNINGS=22, >}; > > > > > > > >enum >{ > NET_UNIX_DESTROY_DELAY=1, > NET_UNIX_DELETE_DELAY=2, > NET_UNIX_MAX_DGRAM_QLEN=3, >}; > > >enum >{ > NET_NF_CONNTRACK_MAX=1, > NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, > NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, > 
NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, > NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, > NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, > NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, > NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, > NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, > NET_NF_CONNTRACK_UDP_TIMEOUT=10, > NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, > NET_NF_CONNTRACK_ICMP_TIMEOUT=12, > NET_NF_CONNTRACK_GENERIC_TIMEOUT=13, > NET_NF_CONNTRACK_BUCKETS=14, > NET_NF_CONNTRACK_LOG_INVALID=15, > NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, > NET_NF_CONNTRACK_TCP_LOOSE=17, > NET_NF_CONNTRACK_TCP_BE_LIBERAL=18, > NET_NF_CONNTRACK_TCP_MAX_RETRANS=19, > NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, > NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, > NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, > NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, > NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, > NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, > NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, > NET_NF_CONNTRACK_COUNT=27, > NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28, > NET_NF_CONNTRACK_FRAG6_TIMEOUT=29, > NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30, > NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31, > NET_NF_CONNTRACK_CHECKSUM=32, >}; > > >enum >{ > > NET_IPV4_FORWARD=8, > NET_IPV4_DYNADDR=9, > > NET_IPV4_CONF=16, > NET_IPV4_NEIGH=17, > NET_IPV4_ROUTE=18, > NET_IPV4_FIB_HASH=19, > NET_IPV4_NETFILTER=20, > > NET_IPV4_TCP_TIMESTAMPS=33, > NET_IPV4_TCP_WINDOW_SCALING=34, > NET_IPV4_TCP_SACK=35, > NET_IPV4_TCP_RETRANS_COLLAPSE=36, > NET_IPV4_DEFAULT_TTL=37, > NET_IPV4_AUTOCONFIG=38, > NET_IPV4_NO_PMTU_DISC=39, > NET_IPV4_TCP_SYN_RETRIES=40, > NET_IPV4_IPFRAG_HIGH_THRESH=41, > NET_IPV4_IPFRAG_LOW_THRESH=42, > NET_IPV4_IPFRAG_TIME=43, > NET_IPV4_TCP_MAX_KA_PROBES=44, > NET_IPV4_TCP_KEEPALIVE_TIME=45, > NET_IPV4_TCP_KEEPALIVE_PROBES=46, > NET_IPV4_TCP_RETRIES1=47, > NET_IPV4_TCP_RETRIES2=48, > NET_IPV4_TCP_FIN_TIMEOUT=49, > NET_IPV4_IP_MASQ_DEBUG=50, > NET_TCP_SYNCOOKIES=51, > NET_TCP_STDURG=52, > NET_TCP_RFC1337=53, > NET_TCP_SYN_TAILDROP=54, > NET_TCP_MAX_SYN_BACKLOG=55, > NET_IPV4_LOCAL_PORT_RANGE=56, > NET_IPV4_ICMP_ECHO_IGNORE_ALL=57, > NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS=58, > NET_IPV4_ICMP_SOURCEQUENCH_RATE=59, > NET_IPV4_ICMP_DESTUNREACH_RATE=60, > NET_IPV4_ICMP_TIMEEXCEED_RATE=61, > NET_IPV4_ICMP_PARAMPROB_RATE=62, > NET_IPV4_ICMP_ECHOREPLY_RATE=63, > NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES=64, > NET_IPV4_IGMP_MAX_MEMBERSHIPS=65, > NET_TCP_TW_RECYCLE=66, > NET_IPV4_ALWAYS_DEFRAG=67, > NET_IPV4_TCP_KEEPALIVE_INTVL=68, > NET_IPV4_INET_PEER_THRESHOLD=69, > NET_IPV4_INET_PEER_MINTTL=70, > NET_IPV4_INET_PEER_MAXTTL=71, > NET_IPV4_INET_PEER_GC_MINTIME=72, > NET_IPV4_INET_PEER_GC_MAXTIME=73, > NET_TCP_ORPHAN_RETRIES=74, > NET_TCP_ABORT_ON_OVERFLOW=75, > NET_TCP_SYNACK_RETRIES=76, > NET_TCP_MAX_ORPHANS=77, > NET_TCP_MAX_TW_BUCKETS=78, > NET_TCP_FACK=79, > NET_TCP_REORDERING=80, > NET_TCP_ECN=81, > NET_TCP_DSACK=82, > NET_TCP_MEM=83, > NET_TCP_WMEM=84, > NET_TCP_RMEM=85, > NET_TCP_APP_WIN=86, > NET_TCP_ADV_WIN_SCALE=87, > NET_IPV4_NONLOCAL_BIND=88, > NET_IPV4_ICMP_RATELIMIT=89, > NET_IPV4_ICMP_RATEMASK=90, > NET_TCP_TW_REUSE=91, > NET_TCP_FRTO=92, > NET_TCP_LOW_LATENCY=93, > NET_IPV4_IPFRAG_SECRET_INTERVAL=94, > NET_IPV4_IGMP_MAX_MSF=96, > NET_TCP_NO_METRICS_SAVE=97, > NET_TCP_DEFAULT_WIN_SCALE=105, > NET_TCP_MODERATE_RCVBUF=106, > NET_TCP_TSO_WIN_DIVISOR=107, > NET_TCP_BIC_BETA=108, > NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109, > NET_TCP_CONG_CONTROL=110, > NET_TCP_ABC=111, > NET_IPV4_IPFRAG_MAX_DIST=112, > NET_TCP_MTU_PROBING=113, > 
NET_TCP_BASE_MSS=114, > NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS=115, > NET_TCP_DMA_COPYBREAK=116, > NET_TCP_SLOW_START_AFTER_IDLE=117, > NET_CIPSOV4_CACHE_ENABLE=118, > NET_CIPSOV4_CACHE_BUCKET_SIZE=119, > NET_CIPSOV4_RBM_OPTFMT=120, > NET_CIPSOV4_RBM_STRICTVALID=121, > NET_TCP_AVAIL_CONG_CONTROL=122, > NET_TCP_ALLOWED_CONG_CONTROL=123, > NET_TCP_MAX_SSTHRESH=124, > NET_TCP_FRTO_RESPONSE=125, >}; > >enum { > NET_IPV4_ROUTE_FLUSH=1, > NET_IPV4_ROUTE_MIN_DELAY=2, > NET_IPV4_ROUTE_MAX_DELAY=3, > NET_IPV4_ROUTE_GC_THRESH=4, > NET_IPV4_ROUTE_MAX_SIZE=5, > NET_IPV4_ROUTE_GC_MIN_INTERVAL=6, > NET_IPV4_ROUTE_GC_TIMEOUT=7, > NET_IPV4_ROUTE_GC_INTERVAL=8, > NET_IPV4_ROUTE_REDIRECT_LOAD=9, > NET_IPV4_ROUTE_REDIRECT_NUMBER=10, > NET_IPV4_ROUTE_REDIRECT_SILENCE=11, > NET_IPV4_ROUTE_ERROR_COST=12, > NET_IPV4_ROUTE_ERROR_BURST=13, > NET_IPV4_ROUTE_GC_ELASTICITY=14, > NET_IPV4_ROUTE_MTU_EXPIRES=15, > NET_IPV4_ROUTE_MIN_PMTU=16, > NET_IPV4_ROUTE_MIN_ADVMSS=17, > NET_IPV4_ROUTE_SECRET_INTERVAL=18, > NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS=19, >}; > >enum >{ > NET_PROTO_CONF_ALL=-2, > NET_PROTO_CONF_DEFAULT=-3 > > >}; > >enum >{ > NET_IPV4_CONF_FORWARDING=1, > NET_IPV4_CONF_MC_FORWARDING=2, > NET_IPV4_CONF_PROXY_ARP=3, > NET_IPV4_CONF_ACCEPT_REDIRECTS=4, > NET_IPV4_CONF_SECURE_REDIRECTS=5, > NET_IPV4_CONF_SEND_REDIRECTS=6, > NET_IPV4_CONF_SHARED_MEDIA=7, > NET_IPV4_CONF_RP_FILTER=8, > NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE=9, > NET_IPV4_CONF_BOOTP_RELAY=10, > NET_IPV4_CONF_LOG_MARTIANS=11, > NET_IPV4_CONF_TAG=12, > NET_IPV4_CONF_ARPFILTER=13, > NET_IPV4_CONF_MEDIUM_ID=14, > NET_IPV4_CONF_NOXFRM=15, > NET_IPV4_CONF_NOPOLICY=16, > NET_IPV4_CONF_FORCE_IGMP_VERSION=17, > NET_IPV4_CONF_ARP_ANNOUNCE=18, > NET_IPV4_CONF_ARP_IGNORE=19, > NET_IPV4_CONF_PROMOTE_SECONDARIES=20, > NET_IPV4_CONF_ARP_ACCEPT=21, > NET_IPV4_CONF_ARP_NOTIFY=22, >}; > > >enum >{ > NET_IPV4_NF_CONNTRACK_MAX=1, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9, > NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT=10, > NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11, > NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT=12, > NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT=13, > NET_IPV4_NF_CONNTRACK_BUCKETS=14, > NET_IPV4_NF_CONNTRACK_LOG_INVALID=15, > NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16, > NET_IPV4_NF_CONNTRACK_TCP_LOOSE=17, > NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL=18, > NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS=19, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25, > NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26, > NET_IPV4_NF_CONNTRACK_COUNT=27, > NET_IPV4_NF_CONNTRACK_CHECKSUM=28, >}; > > >enum { > NET_IPV6_CONF=16, > NET_IPV6_NEIGH=17, > NET_IPV6_ROUTE=18, > NET_IPV6_ICMP=19, > NET_IPV6_BINDV6ONLY=20, > NET_IPV6_IP6FRAG_HIGH_THRESH=21, > NET_IPV6_IP6FRAG_LOW_THRESH=22, > NET_IPV6_IP6FRAG_TIME=23, > NET_IPV6_IP6FRAG_SECRET_INTERVAL=24, > NET_IPV6_MLD_MAX_MSF=25, >}; > >enum { > NET_IPV6_ROUTE_FLUSH=1, > NET_IPV6_ROUTE_GC_THRESH=2, > NET_IPV6_ROUTE_MAX_SIZE=3, > 
NET_IPV6_ROUTE_GC_MIN_INTERVAL=4, > NET_IPV6_ROUTE_GC_TIMEOUT=5, > NET_IPV6_ROUTE_GC_INTERVAL=6, > NET_IPV6_ROUTE_GC_ELASTICITY=7, > NET_IPV6_ROUTE_MTU_EXPIRES=8, > NET_IPV6_ROUTE_MIN_ADVMSS=9, > NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS=10 >}; > >enum { > NET_IPV6_FORWARDING=1, > NET_IPV6_HOP_LIMIT=2, > NET_IPV6_MTU=3, > NET_IPV6_ACCEPT_RA=4, > NET_IPV6_ACCEPT_REDIRECTS=5, > NET_IPV6_AUTOCONF=6, > NET_IPV6_DAD_TRANSMITS=7, > NET_IPV6_RTR_SOLICITS=8, > NET_IPV6_RTR_SOLICIT_INTERVAL=9, > NET_IPV6_RTR_SOLICIT_DELAY=10, > NET_IPV6_USE_TEMPADDR=11, > NET_IPV6_TEMP_VALID_LFT=12, > NET_IPV6_TEMP_PREFERED_LFT=13, > NET_IPV6_REGEN_MAX_RETRY=14, > NET_IPV6_MAX_DESYNC_FACTOR=15, > NET_IPV6_MAX_ADDRESSES=16, > NET_IPV6_FORCE_MLD_VERSION=17, > NET_IPV6_ACCEPT_RA_DEFRTR=18, > NET_IPV6_ACCEPT_RA_PINFO=19, > NET_IPV6_ACCEPT_RA_RTR_PREF=20, > NET_IPV6_RTR_PROBE_INTERVAL=21, > NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22, > NET_IPV6_PROXY_NDP=23, > NET_IPV6_ACCEPT_SOURCE_ROUTE=25, > __NET_IPV6_MAX >}; > > >enum { > NET_IPV6_ICMP_RATELIMIT=1 >}; > > >enum { > NET_NEIGH_MCAST_SOLICIT=1, > NET_NEIGH_UCAST_SOLICIT=2, > NET_NEIGH_APP_SOLICIT=3, > NET_NEIGH_RETRANS_TIME=4, > NET_NEIGH_REACHABLE_TIME=5, > NET_NEIGH_DELAY_PROBE_TIME=6, > NET_NEIGH_GC_STALE_TIME=7, > NET_NEIGH_UNRES_QLEN=8, > NET_NEIGH_PROXY_QLEN=9, > NET_NEIGH_ANYCAST_DELAY=10, > NET_NEIGH_PROXY_DELAY=11, > NET_NEIGH_LOCKTIME=12, > NET_NEIGH_GC_INTERVAL=13, > NET_NEIGH_GC_THRESH1=14, > NET_NEIGH_GC_THRESH2=15, > NET_NEIGH_GC_THRESH3=16, > NET_NEIGH_RETRANS_TIME_MS=17, > NET_NEIGH_REACHABLE_TIME_MS=18, >}; > > >enum { > NET_DCCP_DEFAULT=1, >}; > > >enum { > NET_IPX_PPROP_BROADCASTING=1, > NET_IPX_FORWARDING=2 >}; > > >enum { > NET_LLC2=1, > NET_LLC_STATION=2, >}; > > >enum { > NET_LLC2_TIMEOUT=1, >}; > > >enum { > NET_LLC_STATION_ACK_TIMEOUT=1, >}; > > >enum { > NET_LLC2_ACK_TIMEOUT=1, > NET_LLC2_P_TIMEOUT=2, > NET_LLC2_REJ_TIMEOUT=3, > NET_LLC2_BUSY_TIMEOUT=4, >}; > > >enum { > NET_ATALK_AARP_EXPIRY_TIME=1, > NET_ATALK_AARP_TICK_TIME=2, > NET_ATALK_AARP_RETRANSMIT_LIMIT=3, > NET_ATALK_AARP_RESOLVE_TIME=4 >}; > > > >enum { > NET_NETROM_DEFAULT_PATH_QUALITY=1, > NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER=2, > NET_NETROM_NETWORK_TTL_INITIALISER=3, > NET_NETROM_TRANSPORT_TIMEOUT=4, > NET_NETROM_TRANSPORT_MAXIMUM_TRIES=5, > NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY=6, > NET_NETROM_TRANSPORT_BUSY_DELAY=7, > NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE=8, > NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT=9, > NET_NETROM_ROUTING_CONTROL=10, > NET_NETROM_LINK_FAILS_COUNT=11, > NET_NETROM_RESET=12 >}; > > >enum { > NET_AX25_IP_DEFAULT_MODE=1, > NET_AX25_DEFAULT_MODE=2, > NET_AX25_BACKOFF_TYPE=3, > NET_AX25_CONNECT_MODE=4, > NET_AX25_STANDARD_WINDOW=5, > NET_AX25_EXTENDED_WINDOW=6, > NET_AX25_T1_TIMEOUT=7, > NET_AX25_T2_TIMEOUT=8, > NET_AX25_T3_TIMEOUT=9, > NET_AX25_IDLE_TIMEOUT=10, > NET_AX25_N2=11, > NET_AX25_PACLEN=12, > NET_AX25_PROTOCOL=13, > NET_AX25_DAMA_SLAVE_TIMEOUT=14 >}; > > >enum { > NET_ROSE_RESTART_REQUEST_TIMEOUT=1, > NET_ROSE_CALL_REQUEST_TIMEOUT=2, > NET_ROSE_RESET_REQUEST_TIMEOUT=3, > NET_ROSE_CLEAR_REQUEST_TIMEOUT=4, > NET_ROSE_ACK_HOLD_BACK_TIMEOUT=5, > NET_ROSE_ROUTING_CONTROL=6, > NET_ROSE_LINK_FAIL_TIMEOUT=7, > NET_ROSE_MAX_VCS=8, > NET_ROSE_WINDOW_SIZE=9, > NET_ROSE_NO_ACTIVITY_TIMEOUT=10 >}; > > >enum { > NET_X25_RESTART_REQUEST_TIMEOUT=1, > NET_X25_CALL_REQUEST_TIMEOUT=2, > NET_X25_RESET_REQUEST_TIMEOUT=3, > NET_X25_CLEAR_REQUEST_TIMEOUT=4, > NET_X25_ACK_HOLD_BACK_TIMEOUT=5, > NET_X25_FORWARD=6 >}; > > >enum >{ > NET_TR_RIF_TIMEOUT=1 >}; > > >enum { > 
NET_DECNET_NODE_TYPE = 1, > NET_DECNET_NODE_ADDRESS = 2, > NET_DECNET_NODE_NAME = 3, > NET_DECNET_DEFAULT_DEVICE = 4, > NET_DECNET_TIME_WAIT = 5, > NET_DECNET_DN_COUNT = 6, > NET_DECNET_DI_COUNT = 7, > NET_DECNET_DR_COUNT = 8, > NET_DECNET_DST_GC_INTERVAL = 9, > NET_DECNET_CONF = 10, > NET_DECNET_NO_FC_MAX_CWND = 11, > NET_DECNET_MEM = 12, > NET_DECNET_RMEM = 13, > NET_DECNET_WMEM = 14, > NET_DECNET_DEBUG_LEVEL = 255 >}; > > >enum { > NET_DECNET_CONF_LOOPBACK = -2, > NET_DECNET_CONF_DDCMP = -3, > NET_DECNET_CONF_PPP = -4, > NET_DECNET_CONF_X25 = -5, > NET_DECNET_CONF_GRE = -6, > NET_DECNET_CONF_ETHER = -7 > > >}; > > >enum { > NET_DECNET_CONF_DEV_PRIORITY = 1, > NET_DECNET_CONF_DEV_T1 = 2, > NET_DECNET_CONF_DEV_T2 = 3, > NET_DECNET_CONF_DEV_T3 = 4, > NET_DECNET_CONF_DEV_FORWARDING = 5, > NET_DECNET_CONF_DEV_BLKSIZE = 6, > NET_DECNET_CONF_DEV_STATE = 7 >}; > > >enum { > NET_SCTP_RTO_INITIAL = 1, > NET_SCTP_RTO_MIN = 2, > NET_SCTP_RTO_MAX = 3, > NET_SCTP_RTO_ALPHA = 4, > NET_SCTP_RTO_BETA = 5, > NET_SCTP_VALID_COOKIE_LIFE = 6, > NET_SCTP_ASSOCIATION_MAX_RETRANS = 7, > NET_SCTP_PATH_MAX_RETRANS = 8, > NET_SCTP_MAX_INIT_RETRANSMITS = 9, > NET_SCTP_HB_INTERVAL = 10, > NET_SCTP_PRESERVE_ENABLE = 11, > NET_SCTP_MAX_BURST = 12, > NET_SCTP_ADDIP_ENABLE = 13, > NET_SCTP_PRSCTP_ENABLE = 14, > NET_SCTP_SNDBUF_POLICY = 15, > NET_SCTP_SACK_TIMEOUT = 16, > NET_SCTP_RCVBUF_POLICY = 17, >}; > > >enum { > NET_BRIDGE_NF_CALL_ARPTABLES = 1, > NET_BRIDGE_NF_CALL_IPTABLES = 2, > NET_BRIDGE_NF_CALL_IP6TABLES = 3, > NET_BRIDGE_NF_FILTER_VLAN_TAGGED = 4, > NET_BRIDGE_NF_FILTER_PPPOE_TAGGED = 5, >}; > > >enum { > NET_IRDA_DISCOVERY=1, > NET_IRDA_DEVNAME=2, > NET_IRDA_DEBUG=3, > NET_IRDA_FAST_POLL=4, > NET_IRDA_DISCOVERY_SLOTS=5, > NET_IRDA_DISCOVERY_TIMEOUT=6, > NET_IRDA_SLOT_TIMEOUT=7, > NET_IRDA_MAX_BAUD_RATE=8, > NET_IRDA_MIN_TX_TURN_TIME=9, > NET_IRDA_MAX_TX_DATA_SIZE=10, > NET_IRDA_MAX_TX_WINDOW=11, > NET_IRDA_MAX_NOREPLY_TIME=12, > NET_IRDA_WARN_NOREPLY_TIME=13, > NET_IRDA_LAP_KEEPALIVE_TIME=14, >}; > > > >enum >{ > FS_NRINODE=1, > FS_STATINODE=2, > FS_MAXINODE=3, > FS_NRDQUOT=4, > FS_MAXDQUOT=5, > FS_NRFILE=6, > FS_MAXFILE=7, > FS_DENTRY=8, > FS_NRSUPER=9, > FS_MAXSUPER=10, > FS_OVERFLOWUID=11, > FS_OVERFLOWGID=12, > FS_LEASES=13, > FS_DIR_NOTIFY=14, > FS_LEASE_TIME=15, > FS_DQSTATS=16, > FS_XFS=17, > FS_AIO_NR=18, > FS_AIO_MAX_NR=19, > FS_INOTIFY=20, > FS_OCFS2=988, >}; > > >enum { > FS_DQ_LOOKUPS = 1, > FS_DQ_DROPS = 2, > FS_DQ_READS = 3, > FS_DQ_WRITES = 4, > FS_DQ_CACHE_HITS = 5, > FS_DQ_ALLOCATED = 6, > FS_DQ_FREE = 7, > FS_DQ_SYNCS = 8, > FS_DQ_WARNINGS = 9, >}; > > > > >enum { > DEV_CDROM=1, > DEV_HWMON=2, > DEV_PARPORT=3, > DEV_RAID=4, > DEV_MAC_HID=5, > DEV_SCSI=6, > DEV_IPMI=7, >}; > > >enum { > DEV_CDROM_INFO=1, > DEV_CDROM_AUTOCLOSE=2, > DEV_CDROM_AUTOEJECT=3, > DEV_CDROM_DEBUG=4, > DEV_CDROM_LOCK=5, > DEV_CDROM_CHECK_MEDIA=6 >}; > > >enum { > DEV_PARPORT_DEFAULT=-3 >}; > > >enum { > DEV_RAID_SPEED_LIMIT_MIN=1, > DEV_RAID_SPEED_LIMIT_MAX=2 >}; > > >enum { > DEV_PARPORT_DEFAULT_TIMESLICE=1, > DEV_PARPORT_DEFAULT_SPINTIME=2 >}; > > >enum { > DEV_PARPORT_SPINTIME=1, > DEV_PARPORT_BASE_ADDR=2, > DEV_PARPORT_IRQ=3, > DEV_PARPORT_DMA=4, > DEV_PARPORT_MODES=5, > DEV_PARPORT_DEVICES=6, > DEV_PARPORT_AUTOPROBE=16 >}; > > >enum { > DEV_PARPORT_DEVICES_ACTIVE=-3, >}; > > >enum { > DEV_PARPORT_DEVICE_TIMESLICE=1, >}; > > >enum { > DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES=1, > DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES=2, > DEV_MAC_HID_MOUSE_BUTTON_EMULATION=3, > DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE=4, > 
DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE=5, > DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES=6 >}; > > >enum { > DEV_SCSI_LOGGING_LEVEL=1, >}; > > >enum { > DEV_IPMI_POWEROFF_POWERCYCLE=1, >}; > > >enum >{ > ABI_DEFHANDLER_COFF=1, > ABI_DEFHANDLER_ELF=2, > ABI_DEFHANDLER_LCALL7=3, > ABI_DEFHANDLER_LIBCSO=4, > ABI_TRACE=5, > ABI_FAKE_UTSNAME=6, >}; > >struct ctl_table; >struct nsproxy; >struct ctl_table_root; >struct ctl_table_header; >struct ctl_dir; > >typedef struct ctl_table ctl_table; > >typedef int proc_handler (struct ctl_table *ctl, int write, > void *buffer, size_t *lenp, loff_t *ppos); > >extern int proc_dostring(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_dointvec(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_dointvec_minmax(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_dointvec_jiffies(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_dointvec_ms_jiffies(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_doulongvec_minmax(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, > void *, size_t *, loff_t *); >extern int proc_do_large_bitmap(struct ctl_table *, int, > void *, size_t *, loff_t *); > >struct ctl_table_poll { > atomic_t event; > wait_queue_head_t wait; >}; > >static inline __attribute__((always_inline)) void *proc_sys_poll_event(struct ctl_table_poll *poll) >{ > return (void *)(unsigned long)(*(volatile int *)&(&poll->event)->counter); >} > >struct ctl_table >{ > const char *procname; > void *data; > int maxlen; > umode_t mode; > struct ctl_table *child; > proc_handler *proc_handler; > struct ctl_table_poll *poll; > void *extra1; > void *extra2; >}; > >struct ctl_node { > struct rb_node node; > struct ctl_table_header *header; >}; > > > >struct ctl_table_header >{ > union { > struct { > struct ctl_table *ctl_table; > int used; > int count; > int nreg; > }; > struct rcu_head rcu; > }; > struct completion *unregistering; > struct ctl_table *ctl_table_arg; > struct ctl_table_root *root; > struct ctl_table_set *set; > struct ctl_dir *parent; > struct ctl_node *node; >}; > >struct ctl_dir { > > struct ctl_table_header header; > struct rb_root root; >}; > >struct ctl_table_set { > int (*is_seen)(struct ctl_table_set *); > struct ctl_dir dir; >}; > >struct ctl_table_root { > struct ctl_table_set default_set; > struct ctl_table_set *(*lookup)(struct ctl_table_root *root, > struct nsproxy *namespaces); > int (*permissions)(struct ctl_table_root *root, > struct nsproxy *namespaces, struct ctl_table *table); >}; > > >struct ctl_path { > const char *procname; >}; > > > >void proc_sys_poll_notify(struct ctl_table_poll *poll); > >extern void setup_sysctl_set(struct ctl_table_set *p, > struct ctl_table_root *root, > int (*is_seen)(struct ctl_table_set *)); >extern void retire_sysctl_set(struct ctl_table_set *set); > >void register_sysctl_root(struct ctl_table_root *root); >struct ctl_table_header *__register_sysctl_table( > struct ctl_table_set *set, > const char *path, struct ctl_table *table); >struct ctl_table_header *__register_sysctl_paths( > struct ctl_table_set *set, > const struct ctl_path *path, struct ctl_table *table); >struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table); >struct ctl_table_header *register_sysctl_table(struct ctl_table * table); >struct 
ctl_table_header *register_sysctl_paths(const struct ctl_path *path, > struct ctl_table *table); > >void unregister_sysctl_table(struct ctl_table_header * table); > >extern int sysctl_init(void); > > > > > > > >typedef int32_t key_serial_t; > > >typedef uint32_t key_perm_t; > >struct key; > > > > > > > > > > > > > > > > > > > > > > > > > > > > > >static inline __attribute__((always_inline)) u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) >{ > *remainder = ({ unsigned int __r, __b = (divisor); if (!__builtin_constant_p(__b) || __b == 0 || (7 < 4 && (__b & (__b - 1)) != 0)) { __r = ({ register unsigned int __base asm("r4") = __b; register unsigned long long __n asm("r0") = dividend; register unsigned long long __res asm("r2"); register unsigned int __rem asm("r1"); asm( ".ifnc " "%0" "," "r1" " ; .err ; .endif\n\t" ".ifnc " "%1" "," "r2" " ; .err ; .endif\n\t" ".ifnc " "%2" "," "r0" " ; .err ; .endif\n\t" ".ifnc " "%3" "," "r4" " ; .err ; .endif\n\t" "bl __do_div64" : "=r" (__rem), "=r" (__res) : "r" (__n), "r" (__base) : "ip", "lr", "cc"); dividend = __res; __rem; }); } else if ((__b & (__b - 1)) == 0) { __r = dividend; __r &= (__b - 1); dividend /= __b; } else { unsigned long long __res, __x, __t, __m, __n = dividend; unsigned int __c, __p, __z = 0; __r = __n; __p = 1 << ({ unsigned int __left = (__b), __nr = 0; if (__left & 0xffff0000) __nr += 16, __left >>= 16; if (__left & 0x0000ff00) __nr += 8, __left >>= 8; if (__left & 0x000000f0) __nr += 4, __left >>= 4; if (__left & 0x0000000c) __nr += 2, __left >>= 2; if (__left & 0x00000002) __nr += 1; __nr; }); __m = (~0ULL / __b) * __p; __m += (((~0ULL % __b + 1) * __p) + __b - 1) / __b; __x = ~0ULL / __b * __b - 1; __res = (__m & 0xffffffff) * (__x & 0xffffffff); __res >>= 32; __res += (__m & 0xffffffff) * (__x >> 32); __t = __res; __res += (__x & 0xffffffff) * (__m >> 32); __t = (__res < __t) ? 
(1ULL << 32) : 0; __res = (__res >> 32) + __t; __res += (__m >> 32) * (__x >> 32); __res /= __p; if (~0ULL % (__b / (__b & -__b)) == 0) { __n /= (__b & -__b); __m = ~0ULL / (__b / (__b & -__b)); __p = 1; __c = 1; } else if (__res != __x / __b) { __c = 1; __m = (~0ULL / __b) * __p; __m += ((~0ULL % __b + 1) * __p) / __b; } else { unsigned int __bits = -(__m & -__m); __bits |= __m >> 32; __bits = (~__bits) << 1; if (!__bits) { __p /= (__m & -__m); __m /= (__m & -__m); } else { __p >>= ({ unsigned int __left = (__bits), __nr = 0; if (__left & 0xffff0000) __nr += 16, __left >>= 16; if (__left & 0x0000ff00) __nr += 8, __left >>= 8; if (__left & 0x000000f0) __nr += 4, __left >>= 4; if (__left & 0x0000000c) __nr += 2, __left >>= 2; if (__left & 0x00000002) __nr += 1; __nr; }); __m >>= ({ unsigned int __left = (__bits), __nr = 0; if (__left & 0xffff0000) __nr += 16, __left >>= 16; if (__left & 0x0000ff00) __nr += 8, __left >>= 8; if (__left & 0x000000f0) __nr += 4, __left >>= 4; if (__left & 0x0000000c) __nr += 2, __left >>= 2; if (__left & 0x00000002) __nr += 1; __nr; }); } __c = 0; } if (!__c) { asm ( "umull %Q0, %R0, %1, %Q2\n\t" "mov %Q0, #0" : "=&r" (__res) : "r" (__m), "r" (__n) : "cc" ); } else if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { __res = __m; asm ( "umlal %Q0, %R0, %Q1, %Q2\n\t" "mov %Q0, #0" : "+&r" (__res) : "r" (__m), "r" (__n) : "cc" ); } else { asm ( "umull %Q0, %R0, %Q1, %Q2\n\t" "cmn %Q0, %Q1\n\t" "adcs %R0, %R0, %R1\n\t" "adc %Q0, %3, #0" : "=&r" (__res) : "r" (__m), "r" (__n), "r" (__z) : "cc" ); } if (!(__m & ((1ULL << 63) | (1ULL << 31)))) { asm ( "umlal %R0, %Q0, %R1, %Q2\n\t" "umlal %R0, %Q0, %Q1, %R2\n\t" "mov %R0, #0\n\t" "umlal %Q0, %R0, %R1, %R2" : "+&r" (__res) : "r" (__m), "r" (__n) : "cc" ); } else { asm ( "umlal %R0, %Q0, %R2, %Q3\n\t" "umlal %R0, %1, %Q2, %R3\n\t" "mov %R0, #0\n\t" "adds %Q0, %1, %Q0\n\t" "adc %R0, %R0, #0\n\t" "umlal %Q0, %R0, %R2, %R3" : "+&r" (__res), "+&r" (__z) : "r" (__m), "r" (__n) : "cc" ); } __res /= __p; { unsigned int __res0 = __res; unsigned int __b0 = __b; __r -= __res0 * __b0; } dividend = __res; } __r; }); > return dividend; >} > > > >extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder); > > > >extern u64 div64_u64(u64 dividend, u64 divisor); > > > >extern s64 div64_s64(s64 dividend, s64 divisor); > >static inline __attribute__((always_inline)) u64 div_u64(u64 dividend, u32 divisor) >{ > u32 remainder; > return div_u64_rem(dividend, divisor, &remainder); >} > > > > > > >static inline __attribute__((always_inline)) s64 div_s64(s64 dividend, s32 divisor) >{ > s32 remainder; > return div_s64_rem(dividend, divisor, &remainder); >} > > >u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder); > >static inline __attribute__((always_inline)) __attribute__((always_inline)) u32 >__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder) >{ > u32 ret = 0; > > while (dividend >= divisor) { > > > asm("" : "+rm"(dividend)); > > dividend -= divisor; > ret++; > } > > *remainder = dividend; > > return ret; >} > > > > > >struct timespec { > __kernel_time_t tv_sec; > long tv_nsec; >}; > > >struct timeval { > __kernel_time_t tv_sec; > __kernel_suseconds_t tv_usec; >}; > >struct timezone { > int tz_minuteswest; > int tz_dsttime; >}; > > > >extern struct timezone sys_tz; > >static inline __attribute__((always_inline)) int timespec_equal(const struct timespec *a, > const struct timespec *b) >{ > return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); >} > > > > > > >static inline __attribute__((always_inline)) int 
timespec_compare(const struct timespec *lhs, const struct timespec *rhs) >{ > if (lhs->tv_sec < rhs->tv_sec) > return -1; > if (lhs->tv_sec > rhs->tv_sec) > return 1; > return lhs->tv_nsec - rhs->tv_nsec; >} > >static inline __attribute__((always_inline)) int timeval_compare(const struct timeval *lhs, const struct timeval *rhs) >{ > if (lhs->tv_sec < rhs->tv_sec) > return -1; > if (lhs->tv_sec > rhs->tv_sec) > return 1; > return lhs->tv_usec - rhs->tv_usec; >} > >extern unsigned long mktime(const unsigned int year, const unsigned int mon, > const unsigned int day, const unsigned int hour, > const unsigned int min, const unsigned int sec); > >extern void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec); > > > > > > >extern struct timespec timespec_add_safe(const struct timespec lhs, > const struct timespec rhs); > > >static inline __attribute__((always_inline)) struct timespec timespec_add(struct timespec lhs, > struct timespec rhs) >{ > struct timespec ts_delta; > set_normalized_timespec(&ts_delta, lhs.tv_sec + rhs.tv_sec, > lhs.tv_nsec + rhs.tv_nsec); > return ts_delta; >} > > > > >static inline __attribute__((always_inline)) struct timespec timespec_sub(struct timespec lhs, > struct timespec rhs) >{ > struct timespec ts_delta; > set_normalized_timespec(&ts_delta, lhs.tv_sec - rhs.tv_sec, > lhs.tv_nsec - rhs.tv_nsec); > return ts_delta; >} > >static inline __attribute__((always_inline)) bool timespec_valid(const struct timespec *ts) >{ > > if (ts->tv_sec < 0) > return false; > > if ((unsigned long)ts->tv_nsec >= 1000000000L) > return false; > return true; >} > >static inline __attribute__((always_inline)) bool timespec_valid_strict(const struct timespec *ts) >{ > if (!timespec_valid(ts)) > return false; > > if ((unsigned long long)ts->tv_sec >= ((long)(~0UL>>1))) > return false; > return true; >} > >extern void read_persistent_clock(struct timespec *ts); >extern void read_boot_clock(struct timespec *ts); >extern int update_persistent_clock(struct timespec now); >void timekeeping_init(void); >extern int timekeeping_suspended; > >unsigned long get_seconds(void); >struct timespec current_kernel_time(void); >struct timespec __current_kernel_time(void); >struct timespec get_monotonic_coarse(void); >void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, > struct timespec *wtom, struct timespec *sleep); >void timekeeping_inject_sleeptime(struct timespec *delta); > >static inline __attribute__((always_inline)) u32 arch_gettimeoffset(void) { return 0; } > > >extern void do_gettimeofday(struct timeval *tv); >extern int do_settimeofday(const struct timespec *tv); >extern int do_sys_settimeofday(const struct timespec *tv, > const struct timezone *tz); > >extern long do_utimes(int dfd, const char *filename, struct timespec *times, int flags); >struct itimerval; >extern int do_setitimer(int which, struct itimerval *value, > struct itimerval *ovalue); >extern unsigned int alarm_setitimer(unsigned int seconds); >extern int do_getitimer(int which, struct itimerval *value); >extern void getnstimeofday(struct timespec *tv); >extern void getrawmonotonic(struct timespec *ts); >extern void getnstime_raw_and_real(struct timespec *ts_raw, > struct timespec *ts_real); >extern void getboottime(struct timespec *ts); >extern void monotonic_to_bootbased(struct timespec *ts); >extern void get_monotonic_boottime(struct timespec *ts); > >extern struct timespec timespec_trunc(struct timespec t, unsigned gran); >extern int timekeeping_valid_for_hres(void); >extern u64 
timekeeping_max_deferment(void); >extern void timekeeping_leap_insert(int leapsecond); >extern int timekeeping_inject_offset(struct timespec *ts); > >struct tms; >extern void do_sys_times(struct tms *); > > > > > >struct tm { > > > > > int tm_sec; > > int tm_min; > > int tm_hour; > > int tm_mday; > > int tm_mon; > > long tm_year; > > int tm_wday; > > int tm_yday; >}; > >void time_to_tm(time_t totalsecs, int offset, struct tm *result); > >static inline __attribute__((always_inline)) s64 timespec_to_ns(const struct timespec *ts) >{ > return ((s64) ts->tv_sec * 1000000000L) + ts->tv_nsec; >} > >static inline __attribute__((always_inline)) s64 timeval_to_ns(const struct timeval *tv) >{ > return ((s64) tv->tv_sec * 1000000000L) + > tv->tv_usec * 1000L; >} > > > > > > > >extern struct timespec ns_to_timespec(const s64 nsec); > > > > > > > >extern struct timeval ns_to_timeval(const s64 nsec); > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void timespec_add_ns(struct timespec *a, u64 ns) >{ > a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, 1000000000L, &ns); > a->tv_nsec = ns; >} > >struct itimerspec { > struct timespec it_interval; > struct timespec it_value; >}; > >struct itimerval { > struct timeval it_interval; > struct timeval it_value; >}; > > > > > > > > > > > >struct timex { > unsigned int modes; > long offset; > long freq; > long maxerror; > long esterror; > int status; > long constant; > long precision; > long tolerance; > > > struct timeval time; > long tick; > > long ppsfreq; > long jitter; > int shift; > long stabil; > long jitcnt; > long calcnt; > long errcnt; > long stbcnt; > > int tai; > > int :32; int :32; int :32; int :32; > int :32; int :32; int :32; int :32; > int :32; int :32; int :32; >}; > > > > > > > > > > > > > > >typedef unsigned long cycles_t; > >static inline __attribute__((always_inline)) cycles_t get_cycles (void) >{ > return 0; >} > > >extern unsigned long tick_usec; >extern unsigned long tick_nsec; > >extern void ntp_init(void); >extern void ntp_clear(void); > >extern u64 ntp_tick_length(void); > >extern int second_overflow(unsigned long secs); >extern int do_adjtimex(struct timex *); >extern void hardpps(const struct timespec *, const struct timespec *); > >int read_current_timer(unsigned long *timer_val); > > >extern u64 __attribute__((section(".data"))) jiffies_64; >extern unsigned long volatile __attribute__((section(".data"))) jiffies; > > >u64 get_jiffies_64(void); > >extern unsigned long preset_lpj; > >extern unsigned int jiffies_to_msecs(const unsigned long j); >extern unsigned int jiffies_to_usecs(const unsigned long j); >extern unsigned long msecs_to_jiffies(const unsigned int m); >extern unsigned long usecs_to_jiffies(const unsigned int u); >extern unsigned long timespec_to_jiffies(const struct timespec *value); >extern void jiffies_to_timespec(const unsigned long jiffies, > struct timespec *value); >extern unsigned long timeval_to_jiffies(const struct timeval *value); >extern void jiffies_to_timeval(const unsigned long jiffies, > struct timeval *value); >extern clock_t jiffies_to_clock_t(unsigned long x); >extern unsigned long clock_t_to_jiffies(unsigned long x); >extern u64 jiffies_64_to_clock_t(u64 x); >extern u64 nsec_to_clock_t(u64 x); >extern u64 nsecs_to_jiffies64(u64 n); >extern unsigned long nsecs_to_jiffies(u64 n); > > >union ktime { > s64 tv64; > >}; > >typedef union ktime ktime_t; > >static inline __attribute__((always_inline)) ktime_t ktime_set(const long secs, const unsigned long nsecs) >{ > > > > > return 
(ktime_t) { .tv64 = (s64)secs * 1000000000L + (s64)nsecs }; >} > >static inline __attribute__((always_inline)) ktime_t timespec_to_ktime(struct timespec ts) >{ > return ktime_set(ts.tv_sec, ts.tv_nsec); >} > > >static inline __attribute__((always_inline)) ktime_t timeval_to_ktime(struct timeval tv) >{ > return ktime_set(tv.tv_sec, tv.tv_usec * 1000L); >} > >static inline __attribute__((always_inline)) int ktime_equal(const ktime_t cmp1, const ktime_t cmp2) >{ > return cmp1.tv64 == cmp2.tv64; >} > >static inline __attribute__((always_inline)) s64 ktime_to_us(const ktime_t kt) >{ > struct timeval tv = ns_to_timeval((kt).tv64); > return (s64) tv.tv_sec * 1000000L + tv.tv_usec; >} > >static inline __attribute__((always_inline)) s64 ktime_to_ms(const ktime_t kt) >{ > struct timeval tv = ns_to_timeval((kt).tv64); > return (s64) tv.tv_sec * 1000L + tv.tv_usec / 1000L; >} > >static inline __attribute__((always_inline)) s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) >{ > return ktime_to_us(({ (ktime_t){ .tv64 = (later).tv64 - (earlier).tv64 }; })); >} > >static inline __attribute__((always_inline)) ktime_t ktime_add_us(const ktime_t kt, const u64 usec) >{ > return ({ (ktime_t){ .tv64 = (kt).tv64 + (usec * 1000) }; }); >} > >static inline __attribute__((always_inline)) ktime_t ktime_sub_us(const ktime_t kt, const u64 usec) >{ > return ({ (ktime_t){ .tv64 = (kt).tv64 - (usec * 1000) }; }); >} > >extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs); > >extern void ktime_get_ts(struct timespec *ts); > > > > >static inline __attribute__((always_inline)) ktime_t ns_to_ktime(u64 ns) >{ > static const ktime_t ktime_zero = { .tv64 = 0 }; > return ({ (ktime_t){ .tv64 = (ktime_zero).tv64 + (ns) }; }); >} > > > > > >struct tvec_base; > >struct timer_list { > > > > > struct list_head entry; > unsigned long expires; > struct tvec_base *base; > > void (*function)(unsigned long); > unsigned long data; > > int slack; > >}; > >extern struct tvec_base boot_tvec_bases; > >void init_timer_key(struct timer_list *timer, > const char *name, > struct lock_class_key *key); >void init_timer_deferrable_key(struct timer_list *timer, > const char *name, > struct lock_class_key *key); > >static inline __attribute__((always_inline)) void destroy_timer_on_stack(struct timer_list *timer) { } >static inline __attribute__((always_inline)) void init_timer_on_stack_key(struct timer_list *timer, > const char *name, > struct lock_class_key *key) >{ > init_timer_key(timer, name, key); >} > > >static inline __attribute__((always_inline)) void setup_timer_key(struct timer_list * timer, > const char *name, > struct lock_class_key *key, > void (*function)(unsigned long), > unsigned long data) >{ > timer->function = function; > timer->data = data; > init_timer_key(timer, name, key); >} > >static inline __attribute__((always_inline)) void setup_timer_on_stack_key(struct timer_list *timer, > const char *name, > struct lock_class_key *key, > void (*function)(unsigned long), > unsigned long data) >{ > timer->function = function; > timer->data = data; > init_timer_on_stack_key(timer, name, key); >} > >extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer, > const char *name, > struct lock_class_key *key, > void (*function)(unsigned long), > unsigned long data); > >static inline __attribute__((always_inline)) int timer_pending(const struct timer_list * timer) >{ > return timer->entry.next != ((void *)0); >} > >extern void add_timer_on(struct timer_list *timer, int cpu); >extern int del_timer(struct 
timer_list * timer); >extern int mod_timer(struct timer_list *timer, unsigned long expires); >extern int mod_timer_pending(struct timer_list *timer, unsigned long expires); >extern int mod_timer_pinned(struct timer_list *timer, unsigned long expires); > >extern void set_timer_slack(struct timer_list *time, int slack_hz); > >extern unsigned long get_next_timer_interrupt(unsigned long now); > >static inline __attribute__((always_inline)) void init_timer_stats(void) >{ >} > >static inline __attribute__((always_inline)) void timer_stats_timer_set_start_info(struct timer_list *timer) >{ >} > >static inline __attribute__((always_inline)) void timer_stats_timer_clear_start_info(struct timer_list *timer) >{ >} > > >extern void add_timer(struct timer_list *timer); > >extern int try_to_del_timer_sync(struct timer_list *timer); > > > extern int del_timer_sync(struct timer_list *timer); > > > > > > >extern void init_timers(void); >extern void run_local_timers(void); >struct hrtimer; >extern enum hrtimer_restart it_real_fn(struct hrtimer *); > >unsigned long __round_jiffies(unsigned long j, int cpu); >unsigned long __round_jiffies_relative(unsigned long j, int cpu); >unsigned long round_jiffies(unsigned long j); >unsigned long round_jiffies_relative(unsigned long j); > >unsigned long __round_jiffies_up(unsigned long j, int cpu); >unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); >unsigned long round_jiffies_up(unsigned long j); >unsigned long round_jiffies_up_relative(unsigned long j); > > > > > > > >struct workqueue_struct; > >struct work_struct; >typedef void (*work_func_t)(struct work_struct *work); > > > > > > > >enum { > WORK_STRUCT_PENDING_BIT = 0, > WORK_STRUCT_DELAYED_BIT = 1, > WORK_STRUCT_CWQ_BIT = 2, > WORK_STRUCT_LINKED_BIT = 3, > > > > > WORK_STRUCT_COLOR_SHIFT = 4, > > > WORK_STRUCT_COLOR_BITS = 4, > > WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, > WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, > WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, > WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, > > > > WORK_STRUCT_STATIC = 0, > > > > > > > WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, > WORK_NO_COLOR = WORK_NR_COLORS, > > > WORK_CPU_UNBOUND = 2, > WORK_CPU_NONE = 2 + 1, > WORK_CPU_LAST = WORK_CPU_NONE, > > > > > > > WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + > WORK_STRUCT_COLOR_BITS, > > WORK_STRUCT_FLAG_MASK = (1UL << WORK_STRUCT_FLAG_BITS) - 1, > WORK_STRUCT_WQ_DATA_MASK = ~WORK_STRUCT_FLAG_MASK, > WORK_STRUCT_NO_CPU = WORK_CPU_NONE << WORK_STRUCT_FLAG_BITS, > > > WORK_BUSY_PENDING = 1 << 0, > WORK_BUSY_RUNNING = 1 << 1, >}; > >struct work_struct { > atomic_long_t data; > struct list_head entry; > work_func_t func; > > > >}; > > > > > >struct delayed_work { > struct work_struct work; > struct timer_list timer; >}; > >static inline __attribute__((always_inline)) struct delayed_work *to_delayed_work(struct work_struct *work) >{ > return ({ const typeof( ((struct delayed_work *)0)->work ) *__mptr = (work); (struct delayed_work *)( (char *)__mptr - __builtin_offsetof(struct delayed_work,work) );}); >} > >struct execute_work { > struct work_struct work; >}; > >static inline __attribute__((always_inline)) void __init_work(struct work_struct *work, int onstack) { } >static inline __attribute__((always_inline)) void destroy_work_on_stack(struct work_struct *work) { } >static inline __attribute__((always_inline)) unsigned int work_static(struct work_struct *work) { return 0; } > >enum { > WQ_NON_REENTRANT = 1 << 0, > WQ_UNBOUND = 1 << 1, > WQ_FREEZABLE = 1 
<< 2, > WQ_MEM_RECLAIM = 1 << 3, > WQ_HIGHPRI = 1 << 4, > WQ_CPU_INTENSIVE = 1 << 5, > > WQ_DRAINING = 1 << 6, > WQ_RESCUER = 1 << 7, > > WQ_MAX_ACTIVE = 512, > WQ_MAX_UNBOUND_PER_CPU = 4, > WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, >}; > >extern struct workqueue_struct *system_wq; >extern struct workqueue_struct *system_long_wq; >extern struct workqueue_struct *system_nrt_wq; >extern struct workqueue_struct *system_unbound_wq; >extern struct workqueue_struct *system_freezable_wq; >extern struct workqueue_struct *system_nrt_freezable_wq; > >extern struct workqueue_struct * >__alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, > struct lock_class_key *key, const char *lock_name, ...) __attribute__((format(printf, 1, 6))); > >extern void destroy_workqueue(struct workqueue_struct *wq); > >extern int queue_work(struct workqueue_struct *wq, struct work_struct *work); >extern int queue_work_on(int cpu, struct workqueue_struct *wq, > struct work_struct *work); >extern int queue_delayed_work(struct workqueue_struct *wq, > struct delayed_work *work, unsigned long delay); >extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, > struct delayed_work *work, unsigned long delay); > >extern void flush_workqueue(struct workqueue_struct *wq); >extern void drain_workqueue(struct workqueue_struct *wq); >extern void flush_scheduled_work(void); > >extern int schedule_work(struct work_struct *work); >extern int schedule_work_on(int cpu, struct work_struct *work); >extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay); >extern int schedule_delayed_work_on(int cpu, struct delayed_work *work, > unsigned long delay); >extern int schedule_on_each_cpu(work_func_t func); >extern int keventd_up(void); > >int execute_in_process_context(work_func_t fn, struct execute_work *); > >extern bool flush_work(struct work_struct *work); >extern bool flush_work_sync(struct work_struct *work); >extern bool cancel_work_sync(struct work_struct *work); > >extern bool flush_delayed_work(struct delayed_work *dwork); >extern bool flush_delayed_work_sync(struct delayed_work *work); >extern bool cancel_delayed_work_sync(struct delayed_work *dwork); > >extern void workqueue_set_max_active(struct workqueue_struct *wq, > int max_active); >extern bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq); >extern unsigned int work_cpu(struct work_struct *work); >extern unsigned int work_busy(struct work_struct *work); > > > > > > > >static inline __attribute__((always_inline)) bool cancel_delayed_work(struct delayed_work *work) >{ > bool ret; > > ret = del_timer_sync(&work->timer); > if (ret) > _clear_bit(WORK_STRUCT_PENDING_BIT,((unsigned long *)(&(&work->work)->data))); > return ret; >} > > > > > > >static inline __attribute__((always_inline)) bool __cancel_delayed_work(struct delayed_work *work) >{ > bool ret; > > ret = del_timer(&work->timer); > if (ret) > _clear_bit(WORK_STRUCT_PENDING_BIT,((unsigned long *)(&(&work->work)->data))); > return ret; >} > > > > > > > >long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg); > > > >extern void freeze_workqueues_begin(void); >extern bool freeze_workqueues_busy(void); >extern void thaw_workqueues(void); > > >enum jump_label_type { > JUMP_LABEL_DISABLE = 0, > JUMP_LABEL_ENABLE, >}; > >struct module; > >struct static_key { > atomic_t enabled; >}; > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void jump_label_init(void) >{ >} > >struct static_key_deferred { > struct static_key key; >}; 
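>
>/*
> * The static_key definitions that follow are the generic fallback used
> * when asm-goto jump labels are unavailable: each key is just an atomic
> * counter, read through a volatile load and tested with
> * __builtin_expect(), so the unlikely branch is predicted rather than
> * patched at run time.  A minimal usage sketch (example_key and
> * handle_rare_case are hypothetical names, not part of this test case):
> *
> *   static struct static_key example_key;     // zero-initialised: off
> *
> *   if (static_key_false(&example_key))       // expected-not-taken path
> *           handle_rare_case();
> *   static_key_slow_inc(&example_key);        // enables the branch
> */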
> >static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_key_false(struct static_key *key) >{ > if (__builtin_expect(!!((*(volatile int *)&(&key->enabled)->counter)), 0) > 0) > return true; > return false; >} > >static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_key_true(struct static_key *key) >{ > if (__builtin_expect(!!((*(volatile int *)&(&key->enabled)->counter)), 1) > 0) > return true; > return false; >} > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) bool static_branch(struct static_key *key) >{ > if (__builtin_expect(!!((*(volatile int *)&(&key->enabled)->counter)), 0) > 0) > return true; > return false; >} > >static inline __attribute__((always_inline)) void static_key_slow_inc(struct static_key *key) >{ > atomic_add(1, &key->enabled); >} > >static inline __attribute__((always_inline)) void static_key_slow_dec(struct static_key *key) >{ > atomic_sub(1, &key->enabled); >} > >static inline __attribute__((always_inline)) void static_key_slow_dec_deferred(struct static_key_deferred *key) >{ > static_key_slow_dec(&key->key); >} > >static inline __attribute__((always_inline)) int jump_label_text_reserved(void *start, void *end) >{ > return 0; >} > >static inline __attribute__((always_inline)) void jump_label_lock(void) {} >static inline __attribute__((always_inline)) void jump_label_unlock(void) {} > >static inline __attribute__((always_inline)) int jump_label_apply_nops(struct module *mod) >{ > return 0; >} > >static inline __attribute__((always_inline)) void >jump_label_rate_limit(struct static_key_deferred *key, > unsigned long rl) >{ >} > >static inline __attribute__((always_inline)) bool static_key_enabled(struct static_key *key) >{ > return ((*(volatile int *)&(&key->enabled)->counter) > 0); >} > > > >struct module; >struct tracepoint; > >struct tracepoint_func { > void *func; > void *data; >}; > >struct tracepoint { > const char *name; > struct static_key key; > void (*regfunc)(void); > void (*unregfunc)(void); > struct tracepoint_func *funcs; >}; > > > > > >extern int tracepoint_probe_register(const char *name, void *probe, void *data); > > > > > >extern int >tracepoint_probe_unregister(const char *name, void *probe, void *data); > >extern int tracepoint_probe_register_noupdate(const char *name, void *probe, > void *data); >extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe, > void *data); >extern void tracepoint_probe_update_all(void); > >struct tracepoint_iter { > > > > struct tracepoint * const *tracepoint; >}; > >extern void tracepoint_iter_start(struct tracepoint_iter *iter); >extern void tracepoint_iter_next(struct tracepoint_iter *iter); >extern void tracepoint_iter_stop(struct tracepoint_iter *iter); >extern void tracepoint_iter_reset(struct tracepoint_iter *iter); > > > > > > >static inline __attribute__((always_inline)) void tracepoint_synchronize_unregister(void) >{ > synchronize_sched(); >} > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > >struct raw_prio_tree_node { > struct prio_tree_node *left; > struct prio_tree_node *right; > struct prio_tree_node *parent; >}; > >struct prio_tree_node { > struct prio_tree_node *left; > struct prio_tree_node *right; > struct prio_tree_node *parent; > unsigned long start; > unsigned long last; >}; > >struct prio_tree_root { > struct prio_tree_node *prio_tree_node; > unsigned short index_bits; > unsigned short raw; > > > > >}; > >struct prio_tree_iter { > struct prio_tree_node *cur; > 
unsigned long mask; > unsigned long value; > int size_level; > > struct prio_tree_root *root; > unsigned long r_index; > unsigned long h_index; >}; > >static inline __attribute__((always_inline)) void prio_tree_iter_init(struct prio_tree_iter *iter, > struct prio_tree_root *root, unsigned long r_index, unsigned long h_index) >{ > iter->root = root; > iter->r_index = r_index; > iter->h_index = h_index; > iter->cur = ((void *)0); >} > >static inline __attribute__((always_inline)) int prio_tree_empty(const struct prio_tree_root *root) >{ > return root->prio_tree_node == ((void *)0); >} > >static inline __attribute__((always_inline)) int prio_tree_root(const struct prio_tree_node *node) >{ > return node->parent == node; >} > >static inline __attribute__((always_inline)) int prio_tree_left_empty(const struct prio_tree_node *node) >{ > return node->left == node; >} > >static inline __attribute__((always_inline)) int prio_tree_right_empty(const struct prio_tree_node *node) >{ > return node->right == node; >} > > >struct prio_tree_node *prio_tree_replace(struct prio_tree_root *root, > struct prio_tree_node *old, struct prio_tree_node *node); >struct prio_tree_node *prio_tree_insert(struct prio_tree_root *root, > struct prio_tree_node *node); >void prio_tree_remove(struct prio_tree_root *root, struct prio_tree_node *node); >struct prio_tree_node *prio_tree_next(struct prio_tree_iter *iter); > > > > > > > >enum page_debug_flags { > PAGE_DEBUG_FLAG_POISON, > PAGE_DEBUG_FLAG_GUARD, >}; > > > > > > >struct page; >struct vm_area_struct; > >struct cpu_user_fns { > void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); > void (*cpu_copy_user_highpage)(struct page *to, struct page *from, > unsigned long vaddr, struct vm_area_struct *vma); >}; > > >extern struct cpu_user_fns cpu_user; > >extern void copy_page(void *to, const void *from); > > > > > > > > >typedef u32 pteval_t; >typedef u32 pmdval_t; > >typedef pteval_t pte_t; >typedef pmdval_t pmd_t; >typedef pmdval_t pgd_t[2]; >typedef pteval_t pgprot_t; > > > > > >typedef struct page *pgtable_t; > > >extern int pfn_valid(unsigned long); > > > > > > > > > > > > > >extern unsigned long __pv_phys_offset; > >static inline __attribute__((always_inline)) unsigned long __virt_to_phys(unsigned long x) >{ > unsigned long t; > __asm__("@ __pv_stub\n" "1: " "add" " %0, %1, %2\n" " .pushsection .pv_table,\"a\"\n" " .long 1b\n" " .popsection\n" : "=r" (t) : "r" (x), "I" (0x81000000)); > return t; >} > >static inline __attribute__((always_inline)) unsigned long __phys_to_virt(unsigned long x) >{ > unsigned long t; > __asm__("@ __pv_stub\n" "1: " "sub" " %0, %1, %2\n" " .pushsection .pv_table,\"a\"\n" " .long 1b\n" " .popsection\n" : "=r" (t) : "r" (x), "I" (0x81000000)); > return t; >} > >static inline __attribute__((always_inline)) phys_addr_t virt_to_phys(const volatile void *x) >{ > return __virt_to_phys((unsigned long)(x)); >} > >static inline __attribute__((always_inline)) void *phys_to_virt(phys_addr_t x) >{ > return (void *)(__phys_to_virt((unsigned long)(x))); >} > >static inline __attribute__((always_inline)) __attribute__((deprecated)) unsigned long virt_to_bus(void *x) >{ > return __virt_to_phys((unsigned long)x); >} > >static inline __attribute__((always_inline)) __attribute__((deprecated)) void *bus_to_virt(unsigned long x) >{ > return (void *)__phys_to_virt(x); >} > > > > > > > > > > > > > >static inline __attribute__((always_inline)) __attribute__((__const__)) >int __get_order(unsigned long size) >{ > int order; > > size--; > size >>= 
12; > > order = fls(size); > > > > return order; >} > > > > > > > > >typedef struct { > > atomic64_t id; > > unsigned int vmalloc_seq; >} mm_context_t; > > > > > > > >struct address_space; > >struct page { > > unsigned long flags; > > struct address_space *mapping; > > > > > > > > struct { > union { > unsigned long index; > void *freelist; > }; > > union { > > unsigned counters; > > > struct { > > union { > > atomic_t _mapcount; > > struct { > unsigned inuse:16; > unsigned objects:15; > unsigned frozen:1; > }; > }; > atomic_t _count; > }; > }; > }; > > > union { > struct list_head lru; > > > struct { > struct page *next; > > > > > short int pages; > short int pobjects; > > }; > }; > > > union { > unsigned long private; > > struct kmem_cache *slab; > struct page *first_page; > }; > >} > > > > > > > >; > >struct page_frag { > struct page *page; > > > > > __u16 offset; > __u16 size; > >}; > >typedef unsigned long vm_flags_t; > > > > > > >struct vm_region { > struct rb_node vm_rb; > vm_flags_t vm_flags; > unsigned long vm_start; > unsigned long vm_end; > unsigned long vm_top; > unsigned long vm_pgoff; > struct file *vm_file; > > int vm_usage; > bool vm_icache_flushed : 1; > >}; > > > > > > > >struct vm_area_struct { > struct mm_struct * vm_mm; > unsigned long vm_start; > unsigned long vm_end; > > > > struct vm_area_struct *vm_next, *vm_prev; > > pgprot_t vm_page_prot; > unsigned long vm_flags; > > struct rb_node vm_rb; > > union { > struct { > struct list_head list; > void *parent; > struct vm_area_struct *head; > } vm_set; > > struct raw_prio_tree_node prio_tree_node; > const char *anon_name; > } shared; > > > > > > > > struct list_head anon_vma_chain; > > struct anon_vma *anon_vma; > > > const struct vm_operations_struct *vm_ops; > > > unsigned long vm_pgoff; > > struct file * vm_file; > void * vm_private_data; > > > > > > > >}; > >struct core_thread { > struct task_struct *task; > struct core_thread *next; >}; > >struct core_state { > atomic_t nr_threads; > struct core_thread dumper; > struct completion startup; >}; > >enum { > MM_FILEPAGES, > MM_ANONPAGES, > MM_SWAPENTS, > NR_MM_COUNTERS >}; > >struct mm_rss_stat { > atomic_long_t count[NR_MM_COUNTERS]; >}; > >struct mm_struct { > struct vm_area_struct * mmap; > struct rb_root mm_rb; > struct vm_area_struct * mmap_cache; > > unsigned long (*get_unmapped_area) (struct file *filp, > unsigned long addr, unsigned long len, > unsigned long pgoff, unsigned long flags); > void (*unmap_area) (struct mm_struct *mm, unsigned long addr); > > unsigned long mmap_base; > unsigned long task_size; > unsigned long cached_hole_size; > unsigned long free_area_cache; > pgd_t * pgd; > atomic_t mm_users; > atomic_t mm_count; > int map_count; > > spinlock_t page_table_lock; > struct rw_semaphore mmap_sem; > > struct list_head mmlist; > > > > > > unsigned long hiwater_rss; > unsigned long hiwater_vm; > > unsigned long total_vm; > unsigned long locked_vm; > unsigned long pinned_vm; > unsigned long shared_vm; > unsigned long exec_vm; > unsigned long stack_vm; > unsigned long reserved_vm; > unsigned long def_flags; > unsigned long nr_ptes; > unsigned long start_code, end_code, start_data, end_data; > unsigned long start_brk, brk, start_stack; > unsigned long arg_start, arg_end, env_start, env_end; > > unsigned long saved_auxv[(2*(0 + 19 + 1))]; > > > > > > struct mm_rss_stat rss_stat; > > struct linux_binfmt *binfmt; > > cpumask_var_t cpu_vm_mask_var; > > > mm_context_t context; > > unsigned int faultstamp; > unsigned int token_priority; > unsigned int 
last_interval; > > unsigned long flags; > > struct core_state *core_state; > > struct task_struct *owner; > > > > struct file *exe_file; > unsigned long num_exe_file_vmas; > >}; > >static inline __attribute__((always_inline)) void mm_init_cpumask(struct mm_struct *mm) >{ > > > >} > > >static inline __attribute__((always_inline)) cpumask_t *mm_cpumask(struct mm_struct *mm) >{ > return mm->cpu_vm_mask_var; >} > > > >static inline __attribute__((always_inline)) const char *vma_get_anon_name(struct vm_area_struct *vma) >{ > if (vma->vm_file) > return ((void *)0); > > return vma->shared.anon_name; >} > > >static inline __attribute__((always_inline)) void >kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) >{ >} > >static inline __attribute__((always_inline)) void >kmemcheck_free_shadow(struct page *page, int order) >{ >} > >static inline __attribute__((always_inline)) void >kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object, > size_t size) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_slab_free(struct kmem_cache *s, void *object, > size_t size) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_pagealloc_alloc(struct page *p, > unsigned int order, gfp_t gfpflags) >{ >} > >static inline __attribute__((always_inline)) bool kmemcheck_page_is_tracked(struct page *p) >{ > return false; >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_unallocated(void *address, unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_uninitialized(void *address, unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_initialized(void *address, unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_freed(void *address, unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_unallocated_pages(struct page *p, > unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_uninitialized_pages(struct page *p, > unsigned int n) >{ >} > >static inline __attribute__((always_inline)) void kmemcheck_mark_initialized_pages(struct page *p, > unsigned int n) >{ >} > >static inline __attribute__((always_inline)) bool kmemcheck_is_obj_initialized(unsigned long addr, size_t size) >{ > return true; >} > > > > > > > > > > > > > > > > > > > > > >typedef struct { unsigned long bits[((((1 << 0)) + (8 * sizeof(long)) - 1) / (8 * sizeof(long)))]; } nodemask_t; >extern nodemask_t _unused_nodemask_arg_; > > >static inline __attribute__((always_inline)) void __node_set(int node, volatile nodemask_t *dstp) >{ > _set_bit(node,dstp->bits); >} > > >static inline __attribute__((always_inline)) void __node_clear(int node, volatile nodemask_t *dstp) >{ > _clear_bit(node,dstp->bits); >} > > >static inline __attribute__((always_inline)) void __nodes_setall(nodemask_t *dstp, int nbits) >{ > bitmap_fill(dstp->bits, nbits); >} > > >static inline __attribute__((always_inline)) void __nodes_clear(nodemask_t *dstp, int nbits) >{ > bitmap_zero(dstp->bits, nbits); >} > > > > > > >static inline __attribute__((always_inline)) int __node_test_and_set(int node, nodemask_t *addr) >{ > return _test_and_set_bit(node,addr->bits); >} > > > >static inline __attribute__((always_inline)) void __nodes_and(nodemask_t *dstp, const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > bitmap_and(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void 
__nodes_or(nodemask_t *dstp, const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > bitmap_or(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_xor(nodemask_t *dstp, const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > bitmap_xor(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_andnot(nodemask_t *dstp, const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > bitmap_andnot(dstp->bits, src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_complement(nodemask_t *dstp, > const nodemask_t *srcp, int nbits) >{ > bitmap_complement(dstp->bits, srcp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __nodes_equal(const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > return bitmap_equal(src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __nodes_intersects(const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > return bitmap_intersects(src1p->bits, src2p->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __nodes_subset(const nodemask_t *src1p, > const nodemask_t *src2p, int nbits) >{ > return bitmap_subset(src1p->bits, src2p->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __nodes_empty(const nodemask_t *srcp, int nbits) >{ > return bitmap_empty(srcp->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __nodes_full(const nodemask_t *srcp, int nbits) >{ > return bitmap_full(srcp->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __nodes_weight(const nodemask_t *srcp, int nbits) >{ > return bitmap_weight(srcp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_shift_right(nodemask_t *dstp, > const nodemask_t *srcp, int n, int nbits) >{ > bitmap_shift_right(dstp->bits, srcp->bits, n, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_shift_left(nodemask_t *dstp, > const nodemask_t *srcp, int n, int nbits) >{ > bitmap_shift_left(dstp->bits, srcp->bits, n, nbits); >} > > > > > >static inline __attribute__((always_inline)) int __first_node(const nodemask_t *srcp) >{ > return ({ int __min1 = ((1 << 0)); int __min2 = (_find_first_bit_le(srcp->bits,(1 << 0))); __min1 < __min2 ? __min1: __min2; }); >} > > >static inline __attribute__((always_inline)) int __next_node(int n, const nodemask_t *srcp) >{ > return ({ int __min1 = ((1 << 0)); int __min2 = (_find_next_bit_le(srcp->bits,(1 << 0),n+1)); __min1 < __min2 ? __min1: __min2; }); >} > >static inline __attribute__((always_inline)) void init_nodemask_of_node(nodemask_t *mask, int node) >{ > __nodes_clear(&(*mask), (1 << 0)); > __node_set((node), &(*mask)); >} > >static inline __attribute__((always_inline)) int __first_unset_node(const nodemask_t *maskp) >{ > return ({ int __min1 = ((1 << 0)); int __min2 = (_find_first_zero_bit_le(maskp->bits,(1 << 0))); __min1 < __min2 ? 
__min1: __min2; }) > ; >} > >static inline __attribute__((always_inline)) int __nodemask_scnprintf(char *buf, int len, > const nodemask_t *srcp, int nbits) >{ > return bitmap_scnprintf(buf, len, srcp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __nodemask_parse_user(const char *buf, int len, > nodemask_t *dstp, int nbits) >{ > return bitmap_parse_user(buf, len, dstp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __nodelist_scnprintf(char *buf, int len, > const nodemask_t *srcp, int nbits) >{ > return bitmap_scnlistprintf(buf, len, srcp->bits, nbits); >} > > >static inline __attribute__((always_inline)) int __nodelist_parse(const char *buf, nodemask_t *dstp, int nbits) >{ > return bitmap_parselist(buf, dstp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) int __node_remap(int oldbit, > const nodemask_t *oldp, const nodemask_t *newp, int nbits) >{ > return bitmap_bitremap(oldbit, oldp->bits, newp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp, > const nodemask_t *oldp, const nodemask_t *newp, int nbits) >{ > bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp, > const nodemask_t *relmapp, int nbits) >{ > bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits); >} > > > >static inline __attribute__((always_inline)) void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp, > int sz, int nbits) >{ > bitmap_fold(dstp->bits, origp->bits, sz, nbits); >} > >enum node_states { > N_POSSIBLE, > N_ONLINE, > N_NORMAL_MEMORY, > > N_HIGH_MEMORY, > > > > N_CPU, > NR_NODE_STATES >}; > > > > > > >extern nodemask_t node_states[NR_NODE_STATES]; > >static inline __attribute__((always_inline)) int node_state(int node, enum node_states state) >{ > return node == 0; >} > >static inline __attribute__((always_inline)) void node_set_state(int node, enum node_states state) >{ >} > >static inline __attribute__((always_inline)) void node_clear_state(int node, enum node_states state) >{ >} > >static inline __attribute__((always_inline)) int num_node_state(enum node_states state) >{ > return 1; >} > >static inline __attribute__((always_inline)) int node_random(const nodemask_t *mask) >{ > return 0; >} > >struct nodemask_scratch { > nodemask_t mask1; > nodemask_t mask2; >}; > > > >enum pageblock_bits { > PB_migrate, > PB_migrate_end = PB_migrate + 3 - 1, > > NR_PAGEBLOCK_BITS >}; > >struct page; > > >unsigned long get_pageblock_flags_group(struct page *page, > int start_bitidx, int end_bitidx); >void set_pageblock_flags_group(struct page *page, unsigned long flags, > int start_bitidx, int end_bitidx); > > > > >extern int page_group_by_mobility_disabled; > >static inline __attribute__((always_inline)) int get_pageblock_migratetype(struct page *page) >{ > return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end); >} > >struct free_area { > struct list_head free_list[5]; > unsigned long nr_free; >}; > >struct pglist_data; > >struct zone_padding { > char x[0]; >} __attribute__((__aligned__(1 << (6)))); > > > > > >enum zone_stat_item { > > NR_FREE_PAGES, > NR_LRU_BASE, > NR_INACTIVE_ANON = NR_LRU_BASE, > NR_ACTIVE_ANON, > NR_INACTIVE_FILE, > NR_ACTIVE_FILE, > NR_UNEVICTABLE, > NR_MLOCK, > NR_ANON_PAGES, > NR_FILE_MAPPED, > > NR_FILE_PAGES, > NR_FILE_DIRTY, > NR_WRITEBACK, > NR_SLAB_RECLAIMABLE, > NR_SLAB_UNRECLAIMABLE, > 
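> /* the items below complete the per-zone counters kept in
> * zone->vm_stat[] (see struct zone further down) */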
NR_PAGETABLE, > NR_KERNEL_STACK, > > NR_UNSTABLE_NFS, > NR_BOUNCE, > NR_VMSCAN_WRITE, > NR_VMSCAN_IMMEDIATE, > NR_WRITEBACK_TEMP, > NR_ISOLATED_ANON, > NR_ISOLATED_FILE, > NR_SHMEM, > NR_DIRTIED, > NR_WRITTEN, > > NR_ANON_TRANSPARENT_HUGEPAGES, > NR_VM_ZONE_STAT_ITEMS }; > >enum lru_list { > LRU_INACTIVE_ANON = 0, > LRU_ACTIVE_ANON = 0 + 1, > LRU_INACTIVE_FILE = 0 + 2, > LRU_ACTIVE_FILE = 0 + 2 + 1, > LRU_UNEVICTABLE, > NR_LRU_LISTS >}; > > > > > >static inline __attribute__((always_inline)) int is_file_lru(enum lru_list lru) >{ > return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE); >} > >static inline __attribute__((always_inline)) int is_active_lru(enum lru_list lru) >{ > return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE); >} > >static inline __attribute__((always_inline)) int is_unevictable_lru(enum lru_list lru) >{ > return (lru == LRU_UNEVICTABLE); >} > >struct lruvec { > struct list_head lists[NR_LRU_LISTS]; >}; > >typedef unsigned isolate_mode_t; > >enum zone_watermarks { > WMARK_MIN, > WMARK_LOW, > WMARK_HIGH, > NR_WMARK >}; > > > > > >struct per_cpu_pages { > int count; > int high; > int batch; > > > struct list_head lists[3]; >}; > >struct per_cpu_pageset { > struct per_cpu_pages pcp; > > > > > s8 stat_threshold; > s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS]; > >}; > > > >enum zone_type { > > ZONE_NORMAL, > > ZONE_HIGHMEM, > > ZONE_MOVABLE, > __MAX_NR_ZONES >}; > >struct zone_reclaim_stat { > > unsigned long recent_rotated[2]; > unsigned long recent_scanned[2]; >}; > >struct zone { > > > > unsigned long watermark[NR_WMARK]; > > > > > > > unsigned long percpu_drift_mark; > > unsigned long lowmem_reserve[3]; > > > > > > unsigned long dirty_balance_reserve; > > struct per_cpu_pageset *pageset; > > > > spinlock_t lock; > int all_unreclaimable; > > > > > struct free_area free_area[11]; > > > > > > > unsigned long *pageblock_flags; > > unsigned int compact_considered; > unsigned int compact_defer_shift; > int compact_order_failed; > > > struct zone_padding _pad1_; > > > spinlock_t lru_lock; > struct lruvec lruvec; > > struct zone_reclaim_stat reclaim_stat; > > unsigned long pages_scanned; > unsigned long flags; > > > atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; > > > > > > unsigned int inactive_ratio; > > > struct zone_padding _pad2_; > > wait_queue_head_t * wait_table; > unsigned long wait_table_hash_nr_entries; > unsigned long wait_table_bits; > > > > > struct pglist_data *zone_pgdat; > > unsigned long zone_start_pfn; > > unsigned long spanned_pages; > unsigned long present_pages; > > > > > const char *name; >} __attribute__((__aligned__(1 << (6)))); > >typedef enum { > ZONE_RECLAIM_LOCKED, > ZONE_OOM_LOCKED, > ZONE_CONGESTED, > > >} zone_flags_t; > >static inline __attribute__((always_inline)) void zone_set_flag(struct zone *zone, zone_flags_t flag) >{ > _set_bit(flag,&zone->flags); >} > >static inline __attribute__((always_inline)) int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) >{ > return _test_and_set_bit(flag,&zone->flags); >} > >static inline __attribute__((always_inline)) void zone_clear_flag(struct zone *zone, zone_flags_t flag) >{ > _clear_bit(flag,&zone->flags); >} > >static inline __attribute__((always_inline)) int zone_is_reclaim_congested(const struct zone *zone) >{ > return test_bit(ZONE_CONGESTED, &zone->flags); >} > >static inline __attribute__((always_inline)) int zone_is_reclaim_locked(const struct zone *zone) >{ > return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); >} > >static inline __attribute__((always_inline)) int 
zone_is_oom_locked(const struct zone *zone) >{ > return test_bit(ZONE_OOM_LOCKED, &zone->flags); >} > >struct zonelist_cache; > > > > > > >struct zoneref { > struct zone *zone; > int zone_idx; >}; > >struct zonelist { > struct zonelist_cache *zlcache_ptr; > struct zoneref _zonerefs[((1 << 0) * 3) + 1]; > > > >}; > >extern struct page *mem_map; > >struct bootmem_data; >typedef struct pglist_data { > struct zone node_zones[3]; > struct zonelist node_zonelists[1]; > int nr_zones; > > struct page *node_mem_map; > > struct page_cgroup *node_page_cgroup; > > > > struct bootmem_data *bdata; > > unsigned long node_start_pfn; > unsigned long node_present_pages; > unsigned long node_spanned_pages; > > int node_id; > wait_queue_head_t kswapd_wait; > struct task_struct *kswapd; > int kswapd_max_order; > enum zone_type classzone_idx; >} pg_data_t; > > > > > > > > > > > > >struct srcu_struct_array { > int c[2]; >}; > >struct srcu_struct { > int completed; > struct srcu_struct_array *per_cpu_ref; > struct mutex mutex; > > > >}; > >int init_srcu_struct(struct srcu_struct *sp); > > > >void cleanup_srcu_struct(struct srcu_struct *sp); >int __srcu_read_lock(struct srcu_struct *sp) ; >void __srcu_read_unlock(struct srcu_struct *sp, int idx) ; >void synchronize_srcu(struct srcu_struct *sp); >void synchronize_srcu_expedited(struct srcu_struct *sp); >long srcu_batches_completed(struct srcu_struct *sp); > >static inline __attribute__((always_inline)) int srcu_read_lock_held(struct srcu_struct *sp) >{ > return 1; >} > >static inline __attribute__((always_inline)) int srcu_read_lock(struct srcu_struct *sp) >{ > int retval = __srcu_read_lock(sp); > > do { } while (0); > do { } while (0) > ; > return retval; >} > >static inline __attribute__((always_inline)) void srcu_read_unlock(struct srcu_struct *sp, int idx) > >{ > do { } while (0) > ; > do { } while (0); > __srcu_read_unlock(sp, idx); >} > >static inline __attribute__((always_inline)) int srcu_read_lock_raw(struct srcu_struct *sp) >{ > unsigned long flags; > int ret; > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > ret = __srcu_read_lock(sp); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); > return ret; >} > >static inline __attribute__((always_inline)) void srcu_read_unlock_raw(struct srcu_struct *sp, int idx) >{ > unsigned long flags; > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > __srcu_read_unlock(sp, idx); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); >} > > 
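>/*
> * A minimal usage sketch of the SRCU read-side pattern provided by the
> * helpers above; "example_srcu" and "example_reader" are hypothetical
> * names, and the block is guarded with #if 0 so this reduced testcase
> * is left functionally unchanged. A writer side would pair this with
> * init_srcu_struct() at setup and synchronize_srcu() after unpublishing.
> */
>#if 0
>static struct srcu_struct example_srcu;
>
>static void example_reader(void)
>{
> int idx;
>
> /* srcu_read_lock() returns the grace-period index that must be
> passed back to srcu_read_unlock() on the same srcu_struct */
> idx = srcu_read_lock(&example_srcu);
> /* ... dereference SRCU-protected data here ... */
> srcu_read_unlock(&example_srcu, idx);
>}
>#endif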
>struct notifier_block { > int (*notifier_call)(struct notifier_block *, unsigned long, void *); > struct notifier_block *next; > int priority; >}; > >struct atomic_notifier_head { > spinlock_t lock; > struct notifier_block *head; >}; > >struct blocking_notifier_head { > struct rw_semaphore rwsem; > struct notifier_block *head; >}; > >struct raw_notifier_head { > struct notifier_block *head; >}; > >struct srcu_notifier_head { > struct mutex mutex; > struct srcu_struct srcu; > struct notifier_block *head; >}; > >extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); > >extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, > struct notifier_block *nb); >extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh, > struct notifier_block *nb); >extern int raw_notifier_chain_register(struct raw_notifier_head *nh, > struct notifier_block *nb); >extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, > struct notifier_block *nb); > >extern int blocking_notifier_chain_cond_register( > struct blocking_notifier_head *nh, > struct notifier_block *nb); > >extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, > struct notifier_block *nb); >extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, > struct notifier_block *nb); >extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh, > struct notifier_block *nb); >extern int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh, > struct notifier_block *nb); > >extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh, > unsigned long val, void *v); >extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh, > unsigned long val, void *v, int nr_to_call, int *nr_calls); >extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh, > unsigned long val, void *v); >extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh, > unsigned long val, void *v, int nr_to_call, int *nr_calls); >extern int raw_notifier_call_chain(struct raw_notifier_head *nh, > unsigned long val, void *v); >extern int __raw_notifier_call_chain(struct raw_notifier_head *nh, > unsigned long val, void *v, int nr_to_call, int *nr_calls); >extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, > unsigned long val, void *v); >extern int __srcu_notifier_call_chain(struct srcu_notifier_head *nh, > unsigned long val, void *v, int nr_to_call, int *nr_calls); > >static inline __attribute__((always_inline)) int notifier_from_errno(int err) >{ > if (err) > return 0x8000 | (0x0001 - err); > > return 0x0001; >} > > >static inline __attribute__((always_inline)) int notifier_to_errno(int ret) >{ > ret &= ~0x8000; > return ret > 0x0001 ? 
0x0001 - ret : 0; >} > >extern struct blocking_notifier_head reboot_notifier_list; > > > >struct page; >struct zone; >struct pglist_data; >struct mem_section; > >static inline __attribute__((always_inline)) void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {} >static inline __attribute__((always_inline)) void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {} >static inline __attribute__((always_inline)) void pgdat_resize_init(struct pglist_data *pgdat) {} > >static inline __attribute__((always_inline)) unsigned zone_span_seqbegin(struct zone *zone) >{ > return 0; >} >static inline __attribute__((always_inline)) int zone_span_seqretry(struct zone *zone, unsigned iv) >{ > return 0; >} >static inline __attribute__((always_inline)) void zone_span_writelock(struct zone *zone) {} >static inline __attribute__((always_inline)) void zone_span_writeunlock(struct zone *zone) {} >static inline __attribute__((always_inline)) void zone_seqlock_init(struct zone *zone) {} > >static inline __attribute__((always_inline)) int mhp_notimplemented(const char *func) >{ > printk("<4>" "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func); > dump_stack(); > return -38; >} > >static inline __attribute__((always_inline)) void register_page_bootmem_info_node(struct pglist_data *pgdat) >{ >} > >static inline __attribute__((always_inline)) void lock_memory_hotplug(void) {} >static inline __attribute__((always_inline)) void unlock_memory_hotplug(void) {} > >static inline __attribute__((always_inline)) int is_mem_section_removable(unsigned long pfn, > unsigned long nr_pages) >{ > return 0; >} > > >extern int mem_online_node(int nid); >extern int add_memory(int nid, u64 start, u64 size); >extern int arch_add_memory(int nid, u64 start, u64 size); >extern int remove_memory(u64 start, u64 size); >extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn, > int nr_pages); >extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms); >extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, > unsigned long pnum); > > >extern struct mutex zonelists_mutex; >void build_all_zonelists(void *data); >void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx); >bool zone_watermark_ok(struct zone *z, int order, unsigned long mark, > int classzone_idx, int alloc_flags); >bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark, > int classzone_idx, int alloc_flags); >enum memmap_context { > MEMMAP_EARLY, > MEMMAP_HOTPLUG, >}; >extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, > unsigned long size, > enum memmap_context context); > > > > >static inline __attribute__((always_inline)) void memory_present(int nid, unsigned long start, unsigned long end) {} > > > > > >static inline __attribute__((always_inline)) int local_memory_node(int node_id) { return node_id; }; > >static inline __attribute__((always_inline)) int populated_zone(struct zone *zone) >{ > return (!!zone->present_pages); >} > >extern int movable_zone; > >static inline __attribute__((always_inline)) int zone_movable_is_highmem(void) >{ > > > > return 0; > >} > >static inline __attribute__((always_inline)) int is_highmem_idx(enum zone_type idx) >{ > > return (idx == ZONE_HIGHMEM || > (idx == ZONE_MOVABLE && zone_movable_is_highmem())); > > > >} > >static inline __attribute__((always_inline)) int is_normal_idx(enum zone_type idx) >{ > return (idx == ZONE_NORMAL); >} > > > > > > > >static inline __attribute__((always_inline)) 
int is_highmem(struct zone *zone) >{ > > int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones; > return zone_off == ZONE_HIGHMEM * sizeof(*zone) || > (zone_off == ZONE_MOVABLE * sizeof(*zone) && > zone_movable_is_highmem()); > > > >} > >static inline __attribute__((always_inline)) int is_normal(struct zone *zone) >{ > return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL; >} > >static inline __attribute__((always_inline)) int is_dma32(struct zone *zone) >{ > > > > return 0; > >} > >static inline __attribute__((always_inline)) int is_dma(struct zone *zone) >{ > > > > return 0; > >} > > >struct ctl_table; >int min_free_kbytes_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern int sysctl_lowmem_reserve_ratio[3 -1]; >int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); > >extern int numa_zonelist_order_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >extern char numa_zonelist_order[]; > > > > >extern struct pglist_data contig_page_data; > >extern struct pglist_data *first_online_pgdat(void); >extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); >extern struct zone *next_zone(struct zone *zone); > >static inline __attribute__((always_inline)) struct zone *zonelist_zone(struct zoneref *zoneref) >{ > return zoneref->zone; >} > >static inline __attribute__((always_inline)) int zonelist_zone_idx(struct zoneref *zoneref) >{ > return zoneref->zone_idx; >} > >static inline __attribute__((always_inline)) int zonelist_node_idx(struct zoneref *zoneref) >{ > > > > > return 0; > >} > >struct zoneref *next_zones_zonelist(struct zoneref *z, > enum zone_type highest_zoneidx, > nodemask_t *nodes, > struct zone **zone); > >static inline __attribute__((always_inline)) struct zoneref *first_zones_zonelist(struct zonelist *zonelist, > enum zone_type highest_zoneidx, > nodemask_t *nodes, > struct zone **zone) >{ > return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes, > zone); >} > >static inline __attribute__((always_inline)) unsigned long early_pfn_to_nid(unsigned long pfn) >{ > return 0; >} > >void memory_present(int nid, unsigned long start, unsigned long end); >unsigned long __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) node_memmap_size_bytes(int, unsigned long, unsigned long); > >static inline __attribute__((always_inline)) int memmap_valid_within(unsigned long pfn, > struct page *page, struct zone *zone) >{ > return 1; >} > > > > > > > > > > > > > >struct cputopo_arm { > int thread_id; > int core_id; > int socket_id; > cpumask_t thread_sibling; > cpumask_t core_sibling; >}; > >extern struct cputopo_arm cpu_topology[2]; > >void init_cpu_topology(void); >void store_cpu_topology(unsigned int cpuid); >const struct cpumask *cpu_coregroup_mask(int cpu); > > > > > >int arch_update_cpu_topology(void); > >static inline __attribute__((always_inline)) int numa_node_id(void) >{ > return ((void)((current_thread_info()->cpu)),0); >} > >static inline __attribute__((always_inline)) int numa_mem_id(void) >{ > return numa_node_id(); >} > > > > >struct vm_area_struct; > >static inline __attribute__((always_inline)) int 
allocflags_to_migratetype(gfp_t gfp_flags) >{ > ({ int __ret_warn_on = !!((gfp_flags & ((( gfp_t)0x80000u)|(( gfp_t)0x08u))) == ((( gfp_t)0x80000u)|(( gfp_t)0x08u))); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/gfp.h", 154); __builtin_expect(!!(__ret_warn_on), 0); }); > > if (__builtin_expect(!!(page_group_by_mobility_disabled), 0)) > return 0; > > > return (((gfp_flags & (( gfp_t)0x08u)) != 0) << 1) | > ((gfp_flags & (( gfp_t)0x80000u)) != 0); >} > >static inline __attribute__((always_inline)) enum zone_type gfp_zone(gfp_t flags) >{ > enum zone_type z; > int bit = ( int) (flags & ((( gfp_t)0x01u)|(( gfp_t)0x02u)|(( gfp_t)0x04u)|(( gfp_t)0x08u))); > > z = (( (ZONE_NORMAL << 0 * 2) | (ZONE_NORMAL << 0x01u * 2) | (ZONE_HIGHMEM << 0x02u * 2) | (ZONE_NORMAL << 0x04u * 2) | (ZONE_NORMAL << 0x08u * 2) | (ZONE_NORMAL << (0x08u | 0x01u) * 2) | (ZONE_MOVABLE << (0x08u | 0x02u) * 2) | (ZONE_NORMAL << (0x08u | 0x04u) * 2) ) >> (bit * 2)) & > ((1 << 2) - 1); > do { (void)((( 1 << (0x01u | 0x02u) | 1 << (0x01u | 0x04u) | 1 << (0x04u | 0x02u) | 1 << (0x01u | 0x04u | 0x02u) | 1 << (0x08u | 0x02u | 0x01u) | 1 << (0x08u | 0x04u | 0x01u) | 1 << (0x08u | 0x04u | 0x02u) | 1 << (0x08u | 0x04u | 0x01u | 0x02u) ) >> bit) & 1); } while (0); > return z; >} > >static inline __attribute__((always_inline)) int gfp_zonelist(gfp_t flags) >{ > if (0 && __builtin_expect(!!(flags & (( gfp_t)0x40000u)), 0)) > return 1; > > return 0; >} > >static inline __attribute__((always_inline)) struct zonelist *node_zonelist(int nid, gfp_t flags) >{ > return (&contig_page_data)->node_zonelists + gfp_zonelist(flags); >} > > >static inline __attribute__((always_inline)) void arch_free_page(struct page *page, int order) { } > > >static inline __attribute__((always_inline)) void arch_alloc_page(struct page *page, int order) { } > > >struct page * >__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, > struct zonelist *zonelist, nodemask_t *nodemask); > >static inline __attribute__((always_inline)) struct page * >__alloc_pages(gfp_t gfp_mask, unsigned int order, > struct zonelist *zonelist) >{ > return __alloc_pages_nodemask(gfp_mask, order, zonelist, ((void *)0)); >} > >static inline __attribute__((always_inline)) struct page *alloc_pages_node(int nid, gfp_t gfp_mask, > unsigned int order) >{ > > if (nid < 0) > nid = numa_node_id(); > > return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); >} > >static inline __attribute__((always_inline)) struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask, > unsigned int order) >{ > do { (void)(nid < 0 || nid >= (1 << 0) || !node_state((nid), N_ONLINE)); } while (0); > > return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask)); >} > >extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); >extern unsigned long get_zeroed_page(gfp_t gfp_mask); > >void *alloc_pages_exact(size_t size, gfp_t gfp_mask); >void free_pages_exact(void *virt, size_t size); > >void *alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask); > > > > > > > >extern void __free_pages(struct page *page, unsigned int order); >extern void free_pages(unsigned long addr, unsigned int order); >extern void free_hot_cold_page(struct page *page, int cold); >extern void free_hot_cold_page_list(struct list_head *list, int cold); > > > > >void page_alloc_init(void); >void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); >void drain_all_pages(void); >void drain_local_pages(void *dummy); > >extern gfp_t gfp_allowed_mask; > >extern void 
pm_restrict_gfp_mask(void); >extern void pm_restore_gfp_mask(void); > > >extern bool pm_suspended_storage(void); > > > > > > > > > > > > > > > >struct task_struct; > >extern int debug_locks; >extern int debug_locks_silent; > > >static inline __attribute__((always_inline)) int __debug_locks_off(void) >{ > return ((__typeof__(*(&debug_locks)))__xchg((unsigned long)(0),(&debug_locks),sizeof(*(&debug_locks)))); >} > > > > >extern int debug_locks_off(void); > >struct task_struct; > > > > > > > >static inline __attribute__((always_inline)) void debug_show_all_locks(void) >{ >} > >static inline __attribute__((always_inline)) void debug_show_held_locks(struct task_struct *task) >{ >} > >static inline __attribute__((always_inline)) void >debug_check_no_locks_freed(const void *from, unsigned long len) >{ >} > >static inline __attribute__((always_inline)) void >debug_check_no_locks_held(void) >{ >} > > > > > > >struct range { > u64 start; > u64 end; >}; > >int add_range(struct range *range, int az, int nr_range, > u64 start, u64 end); > > >int add_range_with_merge(struct range *range, int az, int nr_range, > u64 start, u64 end); > >void subtract_range(struct range *range, int az, u64 start, u64 end); > >int clean_sort_range(struct range *range, int az); > >void sort_range(struct range *range, int nr_range); > > >static inline __attribute__((always_inline)) resource_size_t cap_resource(u64 val) >{ > if (val > ((resource_size_t)~0)) > return ((resource_size_t)~0); > > return val; >} > > > > >static inline __attribute__((always_inline)) void bit_spin_lock(int bitnum, unsigned long *addr) >{ > > > > > > > > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > > while (__builtin_expect(!!(_test_and_set_bit(bitnum,addr)), 0)) { > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > do { > __asm__ __volatile__("": : :"memory"); > } while (test_bit(bitnum, addr)); > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > } > > (void)0; >} > > > > >static inline __attribute__((always_inline)) int bit_spin_trylock(int bitnum, unsigned long *addr) >{ > do { do { (current_thread_info()->preempt_count) += (1); } while (0); __asm__ __volatile__("": : :"memory"); } while (0); > > if (__builtin_expect(!!(_test_and_set_bit(bitnum,addr)), 0)) { > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > return 0; > } > > (void)0; > return 1; >} > > > > >static inline __attribute__((always_inline)) void bit_spin_unlock(int bitnum, unsigned long *addr) >{ > > > > > do { __asm__ __volatile__ ("dmb" : : : "memory"); _clear_bit(bitnum,addr); } while (0); > > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > (void)0; >} > > > > > > >static inline __attribute__((always_inline)) 
void __bit_spin_unlock(int bitnum, unsigned long *addr) >{ > > > > > do { __asm__ __volatile__ ("dmb" : : : "memory"); __clear_bit(bitnum, addr); } while (0); > > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); > (void)0; >} > > > > >static inline __attribute__((always_inline)) int bit_spin_is_locked(int bitnum, unsigned long *addr) >{ > > return test_bit(bitnum, addr); > > > > > >} > > > > > > > > > >struct shrink_control { > gfp_t gfp_mask; > > > unsigned long nr_to_scan; >}; > >struct shrinker { > int (*shrink)(struct shrinker *, struct shrink_control *sc); > int seeks; > long batch; > > > struct list_head list; > atomic_long_t nr_in_batch; >}; > >extern void register_shrinker(struct shrinker *); >extern void unregister_shrinker(struct shrinker *); > > >struct mempolicy; >struct anon_vma; >struct file_ra_state; >struct user_struct; >struct writeback_control; > > >extern unsigned long max_mapnr; > > >extern unsigned long num_physpages; >extern unsigned long totalram_pages; >extern void * high_memory; >extern int page_cluster; > > >extern int sysctl_legacy_va_layout; > > > > > > > > > > > > > > > > > > >struct mm_struct; > > > > >extern struct processor { > > > > void (*_data_abort)(unsigned long pc); > > > > unsigned long (*_prefetch_abort)(unsigned long lr); > > > > void (*_proc_init)(void); > > > > void (*_proc_fin)(void); > > > > void (*reset)(unsigned long addr) __attribute__((noreturn)); > > > > int (*_do_idle)(void); > > > > > > > > void (*dcache_clean_area)(void *addr, int size); > > > > > void (*switch_mm)(unsigned long pgd_phys, struct mm_struct *mm); > > > > > > > > void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); > > > > unsigned int suspend_size; > void (*do_suspend)(void *); > void (*do_resume)(void *); >} processor; > > >extern void cpu_v7_proc_init(void); >extern void cpu_v7_proc_fin(void); >extern int cpu_v7_do_idle(void); >extern void cpu_v7_dcache_clean_area(void *, int); >extern void cpu_v7_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); > > > >extern void cpu_v7_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); > >extern void cpu_v7_reset(unsigned long addr) __attribute__((noreturn)); > > >extern void cpu_v7_do_suspend(void *); >extern void cpu_v7_do_resume(void *); > >extern void cpu_resume(void); > > > > >typedef struct { pgd_t pgd; } pud_t; > >static inline __attribute__((always_inline)) int pgd_none(pgd_t pgd) { return 0; } >static inline __attribute__((always_inline)) int pgd_bad(pgd_t pgd) { return 0; } >static inline __attribute__((always_inline)) int pgd_present(pgd_t pgd) { return 1; } >static inline __attribute__((always_inline)) void pgd_clear(pgd_t *pgd) { } > >static inline __attribute__((always_inline)) pud_t * pud_offset(pgd_t * pgd, unsigned long address) >{ > return (pud_t *)pgd; >} > > > > > > > > > > > > > >static inline __attribute__((always_inline)) pmd_t *pmd_offset(pud_t *pud, unsigned long addr) >{ > return (pmd_t *)pud; >} > > >extern void __pte_error(const char *file, int line, pte_t); >extern void __pmd_error(const char *file, int line, pmd_t); >extern void __pgd_error(const char *file, int line, pgd_t); > >extern pgprot_t pgprot_user; >extern pgprot_t pgprot_kernel; > >struct file; >extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, > unsigned long 
size, pgprot_t vma_prot); > >extern struct page *empty_zero_page; > > > >extern pgd_t swapper_pg_dir[2048]; > >static inline __attribute__((always_inline)) pte_t *pmd_page_vaddr(pmd_t pmd) >{ > return ((void *)__phys_to_virt((unsigned long)((pmd) & (~0UL) & (s32)(~(((1UL) << 12)-1))))); >} > >extern void __sync_icache_dcache(pte_t pteval); > > >static inline __attribute__((always_inline)) void set_pte_at(struct mm_struct *mm, unsigned long addr, > pte_t *ptep, pte_t pteval) >{ > unsigned long ext = 0; > > if (addr < ((0xC0000000UL) - (0x01000000UL)) && (((pteval) & ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8))) == ((((pteval_t)(1)) << 0) | (((pteval_t)(1)) << 8)))) { > __sync_icache_dcache(pteval); > ext |= (((pteval_t)(1)) << 11); > } > > cpu_v7_set_pte_ext(ptep,pteval,ext); >} > > > > >static inline __attribute__((always_inline)) pte_t pte_wrprotect(pte_t pte) { (pte) |= (((pteval_t)(1)) << 7); return pte; }; >static inline __attribute__((always_inline)) pte_t pte_mkwrite(pte_t pte) { (pte) &= ~(((pteval_t)(1)) << 7); return pte; }; >static inline __attribute__((always_inline)) pte_t pte_mkclean(pte_t pte) { (pte) &= ~(((pteval_t)(1)) << 6); return pte; }; >static inline __attribute__((always_inline)) pte_t pte_mkdirty(pte_t pte) { (pte) |= (((pteval_t)(1)) << 6); return pte; }; >static inline __attribute__((always_inline)) pte_t pte_mkold(pte_t pte) { (pte) &= ~(((pteval_t)(1)) << 1); return pte; }; >static inline __attribute__((always_inline)) pte_t pte_mkyoung(pte_t pte) { (pte) |= (((pteval_t)(1)) << 1); return pte; }; > >static inline __attribute__((always_inline)) pte_t pte_mkspecial(pte_t pte) { return pte; } > >static inline __attribute__((always_inline)) pte_t pte_modify(pte_t pte, pgprot_t newprot) >{ > const pteval_t mask = (((pteval_t)(1)) << 9) | (((pteval_t)(1)) << 7) | (((pteval_t)(1)) << 8); > (pte) = ((pte) & ~mask) | ((newprot) & mask); > return pte; >} > > > >extern int ptep_set_access_flags(struct vm_area_struct *vma, > unsigned long address, pte_t *ptep, > pte_t entry, int dirty); > > > >extern int pmdp_set_access_flags(struct vm_area_struct *vma, > unsigned long address, pmd_t *pmdp, > pmd_t entry, int dirty); > > > >static inline __attribute__((always_inline)) int ptep_test_and_clear_young(struct vm_area_struct *vma, > unsigned long address, > pte_t *ptep) >{ > pte_t pte = *ptep; > int r = 1; > if (!((pte) & (((pteval_t)(1)) << 1))) > r = 0; > else > set_pte_at(vma->vm_mm, address, ptep, pte_mkold(pte)); > return r; >} > >static inline __attribute__((always_inline)) int pmdp_test_and_clear_young(struct vm_area_struct *vma, > unsigned long address, > pmd_t *pmdp) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/asm-generic/pgtable.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "56" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); > return 0; >} > > > > >int ptep_clear_flush_young(struct vm_area_struct *vma, > unsigned long address, pte_t *ptep); > > > >int pmdp_clear_flush_young(struct vm_area_struct *vma, > unsigned long address, pmd_t *pmdp); > > > >static inline __attribute__((always_inline)) pte_t ptep_get_and_clear(struct mm_struct *mm, > unsigned long address, > pte_t *ptep) >{ > pte_t pte = *ptep; > cpu_v7_set_pte_ext(ptep,(0),0); > return pte; >} > >static inline __attribute__((always_inline)) pte_t ptep_get_and_clear_full(struct mm_struct *mm, > unsigned long address, pte_t *ptep, > int full) >{ > pte_t pte; > 
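> /* the "full" argument (non-zero during whole-mm teardown) needs no
> * special handling here, so the generic clear is reused as-is */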
pte = ptep_get_and_clear(mm, address, ptep); > return pte; >} > >static inline __attribute__((always_inline)) void pte_clear_not_present_full(struct mm_struct *mm, > unsigned long address, > pte_t *ptep, > int full) >{ > cpu_v7_set_pte_ext(ptep,(0),0); >} > > > >extern pte_t ptep_clear_flush(struct vm_area_struct *vma, > unsigned long address, > pte_t *ptep); > > > >extern pmd_t pmdp_clear_flush(struct vm_area_struct *vma, > unsigned long address, > pmd_t *pmdp); > > > >struct mm_struct; >static inline __attribute__((always_inline)) void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) >{ > pte_t old_pte = *ptep; > set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); >} > >static inline __attribute__((always_inline)) void pmdp_set_wrprotect(struct mm_struct *mm, > unsigned long address, pmd_t *pmdp) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/asm-generic/pgtable.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "155" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); >} > > > > >extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, > unsigned long address, > pmd_t *pmdp); > > > >static inline __attribute__((always_inline)) int pte_same(pte_t pte_a, pte_t pte_b) >{ > return (pte_a) == (pte_b); >} > >static inline __attribute__((always_inline)) int pmd_same(pmd_t pmd_a, pmd_t pmd_b) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/asm-generic/pgtable.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "182" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); > return 0; >} > >void pgd_clear_bad(pgd_t *); >void pud_clear_bad(pud_t *); >void pmd_clear_bad(pmd_t *); > >static inline __attribute__((always_inline)) int pgd_none_or_clear_bad(pgd_t *pgd) >{ > if (pgd_none(*pgd)) > return 1; > if (__builtin_expect(!!(pgd_bad(*pgd)), 0)) { > pgd_clear_bad(pgd); > return 1; > } > return 0; >} > >static inline __attribute__((always_inline)) int pud_none_or_clear_bad(pud_t *pud) >{ > if ((0)) > return 1; > if (__builtin_expect(!!((0)), 0)) { > pud_clear_bad(pud); > return 1; > } > return 0; >} > >static inline __attribute__((always_inline)) int pmd_none_or_clear_bad(pmd_t *pmd) >{ > if ((!(*pmd))) > return 1; > if (__builtin_expect(!!(((*pmd) & 2)), 0)) { > pmd_clear_bad(pmd); > return 1; > } > return 0; >} > >static inline __attribute__((always_inline)) pte_t __ptep_modify_prot_start(struct mm_struct *mm, > unsigned long addr, > pte_t *ptep) >{ > > > > > > return ptep_get_and_clear(mm, addr, ptep); >} > >static inline __attribute__((always_inline)) void __ptep_modify_prot_commit(struct mm_struct *mm, > unsigned long addr, > pte_t *ptep, pte_t pte) >{ > > > > > set_pte_at(mm, addr, ptep, pte); >} > >static inline __attribute__((always_inline)) pte_t ptep_modify_prot_start(struct mm_struct *mm, > unsigned long addr, > pte_t *ptep) >{ > return __ptep_modify_prot_start(mm, addr, ptep); >} > > > > > >static inline __attribute__((always_inline)) void ptep_modify_prot_commit(struct mm_struct *mm, > unsigned long addr, > pte_t *ptep, pte_t pte) >{ > __ptep_modify_prot_commit(mm, addr, ptep, pte); >} > >static inline __attribute__((always_inline)) int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot, > unsigned long pfn, unsigned long size) >{ > return 0; >} > >static inline 
__attribute__((always_inline)) int track_pfn_vma_copy(struct vm_area_struct *vma) >{ > return 0; >} > >static inline __attribute__((always_inline)) void untrack_pfn_vma(struct vm_area_struct *vma, > unsigned long pfn, unsigned long size) >{ >} > >static inline __attribute__((always_inline)) int pmd_trans_huge(pmd_t pmd) >{ > return 0; >} >static inline __attribute__((always_inline)) int pmd_trans_splitting(pmd_t pmd) >{ > return 0; >} > >static inline __attribute__((always_inline)) int pmd_write(pmd_t pmd) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/asm-generic/pgtable.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "443" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); > return 0; >} > > > > >static inline __attribute__((always_inline)) pmd_t pmd_read_atomic(pmd_t *pmdp) >{ > > > > > > return *pmdp; >} > >static inline __attribute__((always_inline)) int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) >{ > pmd_t pmdval = pmd_read_atomic(pmd); > > if ((!(pmdval))) > return 1; > if (__builtin_expect(!!(((pmdval) & 2)), 0)) { > if (!pmd_trans_huge(pmdval)) > pmd_clear_bad(pmd); > return 1; > } > return 0; >} > >static inline __attribute__((always_inline)) int pmd_trans_unstable(pmd_t *pmd) >{ > > > > return 0; > >} > > > >extern struct kmem_cache *vm_area_cachep; > >extern pgprot_t protection_map[16]; > >static inline __attribute__((always_inline)) int is_linear_pfn_mapping(struct vm_area_struct *vma) >{ > return !!(vma->vm_flags & 0x40000000); >} > >static inline __attribute__((always_inline)) int is_pfn_mapping(struct vm_area_struct *vma) >{ > return !!(vma->vm_flags & 0x00000400); >} > >struct vm_fault { > unsigned int flags; > unsigned long pgoff; > void *virtual_address; > > struct page *page; > > > > >}; > > > > > > >struct vm_operations_struct { > void (*open)(struct vm_area_struct * area); > void (*close)(struct vm_area_struct * area); > int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf); > > > > int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf); > > > > > int (*access)(struct vm_area_struct *vma, unsigned long addr, > void *buf, int len, int write); > >}; > >struct mmu_gather; >struct inode; > > > >enum pageflags { > PG_locked, > PG_error, > PG_referenced, > PG_uptodate, > PG_dirty, > PG_lru, > PG_active, > PG_slab, > PG_owner_priv_1, > PG_arch_1, > PG_reserved, > PG_private, > PG_private_2, > PG_writeback, > > PG_head, > PG_tail, > > > > PG_swapcache, > PG_mappedtodisk, > PG_reclaim, > PG_swapbacked, > PG_unevictable, > > PG_mlocked, > > __NR_PAGEFLAGS, > > > PG_checked = PG_owner_priv_1, > > > > > > PG_fscache = PG_private_2, > > > PG_pinned = PG_owner_priv_1, > PG_savepinned = PG_dirty, > > > PG_slob_free = PG_private, >}; > >struct page; > >static inline __attribute__((always_inline)) int PageLocked(const struct page *page) { return test_bit(PG_locked, &page->flags); } >static inline __attribute__((always_inline)) int PageError(const struct page *page) { return test_bit(PG_error, &page->flags); } static inline __attribute__((always_inline)) void SetPageError(struct page *page) { _set_bit(PG_error,&page->flags); } static inline __attribute__((always_inline)) void ClearPageError(struct page *page) { _clear_bit(PG_error,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageError(struct page *page) { return _test_and_clear_bit(PG_error,&page->flags); } >static inline 
__attribute__((always_inline)) int PageReferenced(const struct page *page) { return test_bit(PG_referenced, &page->flags); } static inline __attribute__((always_inline)) void SetPageReferenced(struct page *page) { _set_bit(PG_referenced,&page->flags); } static inline __attribute__((always_inline)) void ClearPageReferenced(struct page *page) { _clear_bit(PG_referenced,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageReferenced(struct page *page) { return _test_and_clear_bit(PG_referenced,&page->flags); } >static inline __attribute__((always_inline)) int PageDirty(const struct page *page) { return test_bit(PG_dirty, &page->flags); } static inline __attribute__((always_inline)) void SetPageDirty(struct page *page) { _set_bit(PG_dirty,&page->flags); } static inline __attribute__((always_inline)) void ClearPageDirty(struct page *page) { _clear_bit(PG_dirty,&page->flags); } static inline __attribute__((always_inline)) int TestSetPageDirty(struct page *page) { return _test_and_set_bit(PG_dirty,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageDirty(struct page *page) { return _test_and_clear_bit(PG_dirty,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageDirty(struct page *page) { __clear_bit(PG_dirty, &page->flags); } >static inline __attribute__((always_inline)) int PageLRU(const struct page *page) { return test_bit(PG_lru, &page->flags); } static inline __attribute__((always_inline)) void SetPageLRU(struct page *page) { _set_bit(PG_lru,&page->flags); } static inline __attribute__((always_inline)) void ClearPageLRU(struct page *page) { _clear_bit(PG_lru,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageLRU(struct page *page) { __clear_bit(PG_lru, &page->flags); } >static inline __attribute__((always_inline)) int PageActive(const struct page *page) { return test_bit(PG_active, &page->flags); } static inline __attribute__((always_inline)) void SetPageActive(struct page *page) { _set_bit(PG_active,&page->flags); } static inline __attribute__((always_inline)) void ClearPageActive(struct page *page) { _clear_bit(PG_active,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageActive(struct page *page) { __clear_bit(PG_active, &page->flags); } > static inline __attribute__((always_inline)) int TestClearPageActive(struct page *page) { return _test_and_clear_bit(PG_active,&page->flags); } >static inline __attribute__((always_inline)) int PageSlab(const struct page *page) { return test_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlab(struct page *page) { __set_bit(PG_slab, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlab(struct page *page) { __clear_bit(PG_slab, &page->flags); } >static inline __attribute__((always_inline)) int PageChecked(const struct page *page) { return test_bit(PG_checked, &page->flags); } static inline __attribute__((always_inline)) void SetPageChecked(struct page *page) { _set_bit(PG_checked,&page->flags); } static inline __attribute__((always_inline)) void ClearPageChecked(struct page *page) { _clear_bit(PG_checked,&page->flags); } >static inline __attribute__((always_inline)) int PagePinned(const struct page *page) { return test_bit(PG_pinned, &page->flags); } static inline __attribute__((always_inline)) void SetPagePinned(struct page *page) { _set_bit(PG_pinned,&page->flags); } static inline __attribute__((always_inline)) void ClearPagePinned(struct page *page) { 
_clear_bit(PG_pinned,&page->flags); } static inline __attribute__((always_inline)) int TestSetPagePinned(struct page *page) { return _test_and_set_bit(PG_pinned,&page->flags); } static inline __attribute__((always_inline)) int TestClearPagePinned(struct page *page) { return _test_and_clear_bit(PG_pinned,&page->flags); } >static inline __attribute__((always_inline)) int PageSavePinned(const struct page *page) { return test_bit(PG_savepinned, &page->flags); } static inline __attribute__((always_inline)) void SetPageSavePinned(struct page *page) { _set_bit(PG_savepinned,&page->flags); } static inline __attribute__((always_inline)) void ClearPageSavePinned(struct page *page) { _clear_bit(PG_savepinned,&page->flags); }; >static inline __attribute__((always_inline)) int PageReserved(const struct page *page) { return test_bit(PG_reserved, &page->flags); } static inline __attribute__((always_inline)) void SetPageReserved(struct page *page) { _set_bit(PG_reserved,&page->flags); } static inline __attribute__((always_inline)) void ClearPageReserved(struct page *page) { _clear_bit(PG_reserved,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageReserved(struct page *page) { __clear_bit(PG_reserved, &page->flags); } >static inline __attribute__((always_inline)) int PageSwapBacked(const struct page *page) { return test_bit(PG_swapbacked, &page->flags); } static inline __attribute__((always_inline)) void SetPageSwapBacked(struct page *page) { _set_bit(PG_swapbacked,&page->flags); } static inline __attribute__((always_inline)) void ClearPageSwapBacked(struct page *page) { _clear_bit(PG_swapbacked,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageSwapBacked(struct page *page) { __clear_bit(PG_swapbacked, &page->flags); } > >static inline __attribute__((always_inline)) int PageSlobFree(const struct page *page) { return test_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __SetPageSlobFree(struct page *page) { __set_bit(PG_slob_free, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageSlobFree(struct page *page) { __clear_bit(PG_slob_free, &page->flags); } > > > > > > >static inline __attribute__((always_inline)) int PagePrivate(const struct page *page) { return test_bit(PG_private, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate(struct page *page) { _set_bit(PG_private,&page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate(struct page *page) { _clear_bit(PG_private,&page->flags); } static inline __attribute__((always_inline)) void __SetPagePrivate(struct page *page) { __set_bit(PG_private, &page->flags); } > static inline __attribute__((always_inline)) void __ClearPagePrivate(struct page *page) { __clear_bit(PG_private, &page->flags); } >static inline __attribute__((always_inline)) int PagePrivate2(const struct page *page) { return test_bit(PG_private_2, &page->flags); } static inline __attribute__((always_inline)) void SetPagePrivate2(struct page *page) { _set_bit(PG_private_2,&page->flags); } static inline __attribute__((always_inline)) void ClearPagePrivate2(struct page *page) { _clear_bit(PG_private_2,&page->flags); } static inline __attribute__((always_inline)) int TestSetPagePrivate2(struct page *page) { return _test_and_set_bit(PG_private_2,&page->flags); } static inline __attribute__((always_inline)) int TestClearPagePrivate2(struct page *page) { return _test_and_clear_bit(PG_private_2,&page->flags); } >static inline 
__attribute__((always_inline)) int PageOwnerPriv1(const struct page *page) { return test_bit(PG_owner_priv_1, &page->flags); } static inline __attribute__((always_inline)) void SetPageOwnerPriv1(struct page *page) { _set_bit(PG_owner_priv_1,&page->flags); } static inline __attribute__((always_inline)) void ClearPageOwnerPriv1(struct page *page) { _clear_bit(PG_owner_priv_1,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageOwnerPriv1(struct page *page) { return _test_and_clear_bit(PG_owner_priv_1,&page->flags); } > > > > > >static inline __attribute__((always_inline)) int PageWriteback(const struct page *page) { return test_bit(PG_writeback, &page->flags); } static inline __attribute__((always_inline)) int TestSetPageWriteback(struct page *page) { return _test_and_set_bit(PG_writeback,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageWriteback(struct page *page) { return _test_and_clear_bit(PG_writeback,&page->flags); } >static inline __attribute__((always_inline)) int PageMappedToDisk(const struct page *page) { return test_bit(PG_mappedtodisk, &page->flags); } static inline __attribute__((always_inline)) void SetPageMappedToDisk(struct page *page) { _set_bit(PG_mappedtodisk,&page->flags); } static inline __attribute__((always_inline)) void ClearPageMappedToDisk(struct page *page) { _clear_bit(PG_mappedtodisk,&page->flags); } > > >static inline __attribute__((always_inline)) int PageReclaim(const struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReclaim(struct page *page) { _set_bit(PG_reclaim,&page->flags); } static inline __attribute__((always_inline)) void ClearPageReclaim(struct page *page) { _clear_bit(PG_reclaim,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageReclaim(struct page *page) { return _test_and_clear_bit(PG_reclaim,&page->flags); } >static inline __attribute__((always_inline)) int PageReadahead(const struct page *page) { return test_bit(PG_reclaim, &page->flags); } static inline __attribute__((always_inline)) void SetPageReadahead(struct page *page) { _set_bit(PG_reclaim,&page->flags); } static inline __attribute__((always_inline)) void ClearPageReadahead(struct page *page) { _clear_bit(PG_reclaim,&page->flags); } > >static inline __attribute__((always_inline)) int PageSwapCache(const struct page *page) { return 0; } > static inline __attribute__((always_inline)) void SetPageSwapCache(struct page *page) { } static inline __attribute__((always_inline)) void ClearPageSwapCache(struct page *page) { } > > >static inline __attribute__((always_inline)) int PageUnevictable(const struct page *page) { return test_bit(PG_unevictable, &page->flags); } static inline __attribute__((always_inline)) void SetPageUnevictable(struct page *page) { _set_bit(PG_unevictable,&page->flags); } static inline __attribute__((always_inline)) void ClearPageUnevictable(struct page *page) { _clear_bit(PG_unevictable,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageUnevictable(struct page *page) { __clear_bit(PG_unevictable, &page->flags); } > static inline __attribute__((always_inline)) int TestClearPageUnevictable(struct page *page) { return _test_and_clear_bit(PG_unevictable,&page->flags); } > > >static inline __attribute__((always_inline)) int PageMlocked(const struct page *page) { return test_bit(PG_mlocked, &page->flags); } static inline __attribute__((always_inline)) void SetPageMlocked(struct page *page) { 
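>/*
> * Atomically sets PG_mlocked; the TestSet and TestClear variants below
> * return the previous value so callers can keep the mlocked-page
> * accounting exact.
> */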
_set_bit(PG_mlocked,&page->flags); } static inline __attribute__((always_inline)) void ClearPageMlocked(struct page *page) { _clear_bit(PG_mlocked,&page->flags); } static inline __attribute__((always_inline)) void __ClearPageMlocked(struct page *page) { __clear_bit(PG_mlocked, &page->flags); } > static inline __attribute__((always_inline)) int TestSetPageMlocked(struct page *page) { return _test_and_set_bit(PG_mlocked,&page->flags); } static inline __attribute__((always_inline)) int TestClearPageMlocked(struct page *page) { return _test_and_clear_bit(PG_mlocked,&page->flags); } static inline __attribute__((always_inline)) int __TestClearPageMlocked(struct page *page) { return __test_and_clear_bit(PG_mlocked, &page->flags); } > >static inline __attribute__((always_inline)) int PageUncached(const struct page *page) { return 0; } > > > > > > > >static inline __attribute__((always_inline)) int PageHWPoison(const struct page *page) { return 0; } > > > >u64 stable_page_flags(struct page *page); > >static inline __attribute__((always_inline)) int PageUptodate(struct page *page) >{ > int ret = test_bit(PG_uptodate, &(page)->flags); > > if (ret) > __asm__ __volatile__ ("dmb" : : : "memory"); > > return ret; >} > >static inline __attribute__((always_inline)) void __SetPageUptodate(struct page *page) >{ > __asm__ __volatile__ ("dmb" : : : "memory"); > __set_bit(PG_uptodate, &(page)->flags); >} > >static inline __attribute__((always_inline)) void SetPageUptodate(struct page *page) >{ > > __asm__ __volatile__ ("dmb" : : : "memory"); > _set_bit(PG_uptodate,&(page)->flags); > >} > >static inline __attribute__((always_inline)) void ClearPageUptodate(struct page *page) { _clear_bit(PG_uptodate,&page->flags); } > >extern void cancel_dirty_page(struct page *page, unsigned int account_size); > >int test_clear_page_writeback(struct page *page); >int test_set_page_writeback(struct page *page); > >static inline __attribute__((always_inline)) void set_page_writeback(struct page *page) >{ > test_set_page_writeback(page); >} > >static inline __attribute__((always_inline)) int PageHead(const struct page *page) { return test_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __SetPageHead(struct page *page) { __set_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageHead(struct page *page) { __clear_bit(PG_head, &page->flags); } static inline __attribute__((always_inline)) void ClearPageHead(struct page *page) { _clear_bit(PG_head,&page->flags); } >static inline __attribute__((always_inline)) int PageTail(const struct page *page) { return test_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __SetPageTail(struct page *page) { __set_bit(PG_tail, &page->flags); } static inline __attribute__((always_inline)) void __ClearPageTail(struct page *page) { __clear_bit(PG_tail, &page->flags); } > >static inline __attribute__((always_inline)) int PageCompound(struct page *page) >{ > return page->flags & ((1L << PG_head) | (1L << PG_tail)); > >} > >static inline __attribute__((always_inline)) int PageTransHuge(struct page *page) >{ > return 0; >} > >static inline __attribute__((always_inline)) int PageTransCompound(struct page *page) >{ > return 0; >} > >static inline __attribute__((always_inline)) int PageTransTail(struct page *page) >{ > return 0; >} > >static inline __attribute__((always_inline)) int page_has_private(struct page *page) >{ > return !!(page->flags & (1 << PG_private | 1 << PG_private_2)); >} > > > > > >extern int 
do_huge_pmd_anonymous_page(struct mm_struct *mm, > struct vm_area_struct *vma, > unsigned long address, pmd_t *pmd, > unsigned int flags); >extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, > pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr, > struct vm_area_struct *vma); >extern int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, > unsigned long address, pmd_t *pmd, > pmd_t orig_pmd); >extern pgtable_t get_pmd_huge_pte(struct mm_struct *mm); >extern struct page *follow_trans_huge_pmd(struct mm_struct *mm, > unsigned long addr, > pmd_t *pmd, > unsigned int flags); >extern int zap_huge_pmd(struct mmu_gather *tlb, > struct vm_area_struct *vma, > pmd_t *pmd, unsigned long addr); >extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, > unsigned long addr, unsigned long end, > unsigned char *vec); >extern int move_huge_pmd(struct vm_area_struct *vma, > struct vm_area_struct *new_vma, > unsigned long old_addr, > unsigned long new_addr, unsigned long old_end, > pmd_t *old_pmd, pmd_t *new_pmd); >extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, > unsigned long addr, pgprot_t newprot); > >enum transparent_hugepage_flag { > TRANSPARENT_HUGEPAGE_FLAG, > TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, > TRANSPARENT_HUGEPAGE_DEFRAG_FLAG, > TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, > TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG, > > > >}; > >enum page_check_address_pmd_flag { > PAGE_CHECK_ADDRESS_PMD_FLAG, > PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, > PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, >}; >extern pmd_t *page_check_address_pmd(struct page *page, > struct mm_struct *mm, > unsigned long address, > enum page_check_address_pmd_flag flag); > >static inline __attribute__((always_inline)) int split_huge_page(struct page *page) >{ > return 0; >} > > > > > >static inline __attribute__((always_inline)) int hugepage_madvise(struct vm_area_struct *vma, > unsigned long *vm_flags, int advice) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/huge_mm.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "183" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); > return 0; >} >static inline __attribute__((always_inline)) void vma_adjust_trans_huge(struct vm_area_struct *vma, > unsigned long start, > unsigned long end, > long adjust_next) >{ >} >static inline __attribute__((always_inline)) int pmd_trans_huge_lock(pmd_t *pmd, > struct vm_area_struct *vma) >{ > return 0; >} > > >static inline __attribute__((always_inline)) int put_page_testzero(struct page *page) >{ > do { (void)((*(volatile int *)&(&page->_count)->counter) == 0); } while (0); > return (atomic_sub_return(1, &page->_count) == 0); >} > > > > > >static inline __attribute__((always_inline)) int get_page_unless_zero(struct page *page) >{ > return atomic_add_unless((&page->_count), 1, 0); >} > >extern int page_is_ram(unsigned long pfn); > > >struct page *vmalloc_to_page(const void *addr); >unsigned long vmalloc_to_pfn(const void *addr); > > > > > > > >static inline __attribute__((always_inline)) int is_vmalloc_addr(const void *x) >{ > > unsigned long addr = (unsigned long)x; > > return addr >= (((unsigned long)high_memory + (8*1024*1024)) & ~((8*1024*1024)-1)) && addr < 0xff000000UL; > > > >} > >extern int is_vmalloc_or_module_addr(const void *x); > > > > > > > >static inline __attribute__((always_inline)) void compound_lock(struct page *page) >{ > > 
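>/*
> * Intentionally empty: compound_lock() only takes the compound-page bit
> * spinlock when CONFIG_TRANSPARENT_HUGEPAGE is enabled, and this
> * configuration has THP disabled (hence the PageTransHuge() stub above
> * returning 0).
> */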
> >} > >static inline __attribute__((always_inline)) void compound_unlock(struct page *page) >{ > > > >} > >static inline __attribute__((always_inline)) unsigned long compound_lock_irqsave(struct page *page) >{ > unsigned long flags = flags; > > > > > return flags; >} > >static inline __attribute__((always_inline)) void compound_unlock_irqrestore(struct page *page, > unsigned long flags) >{ > > > > >} > >static inline __attribute__((always_inline)) struct page *compound_head(struct page *page) >{ > if (__builtin_expect(!!(PageTail(page)), 0)) > return page->first_page; > return page; >} > > > > > > >static inline __attribute__((always_inline)) void reset_page_mapcount(struct page *page) >{ > (((&(page)->_mapcount)->counter) = (-1)); >} > >static inline __attribute__((always_inline)) int page_mapcount(struct page *page) >{ > return (*(volatile int *)&(&(page)->_mapcount)->counter) + 1; >} > >static inline __attribute__((always_inline)) int page_count(struct page *page) >{ > return (*(volatile int *)&(&compound_head(page)->_count)->counter); >} > >static inline __attribute__((always_inline)) void get_huge_page_tail(struct page *page) >{ > > > > > do { (void)(page_mapcount(page) < 0); } while (0); > do { (void)((*(volatile int *)&(&page->_count)->counter) != 0); } while (0); > atomic_add(1, &page->_mapcount); >} > >extern bool __get_page_tail(struct page *page); > >static inline __attribute__((always_inline)) void get_page(struct page *page) >{ > if (__builtin_expect(!!(PageTail(page)), 0)) > if (__builtin_expect(!!(__get_page_tail(page)), 1)) > return; > > > > > do { (void)((*(volatile int *)&(&page->_count)->counter) <= 0); } while (0); > atomic_add(1, &page->_count); >} > >static inline __attribute__((always_inline)) struct page *virt_to_head_page(const void *x) >{ > struct page *page = (mem_map + ((__virt_to_phys((unsigned long)(x)) >> 12) - (__pv_phys_offset >> 12))); > return compound_head(page); >} > > > > > >static inline __attribute__((always_inline)) void init_page_count(struct page *page) >{ > (((&page->_count)->counter) = (1)); >} > >static inline __attribute__((always_inline)) int PageBuddy(struct page *page) >{ > return (*(volatile int *)&(&page->_mapcount)->counter) == (-128); >} > >static inline __attribute__((always_inline)) void __SetPageBuddy(struct page *page) >{ > do { (void)((*(volatile int *)&(&page->_mapcount)->counter) != -1); } while (0); > (((&page->_mapcount)->counter) = ((-128))); >} > >static inline __attribute__((always_inline)) void __ClearPageBuddy(struct page *page) >{ > do { (void)(!PageBuddy(page)); } while (0); > (((&page->_mapcount)->counter) = (-1)); >} > >void put_page(struct page *page); >void put_pages_list(struct list_head *pages); > >void split_page(struct page *page, unsigned int order); >int split_free_page(struct page *page); > > > > > > >typedef void compound_page_dtor(struct page *); > >static inline __attribute__((always_inline)) void set_compound_page_dtor(struct page *page, > compound_page_dtor *dtor) >{ > page[1].lru.next = (void *)dtor; >} > >static inline __attribute__((always_inline)) compound_page_dtor *get_compound_page_dtor(struct page *page) >{ > return (compound_page_dtor *)page[1].lru.next; >} > >static inline __attribute__((always_inline)) int compound_order(struct page *page) >{ > if (!PageHead(page)) > return 0; > return (unsigned long)page[1].lru.prev; >} > >static inline __attribute__((always_inline)) int compound_trans_order(struct page *page) >{ > int order; > unsigned long flags; > > if (!PageHead(page)) > return 0; > > 
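>/*
> * With THP disabled, compound_lock_irqsave() (defined above) degenerates
> * to the deliberate self-initialisation "flags = flags" (which silences
> * the uninitialised-variable warning), so this function is effectively
> * just compound_order() with dead lock and unlock calls around it.
> */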
flags = compound_lock_irqsave(page); > order = compound_order(page); > compound_unlock_irqrestore(page, flags); > return order; >} > >static inline __attribute__((always_inline)) void set_compound_order(struct page *page, unsigned long order) >{ > page[1].lru.prev = (void *)order; >} > >static inline __attribute__((always_inline)) pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) >{ > if (__builtin_expect(!!(vma->vm_flags & 0x00000002), 1)) > pte = pte_mkwrite(pte); > return pte; >} > >static inline __attribute__((always_inline)) enum zone_type page_zonenum(const struct page *page) >{ > return (page->flags >> (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))) & ((1UL << 2) - 1); >} > >static inline __attribute__((always_inline)) int page_zone_id(struct page *page) >{ > return (page->flags >> ((((((sizeof(unsigned long)*8) - 0) - 0) < ((((sizeof(unsigned long)*8) - 0) - 0) - 2))? (((sizeof(unsigned long)*8) - 0) - 0) : ((((sizeof(unsigned long)*8) - 0) - 0) - 2)) * ((0 + 2) != 0))) & ((1UL << (0 + 2)) - 1); >} > >static inline __attribute__((always_inline)) int zone_to_nid(struct zone *zone) >{ > > > > return 0; > >} > > > > >static inline __attribute__((always_inline)) int page_to_nid(const struct page *page) >{ > return (page->flags >> ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))) & ((1UL << 0) - 1); >} > > >static inline __attribute__((always_inline)) struct zone *page_zone(const struct page *page) >{ > return &(&contig_page_data)->node_zones[page_zonenum(page)]; >} > >static inline __attribute__((always_inline)) void set_page_zone(struct page *page, enum zone_type zone) >{ > page->flags &= ~(((1UL << 2) - 1) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0))); > page->flags |= (zone & ((1UL << 2) - 1)) << (((((sizeof(unsigned long)*8) - 0) - 0) - 2) * (2 != 0)); >} > >static inline __attribute__((always_inline)) void set_page_node(struct page *page, unsigned long node) >{ > page->flags &= ~(((1UL << 0) - 1) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0))); > page->flags |= (node & ((1UL << 0) - 1)) << ((((sizeof(unsigned long)*8) - 0) - 0) * (0 != 0)); >} > >static inline __attribute__((always_inline)) void set_page_links(struct page *page, enum zone_type zone, > unsigned long node, unsigned long pfn) >{ > set_page_zone(page, zone); > set_page_node(page, node); > > > >} > > > > > > > > > > > > > > > >enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, > PGALLOC_NORMAL , PGALLOC_HIGH , PGALLOC_MOVABLE, > PGFREE, PGACTIVATE, PGDEACTIVATE, > PGFAULT, PGMAJFAULT, > PGREFILL_NORMAL , PGREFILL_HIGH , PGREFILL_MOVABLE, > PGSTEAL_KSWAPD_NORMAL , PGSTEAL_KSWAPD_HIGH , PGSTEAL_KSWAPD_MOVABLE, > PGSTEAL_DIRECT_NORMAL , PGSTEAL_DIRECT_HIGH , PGSTEAL_DIRECT_MOVABLE, > PGSCAN_KSWAPD_NORMAL , PGSCAN_KSWAPD_HIGH , PGSCAN_KSWAPD_MOVABLE, > PGSCAN_DIRECT_NORMAL , PGSCAN_DIRECT_HIGH , PGSCAN_DIRECT_MOVABLE, > > > > PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, > KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, > KSWAPD_SKIP_CONGESTION_WAIT, > PAGEOUTRUN, ALLOCSTALL, PGROTATED, > > COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED, > COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, > > > > > UNEVICTABLE_PGCULLED, > UNEVICTABLE_PGSCANNED, > UNEVICTABLE_PGRESCUED, > UNEVICTABLE_PGMLOCKED, > UNEVICTABLE_PGMUNLOCKED, > UNEVICTABLE_PGCLEARED, > UNEVICTABLE_PGSTRANDED, > UNEVICTABLE_MLOCKFREED, > > > > > > > > NR_VM_EVENT_ITEMS >}; > > > >extern int sysctl_stat_interval; > >struct vm_event_state { > unsigned long event[NR_VM_EVENT_ITEMS]; >}; > >extern 
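>/*
> * vm_event_states (declared just below) lives in the .data..percpu
> * section. The __count_vm_event() and count_vm_event() bodies that
> * follow are the fully expanded generic this_cpu_add() machinery: a
> * switch on sizeof() the counter plus pointer arithmetic against
> * __per_cpu_offset[], with the irq-safe variants bracketing the update
> * in arch_local_irq_save() and arch_local_irq_restore().
> */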
__attribute__((section(".data..percpu" ""))) __typeof__(struct vm_event_state) vm_event_states; > >static inline __attribute__((always_inline)) void __count_vm_event(enum vm_event_item item) >{ > do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); >} > >static inline __attribute__((always_inline)) void count_vm_event(enum vm_event_item item) >{ > do { do { const void *__vpp_verify = (typeof(&(((vm_event_states.event[item])))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof(((vm_event_states.event[item])))) { case 1: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 2: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; 
(void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 4: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 8: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&((((vm_event_states.event[item])))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))); (typeof((typeof(*(&((((vm_event_states.event[item])))))) *)(&((((vm_event_states.event[item]))))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((1)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); >} > >static inline __attribute__((always_inline)) void __count_vm_events(enum vm_event_item item, long delta) >{ > do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); } while (0);break; case 2: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); 
}); }) += ((delta)); } while (0);break; case 4: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); } while (0);break; case 8: do { *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); >} > >static inline __attribute__((always_inline)) void count_vm_events(enum vm_event_item item, long delta) >{ > do { do { const void *__vpp_verify = (typeof(&((vm_event_states.event[item]))))((void *)0); (void)__vpp_verify; } while (0); switch(sizeof((vm_event_states.event[item]))) { case 1: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 2: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 4: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); do { ({ 
unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; case 8: do { unsigned long flags; do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); *({ do { const void *__vpp_verify = (typeof((&(((vm_event_states.event[item]))))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))); (typeof((typeof(*(&(((vm_event_states.event[item]))))) *)(&(((vm_event_states.event[item])))))) (__ptr + (((__per_cpu_offset[(current_thread_info()->cpu)])))); }); }) += ((delta)); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } while (0);break; default: __bad_size_call_parameter();break; } } while (0); >} > >extern void all_vm_events(unsigned long *); > >extern void vm_events_fold_cpu(int cpu); > >extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; > >static inline __attribute__((always_inline)) void zone_page_state_add(long x, struct zone *zone, > enum zone_stat_item item) >{ > atomic_long_add(x, &zone->vm_stat[item]); > atomic_long_add(x, &vm_stat[item]); >} > >static inline __attribute__((always_inline)) unsigned long global_page_state(enum zone_stat_item item) >{ > long x = atomic_long_read(&vm_stat[item]); > > if (x < 0) > x = 0; > > return x; >} > >static inline __attribute__((always_inline)) unsigned long zone_page_state(struct zone *zone, > enum zone_stat_item item) >{ > long x = atomic_long_read(&zone->vm_stat[item]); > > if (x < 0) > x = 0; > > return x; >} > > > > > > > >static inline __attribute__((always_inline)) unsigned long zone_page_state_snapshot(struct zone *zone, > enum zone_stat_item item) >{ > long x = atomic_long_read(&zone->vm_stat[item]); > > > int cpu; > for (((cpu)) = -1; ((cpu)) = cpumask_next(((cpu)), (cpu_online_mask)), ((cpu)) < nr_cpu_ids;) > x += ({ do { const void *__vpp_verify = (typeof(((zone->pageset))))((void *)0); (void)__vpp_verify; } while (0); ({ unsigned long __ptr; __asm__ ("" : "=r"(__ptr) : "0"((typeof(*((zone->pageset))) *)((zone->pageset)))); (typeof((typeof(*((zone->pageset))) *)((zone->pageset)))) (__ptr + (((__per_cpu_offset[(cpu)])))); }); })->vm_stat_diff[item]; > > if (x < 0) > x = 0; > > return x; >} > >extern unsigned long global_reclaimable_pages(void); >extern unsigned long zone_reclaimable_pages(struct zone *zone); > >static inline __attribute__((always_inline)) void zap_zone_vm_stats(struct zone *zone) >{ > ({ void *__p = (zone->vm_stat); size_t __n = sizeof(zone->vm_stat); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); >} > >extern void inc_zone_state(struct zone *, enum zone_stat_item); > > >void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int); >void __inc_zone_page_state(struct page *, enum zone_stat_item); >void __dec_zone_page_state(struct page *, enum zone_stat_item); > >void mod_zone_page_state(struct zone *, enum zone_stat_item, int); >void inc_zone_page_state(struct page *, enum zone_stat_item); >void dec_zone_page_state(struct page *, enum zone_stat_item); > >extern void inc_zone_state(struct zone *, enum zone_stat_item); >extern void __inc_zone_state(struct zone *, enum zone_stat_item); >extern void dec_zone_state(struct zone *, enum 
zone_stat_item); >extern void __dec_zone_state(struct zone *, enum zone_stat_item); > >void refresh_cpu_vm_stats(int); >void refresh_zone_stat_thresholds(void); > >int calculate_pressure_threshold(struct zone *zone); >int calculate_normal_threshold(struct zone *zone); >void set_pgdat_percpu_threshold(pg_data_t *pgdat, > int (*calculate_pressure)(struct zone *)); > >extern const char * const vmstat_text[]; > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void *lowmem_page_address(const struct page *page) >{ > return ((void *)__phys_to_virt((unsigned long)(((phys_addr_t)(((unsigned long)((page) - mem_map) + (__pv_phys_offset >> 12))) << 12)))); >} > >void *page_address(const struct page *page); >void set_page_address(struct page *page, void *virtual); >void page_address_init(void); > >extern struct address_space swapper_space; >static inline __attribute__((always_inline)) struct address_space *page_mapping(struct page *page) >{ > struct address_space *mapping = page->mapping; > > do { (void)(PageSlab(page)); } while (0); > if (__builtin_expect(!!(PageSwapCache(page)), 0)) > mapping = &swapper_space; > else if ((unsigned long)mapping & 1) > mapping = ((void *)0); > return mapping; >} > > >static inline __attribute__((always_inline)) void *page_rmapping(struct page *page) >{ > return (void *)((unsigned long)page->mapping & ~(1 | 2)); >} > >static inline __attribute__((always_inline)) int PageAnon(struct page *page) >{ > return ((unsigned long)page->mapping & 1) != 0; >} > > > > > >static inline __attribute__((always_inline)) unsigned long page_index(struct page *page) >{ > if (__builtin_expect(!!(PageSwapCache(page)), 0)) > return ((page)->private); > return page->index; >} > > > > >static inline __attribute__((always_inline)) int page_mapped(struct page *page) >{ > return (*(volatile int *)&(&(page)->_mapcount)->counter) >= 0; >} > >extern void pagefault_out_of_memory(void); > >extern void show_free_areas(unsigned int flags); >extern bool skip_free_areas_node(unsigned int flags, int nid); > >int shmem_lock(struct file *file, int lock, struct user_struct *user); >struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags); >void shmem_set_file(struct vm_area_struct *vma, struct file *file); >int shmem_zero_setup(struct vm_area_struct *); > >extern int can_do_mlock(void); >extern int user_shm_lock(size_t, struct user_struct *); >extern void user_shm_unlock(size_t, struct user_struct *); > > > > >struct zap_details { > struct vm_area_struct *nonlinear_vma; > struct address_space *check_mapping; > unsigned long first_index; > unsigned long last_index; >}; > >struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, > pte_t pte); > >int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address, > unsigned long size); >void zap_page_range(struct vm_area_struct *vma, unsigned long address, > unsigned long size, struct zap_details *); >void unmap_vmas(struct mmu_gather *tlb, > struct vm_area_struct *start_vma, unsigned long start_addr, > unsigned long end_addr, unsigned long *nr_accounted, > struct zap_details *); > >struct mm_walk { > int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *); > int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *); > int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *); > int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *); > int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *); > int 
(*hugetlb_entry)(pte_t *, unsigned long, > unsigned long, unsigned long, struct mm_walk *); > struct mm_struct *mm; > void *private; >}; > >int walk_page_range(unsigned long addr, unsigned long end, > struct mm_walk *walk); >void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, > unsigned long end, unsigned long floor, unsigned long ceiling); >int copy_page_range(struct mm_struct *dst, struct mm_struct *src, > struct vm_area_struct *vma); >void unmap_mapping_range(struct address_space *mapping, > loff_t const holebegin, loff_t const holelen, int even_cows); >int follow_pfn(struct vm_area_struct *vma, unsigned long address, > unsigned long *pfn); >int follow_phys(struct vm_area_struct *vma, unsigned long address, > unsigned int flags, unsigned long *prot, resource_size_t *phys); >int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, > void *buf, int len, int write); > >static inline __attribute__((always_inline)) void unmap_shared_mapping_range(struct address_space *mapping, > loff_t const holebegin, loff_t const holelen) >{ > unmap_mapping_range(mapping, holebegin, holelen, 0); >} > >extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); >extern void truncate_setsize(struct inode *inode, loff_t newsize); >extern int vmtruncate(struct inode *inode, loff_t offset); >extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end); >void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); >int truncate_inode_page(struct address_space *mapping, struct page *page); >int generic_error_remove_page(struct address_space *mapping, struct page *page); > >int invalidate_inode_page(struct page *page); > > >extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, > unsigned long address, unsigned int flags); >extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, > unsigned long address, unsigned int fault_flags); > >extern int make_pages_present(unsigned long addr, unsigned long end); >extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); >extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, > void *buf, int len, int write); > >int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, > unsigned long start, int len, unsigned int foll_flags, > struct page **pages, struct vm_area_struct **vmas, > int *nonblocking); >int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, > unsigned long start, int nr_pages, int write, int force, > struct page **pages, struct vm_area_struct **vmas); >int get_user_pages_fast(unsigned long start, int nr_pages, int write, > struct page **pages); >struct page *get_dump_page(unsigned long addr); > >extern int try_to_release_page(struct page * page, gfp_t gfp_mask); >extern void do_invalidatepage(struct page *page, unsigned long offset); > >int __set_page_dirty_nobuffers(struct page *page); >int __set_page_dirty_no_writeback(struct page *page); >int redirty_page_for_writepage(struct writeback_control *wbc, > struct page *page); >void account_page_dirtied(struct page *page, struct address_space *mapping); >void account_page_writeback(struct page *page); >int set_page_dirty(struct page *page); >int set_page_dirty_lock(struct page *page); >int clear_page_dirty_for_io(struct page *page); > > >static inline __attribute__((always_inline)) int vma_growsdown(struct vm_area_struct *vma, unsigned long addr) >{ > return vma && (vma->vm_end == addr) && (vma->vm_flags & 0x00000100); 
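>/*
> * 0x00000100 is the expanded VM_GROWSDOWN. In the *_growsup helpers
> * just below, the masks read "& 0x00000000" because VM_GROWSUP expands
> * to 0 when CONFIG_STACK_GROWSUP is unset (as on ARM), so those helpers
> * constant-fold to false.
> */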
>} > >static inline __attribute__((always_inline)) int stack_guard_page_start(struct vm_area_struct *vma, > unsigned long addr) >{ > return (vma->vm_flags & 0x00000100) && > (vma->vm_start == addr) && > !vma_growsdown(vma->vm_prev, addr); >} > > >static inline __attribute__((always_inline)) int vma_growsup(struct vm_area_struct *vma, unsigned long addr) >{ > return vma && (vma->vm_start == addr) && (vma->vm_flags & 0x00000000); >} > >static inline __attribute__((always_inline)) int stack_guard_page_end(struct vm_area_struct *vma, > unsigned long addr) >{ > return (vma->vm_flags & 0x00000000) && > (vma->vm_end == addr) && > !vma_growsup(vma->vm_next, addr); >} > >extern pid_t >vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); > >extern unsigned long move_page_tables(struct vm_area_struct *vma, > unsigned long old_addr, struct vm_area_struct *new_vma, > unsigned long new_addr, unsigned long len); >extern unsigned long do_mremap(unsigned long addr, > unsigned long old_len, unsigned long new_len, > unsigned long flags, unsigned long new_addr); >extern int mprotect_fixup(struct vm_area_struct *vma, > struct vm_area_struct **pprev, unsigned long start, > unsigned long end, unsigned long newflags); > > > > >int __get_user_pages_fast(unsigned long start, int nr_pages, int write, > struct page **pages); > > > >static inline __attribute__((always_inline)) unsigned long get_mm_counter(struct mm_struct *mm, int member) >{ > long val = atomic_long_read(&mm->rss_stat.count[member]); > > return (unsigned long)val; >} > >static inline __attribute__((always_inline)) void add_mm_counter(struct mm_struct *mm, int member, long value) >{ > atomic_long_add(value, &mm->rss_stat.count[member]); >} > >static inline __attribute__((always_inline)) void inc_mm_counter(struct mm_struct *mm, int member) >{ > atomic_long_inc(&mm->rss_stat.count[member]); >} > >static inline __attribute__((always_inline)) void dec_mm_counter(struct mm_struct *mm, int member) >{ > atomic_long_dec(&mm->rss_stat.count[member]); >} > >static inline __attribute__((always_inline)) unsigned long get_mm_rss(struct mm_struct *mm) >{ > return get_mm_counter(mm, MM_FILEPAGES) + > get_mm_counter(mm, MM_ANONPAGES); >} > >static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_rss(struct mm_struct *mm) >{ > return ({ typeof(mm->hiwater_rss) _max1 = (mm->hiwater_rss); typeof(get_mm_rss(mm)) _max2 = (get_mm_rss(mm)); (void) (&_max1 == &_max2); _max1 > _max2 ? _max1 : _max2; }); >} > >static inline __attribute__((always_inline)) unsigned long get_mm_hiwater_vm(struct mm_struct *mm) >{ > return ({ typeof(mm->hiwater_vm) _max1 = (mm->hiwater_vm); typeof(mm->total_vm) _max2 = (mm->total_vm); (void) (&_max1 == &_max2); _max1 > _max2 ? 
_max1 : _max2; }); >} > >static inline __attribute__((always_inline)) void update_hiwater_rss(struct mm_struct *mm) >{ > unsigned long _rss = get_mm_rss(mm); > > if ((mm)->hiwater_rss < _rss) > (mm)->hiwater_rss = _rss; >} > >static inline __attribute__((always_inline)) void update_hiwater_vm(struct mm_struct *mm) >{ > if (mm->hiwater_vm < mm->total_vm) > mm->hiwater_vm = mm->total_vm; >} > >static inline __attribute__((always_inline)) void setmax_mm_hiwater_rss(unsigned long *maxrss, > struct mm_struct *mm) >{ > unsigned long hiwater_rss = get_mm_hiwater_rss(mm); > > if (*maxrss < hiwater_rss) > *maxrss = hiwater_rss; >} > > > > >static inline __attribute__((always_inline)) void sync_mm_rss(struct mm_struct *mm) >{ >} > > >int vma_wants_writenotify(struct vm_area_struct *vma); > >extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, > spinlock_t **ptl); >static inline __attribute__((always_inline)) pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, > spinlock_t **ptl) >{ > pte_t *ptep; > (ptep = __get_locked_pte(mm, addr, ptl)); > return ptep; >} > > >static inline __attribute__((always_inline)) int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, > unsigned long address) >{ > return 0; >} > >int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address); > > >int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, > pmd_t *pmd, unsigned long address); >int __pte_alloc_kernel(pmd_t *pmd, unsigned long address); > > > > > > >static inline __attribute__((always_inline)) pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) >{ > return (__builtin_expect(!!(pgd_none(*pgd)), 0) && __pud_alloc(mm, pgd, address))? > ((void *)0): pud_offset(pgd, address); >} > >static inline __attribute__((always_inline)) pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) >{ > return (__builtin_expect(!!((0)), 0) && __pmd_alloc(mm, pud, address))? 
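>/*
> * The bare (0) above is the folded pud_none(): with ARM's classic
> * two-level page tables the pmd level is folded away, so __pmd_alloc()
> * is unreachable here and pmd_alloc() reduces to pmd_offset().
> */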
> ((void *)0): pmd_offset(pud, address); >} > >static inline __attribute__((always_inline)) void pgtable_page_ctor(struct page *page) >{ > do {} while (0); > inc_zone_page_state(page, NR_PAGETABLE); >} > >static inline __attribute__((always_inline)) void pgtable_page_dtor(struct page *page) >{ > do {} while (0); > dec_zone_page_state(page, NR_PAGETABLE); >} > >extern void free_area_init(unsigned long * zones_size); >extern void free_area_init_node(int nid, unsigned long * zones_size, > unsigned long zone_start_pfn, unsigned long *zholes_size); >extern void free_initmem(void); > >static inline __attribute__((always_inline)) int __early_pfn_to_nid(unsigned long pfn) >{ > return 0; >} > >extern void set_dma_reserve(unsigned long new_dma_reserve); >extern void memmap_init_zone(unsigned long, int, unsigned long, > unsigned long, enum memmap_context); >extern void setup_per_zone_wmarks(void); >extern int __attribute__ ((__section__(".meminit.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) init_per_zone_wmark_min(void); >extern void mem_init(void); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mmap_init(void); >extern void show_mem(unsigned int flags); >extern void si_meminfo(struct sysinfo * val); >extern void si_meminfo_node(struct sysinfo *val, int nid); >extern int after_bootmem; > >extern __attribute__((format(printf, 3, 4))) >void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...); > >extern void setup_per_cpu_pageset(void); > >extern void zone_pcp_update(struct zone *zone); > > >extern atomic_long_t mmap_pages_allocated; >extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); > > >void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); >void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); >void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); >struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, > struct prio_tree_iter *iter); > > > > > >static inline __attribute__((always_inline)) void vma_nonlinear_insert(struct vm_area_struct *vma, > struct list_head *list) >{ > vma->shared.vm_set.parent = ((void *)0); > list_add_tail(&vma->shared.vm_set.list, list); >} > > >extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); >extern int vma_adjust(struct vm_area_struct *vma, unsigned long start, > unsigned long end, unsigned long pgoff, struct vm_area_struct *insert); >extern struct vm_area_struct *vma_merge(struct mm_struct *, > struct vm_area_struct *prev, unsigned long addr, unsigned long end, > unsigned long vm_flags, struct anon_vma *, struct file *, unsigned long, > struct mempolicy *, const char *); >extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); >extern int split_vma(struct mm_struct *, > struct vm_area_struct *, unsigned long addr, int new_below); >extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); >extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, > struct rb_node **, struct rb_node *); >extern void unlink_file_vma(struct vm_area_struct *); >extern struct vm_area_struct *copy_vma(struct vm_area_struct **, > unsigned long addr, unsigned long len, unsigned long pgoff); >extern void exit_mmap(struct mm_struct *); > >extern int mm_take_all_locks(struct mm_struct *mm); >extern void mm_drop_all_locks(struct mm_struct *mm); > > >extern void added_exe_file_vma(struct mm_struct 
*mm); >extern void removed_exe_file_vma(struct mm_struct *mm); >extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); >extern struct file *get_mm_exe_file(struct mm_struct *mm); > >extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); >extern int install_special_mapping(struct mm_struct *mm, > unsigned long addr, unsigned long len, > unsigned long flags, struct page **pages); > >extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); > >extern unsigned long mmap_region(struct file *file, unsigned long addr, > unsigned long len, unsigned long flags, > vm_flags_t vm_flags, unsigned long pgoff); >extern unsigned long do_mmap(struct file *, unsigned long, > unsigned long, unsigned long, > unsigned long, unsigned long); >extern int do_munmap(struct mm_struct *, unsigned long, size_t); > > >extern unsigned long vm_brk(unsigned long, unsigned long); >extern int vm_munmap(unsigned long, size_t); >extern unsigned long vm_mmap(struct file *, unsigned long, > unsigned long, unsigned long, > unsigned long, unsigned long); > > >extern void truncate_inode_pages(struct address_space *, loff_t); >extern void truncate_inode_pages_range(struct address_space *, > loff_t lstart, loff_t lend); > > >extern int filemap_fault(struct vm_area_struct *, struct vm_fault *); > > >int write_one_page(struct page *page, int wait); >void task_dirty_inc(struct task_struct *tsk); > > > > > >int force_page_cache_readahead(struct address_space *mapping, struct file *filp, > unsigned long offset, unsigned long nr_to_read); > >void page_cache_sync_readahead(struct address_space *mapping, > struct file_ra_state *ra, > struct file *filp, > unsigned long offset, > unsigned long size); > >void page_cache_async_readahead(struct address_space *mapping, > struct file_ra_state *ra, > struct file *filp, > struct page *pg, > unsigned long offset, > unsigned long size); > >unsigned long max_sane_readahead(unsigned long nr); >unsigned long ra_submit(struct file_ra_state *ra, > struct address_space *mapping, > struct file *filp); > > >extern int expand_stack(struct vm_area_struct *vma, unsigned long address); > > >extern int expand_downwards(struct vm_area_struct *vma, > unsigned long address); > > > > > > > >extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); >extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, > struct vm_area_struct **pprev); > > > >static inline __attribute__((always_inline)) struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) >{ > struct vm_area_struct * vma = find_vma(mm,start_addr); > > if (vma && end_addr <= vma->vm_start) > vma = ((void *)0); > return vma; >} > >static inline __attribute__((always_inline)) unsigned long vma_pages(struct vm_area_struct *vma) >{ > return (vma->vm_end - vma->vm_start) >> 12; >} > > >static inline __attribute__((always_inline)) struct vm_area_struct *find_exact_vma(struct mm_struct *mm, > unsigned long vm_start, unsigned long vm_end) >{ > struct vm_area_struct *vma = find_vma(mm, vm_start); > > if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end)) > vma = ((void *)0); > > return vma; >} > > >pgprot_t vm_get_page_prot(unsigned long vm_flags); > > > > > > > >struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); >int remap_pfn_range(struct vm_area_struct *, unsigned long addr, > unsigned long pfn, unsigned long size, 
pgprot_t); >int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); >int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, > unsigned long pfn); >int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, > unsigned long pfn); > >struct page *follow_page(struct vm_area_struct *, unsigned long address, > unsigned int foll_flags); > >typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, > void *data); >extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, > unsigned long size, pte_fn_t fn, void *data); > > >void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long); > >static inline __attribute__((always_inline)) void >kernel_map_pages(struct page *page, int numpages, int enable) {} > > > > > >extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm); > >int in_gate_area_no_mm(unsigned long addr); >int in_gate_area(struct mm_struct *mm, unsigned long addr); > > > > > >int drop_caches_sysctl_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); >unsigned long shrink_slab(struct shrink_control *shrink, > unsigned long nr_pages_scanned, > unsigned long lru_pages); > > > > >extern int randomize_va_space; > > >const char * arch_vma_name(struct vm_area_struct *vma); >void print_vma_addr(char *prefix, unsigned long rip); > >void sparse_mem_maps_populate_node(struct page **map_map, > unsigned long pnum_begin, > unsigned long pnum_end, > unsigned long map_count, > int nodeid); > >struct page *sparse_mem_map_populate(unsigned long pnum, int nid); >pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); >pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node); >pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); >pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); >void *vmemmap_alloc_block(unsigned long size, int node); >void *vmemmap_alloc_block_buf(unsigned long size, int node); >void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); >int vmemmap_populate_basepages(struct page *start_page, > unsigned long pages, int node); >int vmemmap_populate(struct page *start_page, unsigned long pages, int node); >void vmemmap_populate_print_last(void); > > >enum mf_flags { > MF_COUNT_INCREASED = 1 << 0, > MF_ACTION_REQUIRED = 1 << 1, > MF_MUST_KILL = 1 << 2, >}; >extern int memory_failure(unsigned long pfn, int trapno, int flags); >extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); >extern int unpoison_memory(unsigned long pfn); >extern int sysctl_memory_failure_early_kill; >extern int sysctl_memory_failure_recovery; >extern void shake_page(struct page *p, int access); >extern atomic_long_t mce_bad_pages; >extern int soft_offline_page(struct page *page, int flags); > >extern void dump_page(struct page *page); > >static inline __attribute__((always_inline)) unsigned int debug_guardpage_minorder(void) { return 0; } >static inline __attribute__((always_inline)) bool page_is_guard(struct page *page) { return false; } > > > >struct seq_operations; >struct file; >struct path; >struct inode; >struct dentry; > >struct seq_file { > char *buf; > size_t size; > size_t from; > size_t count; > loff_t index; > loff_t read_pos; > u64 version; > struct mutex lock; > const struct seq_operations *op; > int poll_event; > void *private; >}; > >struct seq_operations { > void * (*start) (struct seq_file *m, loff_t *pos); > void (*stop) (struct seq_file *m, void *v); > void * (*next) (struct seq_file *m, void *v, loff_t *pos); 
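>/*
> * seq_file iterator contract: start() returns the element at *pos (or
> * NULL at end), next() advances the iterator, stop() finishes a
> * traversal, and show() below formats a single element into the buffer.
> */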
> int (*show) (struct seq_file *m, void *v); >}; > >static inline __attribute__((always_inline)) size_t seq_get_buf(struct seq_file *m, char **bufp) >{ > do { if (__builtin_expect(!!(m->count > m->size), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/seq_file.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "50" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); > if (m->count < m->size) > *bufp = m->buf + m->count; > else > *bufp = ((void *)0); > > return m->size - m->count; >} > >static inline __attribute__((always_inline)) void seq_commit(struct seq_file *m, int num) >{ > if (num < 0) { > m->count = m->size; > } else { > do { if (__builtin_expect(!!(m->count + num > m->size), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/seq_file.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "73" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); > m->count += num; > } >} > >char *mangle_path(char *s, const char *p, const char *esc); >int seq_open(struct file *, const struct seq_operations *); >ssize_t seq_read(struct file *, char *, size_t, loff_t *); >loff_t seq_lseek(struct file *, loff_t, int); >int seq_release(struct inode *, struct file *); >int seq_escape(struct seq_file *, const char *, const char *); >int seq_putc(struct seq_file *m, char c); >int seq_puts(struct seq_file *m, const char *s); >int seq_write(struct seq_file *seq, const void *data, size_t len); > >__attribute__((format(printf, 2, 3))) int seq_printf(struct seq_file *, const char *, ...); > >int seq_path(struct seq_file *, const struct path *, const char *); >int seq_dentry(struct seq_file *, struct dentry *, const char *); >int seq_path_root(struct seq_file *m, const struct path *path, > const struct path *root, const char *esc); >int seq_bitmap(struct seq_file *m, const unsigned long *bits, > unsigned int nr_bits); >static inline __attribute__((always_inline)) int seq_cpumask(struct seq_file *m, const struct cpumask *mask) >{ > return seq_bitmap(m, ((mask)->bits), nr_cpu_ids); >} > >static inline __attribute__((always_inline)) int seq_nodemask(struct seq_file *m, nodemask_t *mask) >{ > return seq_bitmap(m, mask->bits, (1 << 0)); >} > >int seq_bitmap_list(struct seq_file *m, const unsigned long *bits, > unsigned int nr_bits); > >static inline __attribute__((always_inline)) int seq_cpumask_list(struct seq_file *m, > const struct cpumask *mask) >{ > return seq_bitmap_list(m, ((mask)->bits), nr_cpu_ids); >} > >static inline __attribute__((always_inline)) int seq_nodemask_list(struct seq_file *m, nodemask_t *mask) >{ > return seq_bitmap_list(m, mask->bits, (1 << 0)); >} > >int single_open(struct file *, int (*)(struct seq_file *, void *), void *); >int single_release(struct inode *, struct file *); >void *__seq_open_private(struct file *, const struct seq_operations *, int); >int seq_open_private(struct file *, const struct seq_operations *, int); >int seq_release_private(struct inode *, struct file *); >int seq_put_decimal_ull(struct seq_file *m, char delimiter, > unsigned long long num); >int seq_put_decimal_ll(struct seq_file *m, char delimiter, > long long num); > > > > > > >extern struct list_head *seq_list_start(struct list_head *head, > loff_t pos); >extern struct list_head *seq_list_start_head(struct 
list_head *head, > loff_t pos); >extern struct list_head *seq_list_next(void *v, struct list_head *head, > loff_t *ppos); > > > > > >extern struct hlist_node *seq_hlist_start(struct hlist_head *head, > loff_t pos); >extern struct hlist_node *seq_hlist_start_head(struct hlist_head *head, > loff_t pos); >extern struct hlist_node *seq_hlist_next(void *v, struct hlist_head *head, > loff_t *ppos); > >extern struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, > loff_t pos); >extern struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, > loff_t pos); >extern struct hlist_node *seq_hlist_next_rcu(void *v, > struct hlist_head *head, > loff_t *ppos); > > >struct ring_buffer; >struct ring_buffer_iter; > > > > >struct ring_buffer_event { > ; > u32 type_len:5, time_delta:27; > ; > > u32 array[]; >}; > >enum ring_buffer_type { > RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28, > RINGBUF_TYPE_PADDING, > RINGBUF_TYPE_TIME_EXTEND, > > RINGBUF_TYPE_TIME_STAMP, >}; > >unsigned ring_buffer_event_length(struct ring_buffer_event *event); >void *ring_buffer_event_data(struct ring_buffer_event *event); > >void ring_buffer_discard_commit(struct ring_buffer *buffer, > struct ring_buffer_event *event); > > > > >struct ring_buffer * >__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); > >void ring_buffer_free(struct ring_buffer *buffer); > >int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size); > >void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val); > >struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer, > unsigned long length); >int ring_buffer_unlock_commit(struct ring_buffer *buffer, > struct ring_buffer_event *event); >int ring_buffer_write(struct ring_buffer *buffer, > unsigned long length, void *data); > >struct ring_buffer_event * >ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts, > unsigned long *lost_events); >struct ring_buffer_event * >ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, > unsigned long *lost_events); > >struct ring_buffer_iter * >ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); >void ring_buffer_read_prepare_sync(void); >void ring_buffer_read_start(struct ring_buffer_iter *iter); >void ring_buffer_read_finish(struct ring_buffer_iter *iter); > >struct ring_buffer_event * >ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts); >struct ring_buffer_event * >ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts); >void ring_buffer_iter_reset(struct ring_buffer_iter *iter); >int ring_buffer_iter_empty(struct ring_buffer_iter *iter); > >unsigned long ring_buffer_size(struct ring_buffer *buffer); > >void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu); >void ring_buffer_reset(struct ring_buffer *buffer); > > > > > >static inline __attribute__((always_inline)) int >ring_buffer_swap_cpu(struct ring_buffer *buffer_a, > struct ring_buffer *buffer_b, int cpu) >{ > return -19; >} > > >int ring_buffer_empty(struct ring_buffer *buffer); >int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu); > >void ring_buffer_record_disable(struct ring_buffer *buffer); >void ring_buffer_record_enable(struct ring_buffer *buffer); >void ring_buffer_record_off(struct ring_buffer *buffer); >void ring_buffer_record_on(struct ring_buffer *buffer); >int ring_buffer_record_is_on(struct ring_buffer *buffer); >void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu); >void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int 
cpu); > >unsigned long ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu); >unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu); >unsigned long ring_buffer_entries(struct ring_buffer *buffer); >unsigned long ring_buffer_overruns(struct ring_buffer *buffer); >unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu); >unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu); >unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu); > >u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu); >void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, > int cpu, u64 *ts); >void ring_buffer_set_clock(struct ring_buffer *buffer, > u64 (*clock)(void)); > >size_t ring_buffer_page_len(void *page); > > >void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu); >void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data); >int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, > size_t len, int cpu, int full); > >struct trace_seq; > >int ring_buffer_print_entry_header(struct trace_seq *s); >int ring_buffer_print_page_header(struct trace_seq *s); > >enum ring_buffer_flags { > RB_FL_OVERWRITE = 1 << 0, >}; > > > > > > > > > > > > > > > > >extern unsigned int __invalid_size_argument_for_IOC; > > > > > >struct bio_set; >struct bio; >struct bio_integrity_payload; >struct page; >struct block_device; >typedef void (bio_end_io_t) (struct bio *, int); >typedef void (bio_destructor_t) (struct bio *); > > > > >struct bio_vec { > struct page *bv_page; > unsigned int bv_len; > unsigned int bv_offset; >}; > > > > > >struct bio { > sector_t bi_sector; > > struct bio *bi_next; > struct block_device *bi_bdev; > unsigned long bi_flags; > unsigned long bi_rw; > > > > unsigned short bi_vcnt; > unsigned short bi_idx; > > > > > unsigned int bi_phys_segments; > > unsigned int bi_size; > > > > > > unsigned int bi_seg_front_size; > unsigned int bi_seg_back_size; > > unsigned int bi_max_vecs; > > atomic_t bi_cnt; > > struct bio_vec *bi_io_vec; > > bio_end_io_t *bi_end_io; > > void *bi_private; > > > > > bio_destructor_t *bi_destructor; > > > > > > > struct bio_vec bi_inline_vecs[0]; >}; > >enum rq_flag_bits { > > __REQ_WRITE, > __REQ_FAILFAST_DEV, > __REQ_FAILFAST_TRANSPORT, > __REQ_FAILFAST_DRIVER, > > __REQ_SYNC, > __REQ_META, > __REQ_PRIO, > __REQ_DISCARD, > __REQ_SECURE, > > __REQ_NOIDLE, > __REQ_FUA, > __REQ_FLUSH, > > > __REQ_RAHEAD, > __REQ_THROTTLED, > > > > __REQ_SORTED, > __REQ_SOFTBARRIER, > __REQ_NOMERGE, > __REQ_STARTED, > __REQ_DONTPREP, > __REQ_QUEUED, > __REQ_ELVPRIV, > __REQ_FAILED, > __REQ_QUIET, > __REQ_PREEMPT, > __REQ_ALLOCED, > __REQ_COPY_USER, > __REQ_FLUSH_SEQ, > __REQ_IO_STAT, > __REQ_MIXED_MERGE, > __REQ_NR_BITS, >}; > > >struct fstrim_range { > __u64 start; > __u64 len; > __u64 minlen; >}; > > >struct files_stat_struct { > unsigned long nr_files; > unsigned long nr_free_files; > unsigned long max_files; >}; > >struct inodes_stat_t { > int nr_inodes; > int nr_unused; > int dummy[5]; >}; > > > >static inline __attribute__((always_inline)) int old_valid_dev(dev_t dev) >{ > return ((unsigned int) ((dev) >> 20)) < 256 && ((unsigned int) ((dev) & ((1U << 20) - 1))) < 256; >} > >static inline __attribute__((always_inline)) u16 old_encode_dev(dev_t dev) >{ > return (((unsigned int) ((dev) >> 20)) << 8) | ((unsigned int) ((dev) & ((1U << 20) - 1))); >} > >static inline __attribute__((always_inline)) dev_t old_decode_dev(u16 val) >{ > return ((((val >> 8) & 
255) << 20) | (val & 255)); >} > >static inline __attribute__((always_inline)) int new_valid_dev(dev_t dev) >{ > return 1; >} > >static inline __attribute__((always_inline)) u32 new_encode_dev(dev_t dev) >{ > unsigned major = ((unsigned int) ((dev) >> 20)); > unsigned minor = ((unsigned int) ((dev) & ((1U << 20) - 1))); > return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12); >} > >static inline __attribute__((always_inline)) dev_t new_decode_dev(u32 dev) >{ > unsigned major = (dev & 0xfff00) >> 8; > unsigned minor = (dev & 0xff) | ((dev >> 12) & 0xfff00); > return (((major) << 20) | (minor)); >} > >static inline __attribute__((always_inline)) int huge_valid_dev(dev_t dev) >{ > return 1; >} > >static inline __attribute__((always_inline)) u64 huge_encode_dev(dev_t dev) >{ > return new_encode_dev(dev); >} > >static inline __attribute__((always_inline)) dev_t huge_decode_dev(u64 dev) >{ > return new_decode_dev(dev); >} > >static inline __attribute__((always_inline)) int sysv_valid_dev(dev_t dev) >{ > return ((unsigned int) ((dev) >> 20)) < (1<<14) && ((unsigned int) ((dev) & ((1U << 20) - 1))) < (1<<18); >} > >static inline __attribute__((always_inline)) u32 sysv_encode_dev(dev_t dev) >{ > return ((unsigned int) ((dev) & ((1U << 20) - 1))) | (((unsigned int) ((dev) >> 20)) << 18); >} > >static inline __attribute__((always_inline)) unsigned sysv_major(u32 dev) >{ > return (dev >> 18) & 0x3fff; >} > >static inline __attribute__((always_inline)) unsigned sysv_minor(u32 dev) >{ > return dev & 0x3ffff; >} > > > > > > > > > >static inline __attribute__((always_inline)) void __list_add_rcu(struct list_head *new, > struct list_head *prev, struct list_head *next) >{ > new->next = next; > new->prev = prev; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct list_head **)(&(prev)->next))))) = (typeof(*(new)) *)((new)); }); > next->prev = new; >} > >static inline __attribute__((always_inline)) void list_add_rcu(struct list_head *new, struct list_head *head) >{ > __list_add_rcu(new, head, head->next); >} > >static inline __attribute__((always_inline)) void list_add_tail_rcu(struct list_head *new, > struct list_head *head) >{ > __list_add_rcu(new, head->prev, head); >} > >static inline __attribute__((always_inline)) void list_del_rcu(struct list_head *entry) >{ > __list_del(entry->prev, entry->next); > entry->prev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_del_init_rcu(struct hlist_node *n) >{ > if (!hlist_unhashed(n)) { > __hlist_del(n); > n->pprev = ((void *)0); > } >} > >static inline __attribute__((always_inline)) void list_replace_rcu(struct list_head *old, > struct list_head *new) >{ > new->next = old->next; > new->prev = old->prev; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct list_head **)(&(new->prev)->next))))) = (typeof(*(new)) *)((new)); }); > new->next->prev = new; > old->prev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void list_splice_init_rcu(struct list_head *list, > struct list_head *head, > void (*sync)(void)) >{ > struct list_head *first = list->next; > struct list_head *last = list->prev; > struct list_head *at = head->next; > > if (list_empty(list)) > return; > > > > INIT_LIST_HEAD(list); > > sync(); > > last->next = at; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct list_head **)(&(head)->next))))) = (typeof(*(first)) *)((first)); }); > first->prev = head; > at->prev = last; >} > >static inline __attribute__((always_inline)) void 
hlist_del_rcu(struct hlist_node *n) >{ > __hlist_del(n); > n->pprev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_replace_rcu(struct hlist_node *old, > struct hlist_node *new) >{ > struct hlist_node *next = old->next; > > new->next = next; > new->pprev = old->pprev; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); ((*(struct hlist_node **)new->pprev)) = (typeof(*(new)) *)((new)); }); > if (next) > new->next->pprev = &new->next; > old->pprev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_add_head_rcu(struct hlist_node *n, > struct hlist_head *h) >{ > struct hlist_node *first = h->first; > > n->next = first; > n->pprev = &h->first; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct hlist_node **)(&(h)->first))))) = (typeof(*(n)) *)((n)); }); > if (first) > first->pprev = &n->next; >} > >static inline __attribute__((always_inline)) void hlist_add_before_rcu(struct hlist_node *n, > struct hlist_node *next) >{ > n->pprev = next->pprev; > n->next = next; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct hlist_node **)((n)->pprev))))) = (typeof(*(n)) *)((n)); }); > next->pprev = &n->next; >} > >static inline __attribute__((always_inline)) void hlist_add_after_rcu(struct hlist_node *prev, > struct hlist_node *n) >{ > n->next = prev->next; > n->pprev = &prev->next; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); (((*((struct hlist_node **)(&(prev)->next))))) = (typeof(*(n)) *)((n)); }); > if (n->next) > n->next->pprev = &n->next; >} > > > > > > > > > > >struct hlist_bl_head { > struct hlist_bl_node *first; >}; > >struct hlist_bl_node { > struct hlist_bl_node *next, **pprev; >}; > > > >static inline __attribute__((always_inline)) void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) >{ > h->next = ((void *)0); > h->pprev = ((void *)0); >} > > > >static inline __attribute__((always_inline)) int hlist_bl_unhashed(const struct hlist_bl_node *h) >{ > return !h->pprev; >} > >static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) >{ > return (struct hlist_bl_node *) > ((unsigned long)h->first & ~1UL); >} > >static inline __attribute__((always_inline)) void hlist_bl_set_first(struct hlist_bl_head *h, > struct hlist_bl_node *n) >{ > ; > > ; > h->first = (struct hlist_bl_node *)((unsigned long)n | 1UL); >} > >static inline __attribute__((always_inline)) int hlist_bl_empty(const struct hlist_bl_head *h) >{ > return !((unsigned long)h->first & ~1UL); >} > >static inline __attribute__((always_inline)) void hlist_bl_add_head(struct hlist_bl_node *n, > struct hlist_bl_head *h) >{ > struct hlist_bl_node *first = hlist_bl_first(h); > > n->next = first; > if (first) > first->pprev = &n->next; > n->pprev = &h->first; > hlist_bl_set_first(h, n); >} > >static inline __attribute__((always_inline)) void __hlist_bl_del(struct hlist_bl_node *n) >{ > struct hlist_bl_node *next = n->next; > struct hlist_bl_node **pprev = n->pprev; > > ; > > > *pprev = (struct hlist_bl_node *) > ((unsigned long)next | > ((unsigned long)*pprev & 1UL)); > if (next) > next->pprev = pprev; >} > >static inline __attribute__((always_inline)) void hlist_bl_del(struct hlist_bl_node *n) >{ > __hlist_bl_del(n); > n->next = ((void *) 0x00100100 + 0); > n->pprev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_bl_del_init(struct hlist_bl_node *n) >{ > if (!hlist_bl_unhashed(n)) { > __hlist_bl_del(n); > INIT_HLIST_BL_NODE(n); > } >} > >static inline 
__attribute__((always_inline)) void hlist_bl_lock(struct hlist_bl_head *b) >{ > bit_spin_lock(0, (unsigned long *)b); >} > >static inline __attribute__((always_inline)) void hlist_bl_unlock(struct hlist_bl_head *b) >{ > __bit_spin_unlock(0, (unsigned long *)b); >} > > > >static inline __attribute__((always_inline)) void hlist_bl_set_first_rcu(struct hlist_bl_head *h, > struct hlist_bl_node *n) >{ > ; > > ; > ({ __asm__ __volatile__ ("dmb" : : : "memory"); ((h->first)) = (typeof(*((struct hlist_bl_node *)((unsigned long)n | 1UL))) *)(((struct hlist_bl_node *)((unsigned long)n | 1UL))); }) > ; >} > >static inline __attribute__((always_inline)) struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h) >{ > return (struct hlist_bl_node *) > ((unsigned long)({ typeof(*(h->first)) *_________p1 = (typeof(*(h->first))* )(*(volatile typeof((h->first)) *)&((h->first))); do { } while (0); ; do { } while(0); ((typeof(*(h->first)) *)(_________p1)); }) & ~1UL); >} > >static inline __attribute__((always_inline)) void hlist_bl_del_init_rcu(struct hlist_bl_node *n) >{ > if (!hlist_bl_unhashed(n)) { > __hlist_bl_del(n); > n->pprev = ((void *)0); > } >} > >static inline __attribute__((always_inline)) void hlist_bl_del_rcu(struct hlist_bl_node *n) >{ > __hlist_bl_del(n); > n->pprev = ((void *) 0x00200200 + 0); >} > >static inline __attribute__((always_inline)) void hlist_bl_add_head_rcu(struct hlist_bl_node *n, > struct hlist_bl_head *h) >{ > struct hlist_bl_node *first; > > > first = hlist_bl_first(h); > > n->next = first; > if (first) > first->pprev = &n->next; > n->pprev = &h->first; > > > hlist_bl_set_first_rcu(h, n); >} > > > > > > >struct nameidata; >struct path; >struct vfsmount; > >struct qstr { > unsigned int hash; > unsigned int len; > const unsigned char *name; >}; > >struct dentry_stat_t { > int nr_dentry; > int nr_unused; > int age_limit; > int want_pages; > int dummy[2]; >}; >extern struct dentry_stat_t dentry_stat; > > > > > > >static inline __attribute__((always_inline)) unsigned long >partial_name_hash(unsigned long c, unsigned long prevhash) >{ > return (prevhash + (c << 4) + (c >> 4)) * 11; >} > > > > > >static inline __attribute__((always_inline)) unsigned long end_name_hash(unsigned long hash) >{ > return (unsigned int) hash; >} > > >extern unsigned int full_name_hash(const unsigned char *, unsigned int); > >struct dentry { > > unsigned int d_flags; > seqcount_t d_seq; > struct hlist_bl_node d_hash; > struct dentry *d_parent; > struct qstr d_name; > struct inode *d_inode; > > unsigned char d_iname[36]; > > > unsigned int d_count; > spinlock_t d_lock; > const struct dentry_operations *d_op; > struct super_block *d_sb; > unsigned long d_time; > void *d_fsdata; > > struct list_head d_lru; > > > > union { > struct list_head d_child; > struct rcu_head d_rcu; > } d_u; > struct list_head d_subdirs; > struct list_head d_alias; >}; > > > > > > > >enum dentry_d_lock_class >{ > DENTRY_D_LOCK_NORMAL, > DENTRY_D_LOCK_NESTED >}; > >struct dentry_operations { > int (*d_revalidate)(struct dentry *, struct nameidata *); > int (*d_hash)(const struct dentry *, const struct inode *, > struct qstr *); > int (*d_compare)(const struct dentry *, const struct inode *, > const struct dentry *, const struct inode *, > unsigned int, const char *, const struct qstr *); > int (*d_delete)(const struct dentry *); > void (*d_release)(struct dentry *); > void (*d_prune)(struct dentry *); > void (*d_iput)(struct dentry *, struct inode *); > char *(*d_dname)(struct dentry *, char *, int); > struct vfsmount 
*(*d_automount)(struct path *); > int (*d_manage)(struct dentry *, bool); >} __attribute__((__aligned__((1 << 6)))); > >extern seqlock_t rename_lock; > >static inline __attribute__((always_inline)) int dname_external(struct dentry *dentry) >{ > return dentry->d_name.name != dentry->d_iname; >} > > > > >extern void d_instantiate(struct dentry *, struct inode *); >extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); >extern struct dentry * d_materialise_unique(struct dentry *, struct inode *); >extern void __d_drop(struct dentry *dentry); >extern void d_drop(struct dentry *dentry); >extern void d_delete(struct dentry *); >extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op); > > >extern struct dentry * d_alloc(struct dentry *, const struct qstr *); >extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); >extern struct dentry * d_splice_alias(struct inode *, struct dentry *); >extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); >extern struct dentry *d_find_any_alias(struct inode *inode); >extern struct dentry * d_obtain_alias(struct inode *); >extern void shrink_dcache_sb(struct super_block *); >extern void shrink_dcache_parent(struct dentry *); >extern void shrink_dcache_for_umount(struct super_block *); >extern int d_invalidate(struct dentry *); > > >extern struct dentry * d_make_root(struct inode *); > > >extern void d_genocide(struct dentry *); > >extern struct dentry *d_find_alias(struct inode *); >extern void d_prune_aliases(struct inode *); > > >extern int have_submounts(struct dentry *); > > > > >extern void d_rehash(struct dentry *); > >static inline __attribute__((always_inline)) void d_add(struct dentry *entry, struct inode *inode) >{ > d_instantiate(entry, inode); > d_rehash(entry); >} > >static inline __attribute__((always_inline)) struct dentry *d_add_unique(struct dentry *entry, struct inode *inode) >{ > struct dentry *res; > > res = d_instantiate_unique(entry, inode); > d_rehash(res != ((void *)0) ? 
res : entry); > return res; >} > >extern void dentry_update_name_case(struct dentry *, struct qstr *); > > >extern void d_move(struct dentry *, struct dentry *); >extern struct dentry *d_ancestor(struct dentry *, struct dentry *); > > >extern struct dentry *d_lookup(struct dentry *, struct qstr *); >extern struct dentry *d_hash_and_lookup(struct dentry *, struct qstr *); >extern struct dentry *__d_lookup(struct dentry *, struct qstr *); >extern struct dentry *__d_lookup_rcu(const struct dentry *parent, > const struct qstr *name, > unsigned *seq, struct inode **inode); > >static inline __attribute__((always_inline)) int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq) >{ > int ret = 0; > > do { if (__builtin_expect(!!(!((&(&(&dentry->d_lock)->rlock)->raw_lock)->lock != 0)), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/dcache.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "302" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); > if (!read_seqcount_retry(&dentry->d_seq, seq)) { > ret = 1; > dentry->d_count++; > } > > return ret; >} > > >extern int d_validate(struct dentry *, struct dentry *); > > > > >extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...); > >extern char *__d_path(const struct path *, const struct path *, char *, int); >extern char *d_absolute_path(const struct path *, char *, int); >extern char *d_path(const struct path *, char *, int); >extern char *d_path_with_unreachable(const struct path *, char *, int); >extern char *dentry_path_raw(struct dentry *, char *, int); >extern char *dentry_path(struct dentry *, char *, int); > >static inline __attribute__((always_inline)) struct dentry *dget_dlock(struct dentry *dentry) >{ > if (dentry) > dentry->d_count++; > return dentry; >} > >static inline __attribute__((always_inline)) struct dentry *dget(struct dentry *dentry) >{ > if (dentry) { > spin_lock(&dentry->d_lock); > dget_dlock(dentry); > spin_unlock(&dentry->d_lock); > } > return dentry; >} > >extern struct dentry *dget_parent(struct dentry *dentry); > >static inline __attribute__((always_inline)) int d_unhashed(struct dentry *dentry) >{ > return hlist_bl_unhashed(&dentry->d_hash); >} > >static inline __attribute__((always_inline)) int d_unlinked(struct dentry *dentry) >{ > return d_unhashed(dentry) && !((dentry) == (dentry)->d_parent); >} > >static inline __attribute__((always_inline)) int cant_mount(struct dentry *dentry) >{ > return (dentry->d_flags & 0x0100); >} > >static inline __attribute__((always_inline)) void dont_mount(struct dentry *dentry) >{ > spin_lock(&dentry->d_lock); > dentry->d_flags |= 0x0100; > spin_unlock(&dentry->d_lock); >} > >extern void dput(struct dentry *); > >static inline __attribute__((always_inline)) bool d_managed(struct dentry *dentry) >{ > return dentry->d_flags & (0x10000|0x20000|0x40000); >} > >static inline __attribute__((always_inline)) bool d_mountpoint(struct dentry *dentry) >{ > return dentry->d_flags & 0x10000; >} > >static inline __attribute__((always_inline)) bool d_need_lookup(struct dentry *dentry) >{ > return dentry->d_flags & 0x80000; >} > >extern void d_clear_need_lookup(struct dentry *dentry); > >extern int sysctl_vfs_cache_pressure; > > > > > >struct dentry; >struct vfsmount; > >struct path { > struct vfsmount *mnt; > struct dentry *dentry; >}; > >extern void path_get(struct path *); >extern void path_put(struct path *); > >static 
inline __attribute__((always_inline)) int path_equal(const struct path *path1, const struct path *path2) >{ > return path1->mnt == path2->mnt && path1->dentry == path2->dentry; >} > > > > > > > > > > > >struct __old_kernel_stat { > unsigned short st_dev; > unsigned short st_ino; > unsigned short st_mode; > unsigned short st_nlink; > unsigned short st_uid; > unsigned short st_gid; > unsigned short st_rdev; > unsigned long st_size; > unsigned long st_atime; > unsigned long st_mtime; > unsigned long st_ctime; >}; > > > >struct stat { > > > > > unsigned long st_dev; > > unsigned long st_ino; > unsigned short st_mode; > unsigned short st_nlink; > unsigned short st_uid; > unsigned short st_gid; > > > > > unsigned long st_rdev; > > unsigned long st_size; > unsigned long st_blksize; > unsigned long st_blocks; > unsigned long st_atime; > unsigned long st_atime_nsec; > unsigned long st_mtime; > unsigned long st_mtime_nsec; > unsigned long st_ctime; > unsigned long st_ctime_nsec; > unsigned long __unused4; > unsigned long __unused5; >}; > > > > > > >struct stat64 { > unsigned long long st_dev; > unsigned char __pad0[4]; > > > unsigned long __st_ino; > unsigned int st_mode; > unsigned int st_nlink; > > unsigned long st_uid; > unsigned long st_gid; > > unsigned long long st_rdev; > unsigned char __pad3[4]; > > long long st_size; > unsigned long st_blksize; > unsigned long long st_blocks; > > unsigned long st_atime; > unsigned long st_atime_nsec; > > unsigned long st_mtime; > unsigned long st_mtime_nsec; > > unsigned long st_ctime; > unsigned long st_ctime_nsec; > > unsigned long long st_ino; >}; > > >struct kstat { > u64 ino; > dev_t dev; > umode_t mode; > unsigned int nlink; > uid_t uid; > gid_t gid; > dev_t rdev; > loff_t size; > struct timespec atime; > struct timespec mtime; > struct timespec ctime; > unsigned long blksize; > unsigned long long blocks; >}; > > > > > >static inline __attribute__((always_inline)) int radix_tree_is_indirect_ptr(void *ptr) >{ > return (int)((unsigned long)ptr & 1); >} > > > > > > >struct radix_tree_root { > unsigned int height; > gfp_t gfp_mask; > struct radix_tree_node *rnode; >}; > >static inline __attribute__((always_inline)) void *radix_tree_deref_slot(void **pslot) >{ > return ({ typeof(*(*pslot)) *_________p1 = (typeof(*(*pslot))* )(*(volatile typeof((*pslot)) *)&((*pslot))); do { } while (0); ; do { } while(0); ((typeof(*(*pslot)) *)(_________p1)); }); >} > >static inline __attribute__((always_inline)) void *radix_tree_deref_slot_protected(void **pslot, > spinlock_t *treelock) >{ > return ({ do { } while (0); ; ((typeof(*(*pslot)) *)((*pslot))); }); >} > >static inline __attribute__((always_inline)) int radix_tree_deref_retry(void *arg) >{ > return __builtin_expect(!!((unsigned long)arg & 1), 0); >} > > > > > > >static inline __attribute__((always_inline)) int radix_tree_exceptional_entry(void *arg) >{ > > return (unsigned long)arg & 2; >} > > > > > > >static inline __attribute__((always_inline)) int radix_tree_exception(void *arg) >{ > return __builtin_expect(!!((unsigned long)arg & (1 | 2)), 0) > ; >} > >static inline __attribute__((always_inline)) void radix_tree_replace_slot(void **pslot, void *item) >{ > do { if (__builtin_expect(!!(radix_tree_is_indirect_ptr(item)), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/radix-tree.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "215" ", 0\n" ".popsection"); 
__builtin_unreachable(); } while (0); } while(0); > ({ __asm__ __volatile__ ("dmb" : : : "memory"); ((*pslot)) = (typeof(*(item)) *)((item)); }); >} > >int radix_tree_insert(struct radix_tree_root *, unsigned long, void *); >void *radix_tree_lookup(struct radix_tree_root *, unsigned long); >void **radix_tree_lookup_slot(struct radix_tree_root *, unsigned long); >void *radix_tree_delete(struct radix_tree_root *, unsigned long); >unsigned int >radix_tree_gang_lookup(struct radix_tree_root *root, void **results, > unsigned long first_index, unsigned int max_items); >unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, > void ***results, unsigned long *indices, > unsigned long first_index, unsigned int max_items); >unsigned long radix_tree_next_hole(struct radix_tree_root *root, > unsigned long index, unsigned long max_scan); >unsigned long radix_tree_prev_hole(struct radix_tree_root *root, > unsigned long index, unsigned long max_scan); >int radix_tree_preload(gfp_t gfp_mask); >void radix_tree_init(void); >void *radix_tree_tag_set(struct radix_tree_root *root, > unsigned long index, unsigned int tag); >void *radix_tree_tag_clear(struct radix_tree_root *root, > unsigned long index, unsigned int tag); >int radix_tree_tag_get(struct radix_tree_root *root, > unsigned long index, unsigned int tag); >unsigned int >radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, > unsigned long first_index, unsigned int max_items, > unsigned int tag); >unsigned int >radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results, > unsigned long first_index, unsigned int max_items, > unsigned int tag); >unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, > unsigned long *first_indexp, unsigned long last_index, > unsigned long nr_to_tag, > unsigned int fromtag, unsigned int totag); >int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); >unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); > >static inline __attribute__((always_inline)) void radix_tree_preload_end(void) >{ > do { do { __asm__ __volatile__("": : :"memory"); do { (current_thread_info()->preempt_count) -= (1); } while (0); } while (0); __asm__ __volatile__("": : :"memory"); do { if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0)) preempt_schedule(); } while (0); } while (0); >} > >struct radix_tree_iter { > unsigned long index; > unsigned long next_index; > unsigned long tags; >}; > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void ** >radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start) >{ > > iter->index = 0; > iter->next_index = start; > return ((void *)0); >} > >void **radix_tree_next_chunk(struct radix_tree_root *root, > struct radix_tree_iter *iter, unsigned flags); > > > > > > > >static inline __attribute__((always_inline)) __attribute__((always_inline)) unsigned >radix_tree_chunk_size(struct radix_tree_iter *iter) >{ > return iter->next_index - iter->index; >} > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void ** >radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags) >{ > if (flags & 0x0100) { > iter->tags >>= 1; > if (__builtin_expect(!!(iter->tags & 1ul), 1)) { > iter->index++; > return slot + 1; > } > if (!(flags & 0x0200) && __builtin_expect(!!(iter->tags), 1)) { > unsigned offset = (({ unsigned long __t = (iter->tags); fls(__t & -__t); }) - 1); > > iter->tags >>= offset; > iter->index += 
offset + 1; > return slot + offset + 1; > } > } else { > unsigned size = radix_tree_chunk_size(iter) - 1; > > while (size--) { > slot++; > iter->index++; > if (__builtin_expect(!!(*slot), 1)) > return slot; > if (flags & 0x0200) { > > iter->next_index = 0; > break; > } > } > } > return ((void *)0); >} > > > > > > > > > >enum pid_type >{ > PIDTYPE_PID, > PIDTYPE_PGID, > PIDTYPE_SID, > PIDTYPE_MAX >}; > >struct upid { > > int nr; > struct pid_namespace *ns; > struct hlist_node pid_chain; >}; > >struct pid >{ > atomic_t count; > unsigned int level; > > struct hlist_head tasks[PIDTYPE_MAX]; > struct rcu_head rcu; > struct upid numbers[1]; >}; > >extern struct pid init_struct_pid; > >struct pid_link >{ > struct hlist_node node; > struct pid *pid; >}; > >static inline __attribute__((always_inline)) struct pid *get_pid(struct pid *pid) >{ > if (pid) > atomic_add(1, &pid->count); > return pid; >} > >extern void put_pid(struct pid *pid); >extern struct task_struct *pid_task(struct pid *pid, enum pid_type); >extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type); > >extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type); > > > > > >extern void attach_pid(struct task_struct *task, enum pid_type type, > struct pid *pid); >extern void detach_pid(struct task_struct *task, enum pid_type); >extern void change_pid(struct task_struct *task, enum pid_type, > struct pid *pid); >extern void transfer_pid(struct task_struct *old, struct task_struct *new, > enum pid_type); > >struct pid_namespace; >extern struct pid_namespace init_pid_ns; > >extern struct pid *find_pid_ns(int nr, struct pid_namespace *ns); >extern struct pid *find_vpid(int nr); > > > > >extern struct pid *find_get_pid(int nr); >extern struct pid *find_ge_pid(int nr, struct pid_namespace *); >int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); > >extern struct pid *alloc_pid(struct pid_namespace *ns); >extern void free_pid(struct pid *pid); > >static inline __attribute__((always_inline)) struct pid_namespace *ns_of_pid(struct pid *pid) >{ > struct pid_namespace *ns = ((void *)0); > if (pid) > ns = pid->numbers[pid->level].ns; > return ns; >} > > > > > > > >static inline __attribute__((always_inline)) bool is_child_reaper(struct pid *pid) >{ > return pid->numbers[pid->level].nr == 1; >} > >static inline __attribute__((always_inline)) pid_t pid_nr(struct pid *pid) >{ > pid_t nr = 0; > if (pid) > nr = pid->numbers[0].nr; > return nr; >} > >pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns); >pid_t pid_vnr(struct pid *pid); > > > > > > >struct semaphore { > raw_spinlock_t lock; > unsigned int count; > struct list_head wait_list; >}; > >static inline __attribute__((always_inline)) void sema_init(struct semaphore *sem, int val) >{ > static struct lock_class_key __key; > *sem = (struct semaphore) { .lock = (raw_spinlock_t) { .raw_lock = { 0 }, }, .count = val, .wait_list = { &((*sem).wait_list), &((*sem).wait_list) }, }; > do { (void)("semaphore->lock"); (void)(&__key); } while (0); >} > >extern void down(struct semaphore *sem); >extern int __attribute__((warn_unused_result)) down_interruptible(struct semaphore *sem); >extern int __attribute__((warn_unused_result)) down_killable(struct semaphore *sem); >extern int __attribute__((warn_unused_result)) down_trylock(struct semaphore *sem); >extern int __attribute__((warn_unused_result)) down_timeout(struct semaphore *sem, long jiffies); >extern void up(struct semaphore *sem); > > > >struct fiemap_extent { > __u64 fe_logical; > > __u64 fe_physical; 
> > __u64 fe_length; > __u64 fe_reserved64[2]; > __u32 fe_flags; > __u32 fe_reserved[3]; >}; > >struct fiemap { > __u64 fm_start; > > __u64 fm_length; > > __u32 fm_flags; > __u32 fm_mapped_extents; > __u32 fm_extent_count; > __u32 fm_reserved; > struct fiemap_extent fm_extents[0]; >}; > > > > > > >enum migrate_mode { > MIGRATE_ASYNC, > MIGRATE_SYNC_LIGHT, > MIGRATE_SYNC, >}; > > > > >struct export_operations; >struct hd_geometry; >struct iovec; >struct nameidata; >struct kiocb; >struct kobject; >struct pipe_inode_info; >struct poll_table_struct; >struct kstatfs; >struct vm_area_struct; >struct vfsmount; >struct cred; > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init(void); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) inode_init_early(void); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) files_init(unsigned long); > >extern struct files_stat_struct files_stat; >extern unsigned long get_max_files(void); >extern int sysctl_nr_open; >extern struct inodes_stat_t inodes_stat; >extern int leases_enable, lease_break_time; > >struct buffer_head; >typedef int (get_block_t)(struct inode *inode, sector_t iblock, > struct buffer_head *bh_result, int create); >typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, > ssize_t bytes, void *private, int ret, > bool is_async); > >struct iattr { > unsigned int ia_valid; > umode_t ia_mode; > uid_t ia_uid; > gid_t ia_gid; > loff_t ia_size; > struct timespec ia_atime; > struct timespec ia_mtime; > struct timespec ia_ctime; > > > > > > > struct file *ia_file; >}; > >enum positive_aop_returns { > AOP_WRITEPAGE_ACTIVATE = 0x80000, > AOP_TRUNCATED_PAGE = 0x80001, >}; > >struct page; >struct address_space; >struct writeback_control; > >struct iov_iter { > const struct iovec *iov; > unsigned long nr_segs; > size_t iov_offset; > size_t count; >}; > >size_t iov_iter_copy_from_user_atomic(struct page *page, > struct iov_iter *i, unsigned long offset, size_t bytes); >size_t iov_iter_copy_from_user(struct page *page, > struct iov_iter *i, unsigned long offset, size_t bytes); >void iov_iter_advance(struct iov_iter *i, size_t bytes); >int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes); >size_t iov_iter_single_seg_count(struct iov_iter *i); > >static inline __attribute__((always_inline)) void iov_iter_init(struct iov_iter *i, > const struct iovec *iov, unsigned long nr_segs, > size_t count, size_t written) >{ > i->iov = iov; > i->nr_segs = nr_segs; > i->iov_offset = 0; > i->count = count + written; > > iov_iter_advance(i, written); >} > >static inline __attribute__((always_inline)) size_t iov_iter_count(struct iov_iter *i) >{ > return i->count; >} > >typedef struct { > size_t written; > size_t count; > union { > char *buf; > void *data; > } arg; > int error; >} read_descriptor_t; > >typedef int (*read_actor_t)(read_descriptor_t *, struct page *, > unsigned long, unsigned long); > >struct address_space_operations { > int (*writepage)(struct page *page, struct writeback_control *wbc); > int (*readpage)(struct file *, struct page *); > > > int (*writepages)(struct address_space *, struct writeback_control *); > > > int (*set_page_dirty)(struct page *page); > > int (*readpages)(struct file *filp, struct address_space *mapping, > struct list_head *pages, unsigned nr_pages); > > int (*write_begin)(struct file *, struct 
address_space *mapping, > loff_t pos, unsigned len, unsigned flags, > struct page **pagep, void **fsdata); > int (*write_end)(struct file *, struct address_space *mapping, > loff_t pos, unsigned len, unsigned copied, > struct page *page, void *fsdata); > > > sector_t (*bmap)(struct address_space *, sector_t); > void (*invalidatepage) (struct page *, unsigned long); > int (*releasepage) (struct page *, gfp_t); > void (*freepage)(struct page *); > ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, > loff_t offset, unsigned long nr_segs); > int (*get_xip_mem)(struct address_space *, unsigned long, int, > void **, unsigned long *); > > > > > int (*migratepage) (struct address_space *, > struct page *, struct page *, enum migrate_mode); > int (*launder_page) (struct page *); > int (*is_partially_uptodate) (struct page *, read_descriptor_t *, > unsigned long); > int (*error_remove_page)(struct address_space *, struct page *); >}; > >extern const struct address_space_operations empty_aops; > > > > > >int pagecache_write_begin(struct file *, struct address_space *mapping, > loff_t pos, unsigned len, unsigned flags, > struct page **pagep, void **fsdata); > >int pagecache_write_end(struct file *, struct address_space *mapping, > loff_t pos, unsigned len, unsigned copied, > struct page *page, void *fsdata); > >struct backing_dev_info; >struct address_space { > struct inode *host; > struct radix_tree_root page_tree; > spinlock_t tree_lock; > unsigned int i_mmap_writable; > struct prio_tree_root i_mmap; > struct list_head i_mmap_nonlinear; > struct mutex i_mmap_mutex; > > unsigned long nrpages; > unsigned long writeback_index; > const struct address_space_operations *a_ops; > unsigned long flags; > struct backing_dev_info *backing_dev_info; > spinlock_t private_lock; > struct list_head private_list; > struct address_space *assoc_mapping; >} __attribute__((aligned(sizeof(long)))); > > > > > >struct request_queue; > >struct block_device { > dev_t bd_dev; > int bd_openers; > struct inode * bd_inode; > struct super_block * bd_super; > struct mutex bd_mutex; > struct list_head bd_inodes; > void * bd_claiming; > void * bd_holder; > int bd_holders; > bool bd_write_holder; > > struct list_head bd_holder_disks; > > struct block_device * bd_contains; > unsigned bd_block_size; > struct hd_struct * bd_part; > > unsigned bd_part_count; > int bd_invalidated; > struct gendisk * bd_disk; > struct request_queue * bd_queue; > struct list_head bd_list; > > > > > > > unsigned long bd_private; > > > int bd_fsfreeze_count; > > struct mutex bd_fsfreeze_mutex; >}; > >int mapping_tagged(struct address_space *mapping, int tag); > > > > >static inline __attribute__((always_inline)) int mapping_mapped(struct address_space *mapping) >{ > return !prio_tree_empty(&mapping->i_mmap) || > !list_empty(&mapping->i_mmap_nonlinear); >} > > > > > > > >static inline __attribute__((always_inline)) int mapping_writably_mapped(struct address_space *mapping) >{ > return mapping->i_mmap_writable != 0; >} > >struct posix_acl; > >struct inode { > umode_t i_mode; > unsigned short i_opflags; > uid_t i_uid; > gid_t i_gid; > unsigned int i_flags; > > > struct posix_acl *i_acl; > struct posix_acl *i_default_acl; > > > const struct inode_operations *i_op; > struct super_block *i_sb; > struct address_space *i_mapping; > > > void *i_security; > > > > unsigned long i_ino; > > > > > > > > union { > const unsigned int i_nlink; > unsigned int __i_nlink; > }; > dev_t i_rdev; > struct timespec i_atime; > struct timespec i_mtime; > struct timespec 
i_ctime; > spinlock_t i_lock; > unsigned short i_bytes; > blkcnt_t i_blocks; > loff_t i_size; > > > seqcount_t i_size_seqcount; > > > > unsigned long i_state; > struct mutex i_mutex; > > unsigned long dirtied_when; > > struct hlist_node i_hash; > struct list_head i_wb_list; > struct list_head i_lru; > struct list_head i_sb_list; > union { > struct list_head i_dentry; > struct rcu_head i_rcu; > }; > atomic_t i_count; > unsigned int i_blkbits; > u64 i_version; > atomic_t i_dio_count; > atomic_t i_writecount; > const struct file_operations *i_fop; > struct file_lock *i_flock; > struct address_space i_data; > > > > struct list_head i_devices; > union { > struct pipe_inode_info *i_pipe; > struct block_device *i_bdev; > struct cdev *i_cdev; > }; > > __u32 i_generation; > > > __u32 i_fsnotify_mask; > struct hlist_head i_fsnotify_marks; > > > > > > void *i_private; >}; > >static inline __attribute__((always_inline)) int inode_unhashed(struct inode *inode) >{ > return hlist_unhashed(&inode->i_hash); >} > >enum inode_i_mutex_lock_class >{ > I_MUTEX_NORMAL, > I_MUTEX_PARENT, > I_MUTEX_CHILD, > I_MUTEX_XATTR, > I_MUTEX_QUOTA >}; > >static inline __attribute__((always_inline)) loff_t i_size_read(const struct inode *inode) >{ > > loff_t i_size; > unsigned int seq; > > do { > seq = read_seqcount_begin(&inode->i_size_seqcount); > i_size = inode->i_size; > } while (read_seqcount_retry(&inode->i_size_seqcount, seq)); > return i_size; > >} > > > > > > >static inline __attribute__((always_inline)) void i_size_write(struct inode *inode, loff_t i_size) >{ > > write_seqcount_begin(&inode->i_size_seqcount); > inode->i_size = i_size; > write_seqcount_end(&inode->i_size_seqcount); > > > > > > > >} > >static inline __attribute__((always_inline)) unsigned iminor(const struct inode *inode) >{ > return ((unsigned int) ((inode->i_rdev) & ((1U << 20) - 1))); >} > >static inline __attribute__((always_inline)) unsigned imajor(const struct inode *inode) >{ > return ((unsigned int) ((inode->i_rdev) >> 20)); >} > >extern struct block_device *I_BDEV(struct inode *inode); > >struct fown_struct { > rwlock_t lock; > struct pid *pid; > enum pid_type pid_type; > uid_t uid, euid; > int signum; >}; > > > > >struct file_ra_state { > unsigned long start; > unsigned int size; > unsigned int async_size; > > > unsigned int ra_pages; > unsigned int mmap_miss; > loff_t prev_pos; >}; > > > > >static inline __attribute__((always_inline)) int ra_has_index(struct file_ra_state *ra, unsigned long index) >{ > return (index >= ra->start && > index < ra->start + ra->size); >} > > > > >struct file { > > > > > union { > struct list_head fu_list; > struct rcu_head fu_rcuhead; > } f_u; > struct path f_path; > > > const struct file_operations *f_op; > > > > > > spinlock_t f_lock; > > int f_sb_list_cpu; > > atomic_long_t f_count; > unsigned int f_flags; > fmode_t f_mode; > loff_t f_pos; > struct fown_struct f_owner; > const struct cred *f_cred; > struct file_ra_state f_ra; > > u64 f_version; > > void *f_security; > > > void *private_data; > > > > struct list_head f_ep_links; > struct list_head f_tfile_llink; > > struct address_space *f_mapping; > > > >}; > >struct file_handle { > __u32 handle_bytes; > int handle_type; > > unsigned char f_handle[0]; >}; > >static inline __attribute__((always_inline)) void file_take_write(struct file *filp) {} >static inline __attribute__((always_inline)) void file_release_write(struct file *filp) {} >static inline __attribute__((always_inline)) void file_reset_write(struct file *filp) {} >static inline 
__attribute__((always_inline)) void file_check_state(struct file *filp) {} >static inline __attribute__((always_inline)) int file_check_writeable(struct file *filp) >{ > return 0; >} > >typedef struct files_struct *fl_owner_t; > >struct file_lock_operations { > void (*fl_copy_lock)(struct file_lock *, struct file_lock *); > void (*fl_release_private)(struct file_lock *); >}; > >struct lock_manager_operations { > int (*lm_compare_owner)(struct file_lock *, struct file_lock *); > void (*lm_notify)(struct file_lock *); > int (*lm_grant)(struct file_lock *, struct file_lock *, int); > void (*lm_release_private)(struct file_lock *); > void (*lm_break)(struct file_lock *); > int (*lm_change)(struct file_lock **, int); >}; > >struct lock_manager { > struct list_head list; >}; > >void locks_start_grace(struct lock_manager *); >void locks_end_grace(struct lock_manager *); >int locks_in_grace(void); > > > > > > >struct nlm_lockowner; > > > > >struct nfs_lock_info { > u32 state; > struct nlm_lockowner *owner; > struct list_head list; >}; > >struct nfs4_lock_state; >struct nfs4_lock_info { > struct nfs4_lock_state *owner; >}; > > >struct file_lock { > struct file_lock *fl_next; > struct list_head fl_link; > struct list_head fl_block; > fl_owner_t fl_owner; > unsigned int fl_flags; > unsigned char fl_type; > unsigned int fl_pid; > struct pid *fl_nspid; > wait_queue_head_t fl_wait; > struct file *fl_file; > loff_t fl_start; > loff_t fl_end; > > struct fasync_struct * fl_fasync; > > unsigned long fl_break_time; > unsigned long fl_downgrade_time; > > const struct file_lock_operations *fl_ops; > const struct lock_manager_operations *fl_lmops; > union { > struct nfs_lock_info nfs_fl; > struct nfs4_lock_info nfs4_fl; > struct { > struct list_head link; > int state; > } afs; > } fl_u; >}; > > > > > > > > > >struct f_owner_ex { > int type; > __kernel_pid_t pid; >}; > >struct flock { > short l_type; > short l_whence; > __kernel_off_t l_start; > __kernel_off_t l_len; > __kernel_pid_t l_pid; > >}; > >struct flock64 { > short l_type; > short l_whence; > __kernel_loff_t l_start; > __kernel_loff_t l_len; > __kernel_pid_t l_pid; > >}; > > > > >extern void send_sigio(struct fown_struct *fown, int fd, int band); > > >extern int fcntl_getlk(struct file *, struct flock *); >extern int fcntl_setlk(unsigned int, struct file *, unsigned int, > struct flock *); > > >extern int fcntl_getlk64(struct file *, struct flock64 *); >extern int fcntl_setlk64(unsigned int, struct file *, unsigned int, > struct flock64 *); > > >extern int fcntl_setlease(unsigned int fd, struct file *filp, long arg); >extern int fcntl_getlease(struct file *filp); > > >void locks_free_lock(struct file_lock *fl); >extern void locks_init_lock(struct file_lock *); >extern struct file_lock * locks_alloc_lock(void); >extern void locks_copy_lock(struct file_lock *, struct file_lock *); >extern void __locks_copy_lock(struct file_lock *, const struct file_lock *); >extern void locks_remove_posix(struct file *, fl_owner_t); >extern void locks_remove_flock(struct file *); >extern void locks_release_private(struct file_lock *); >extern void posix_test_lock(struct file *, struct file_lock *); >extern int posix_lock_file(struct file *, struct file_lock *, struct file_lock *); >extern int posix_lock_file_wait(struct file *, struct file_lock *); >extern int posix_unblock_lock(struct file *, struct file_lock *); >extern int vfs_test_lock(struct file *, struct file_lock *); >extern int vfs_lock_file(struct file *, unsigned int, struct file_lock *, struct file_lock *); 
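>
>/*
> * A minimal sketch (not part of the original testcase) of the byte-range
> * convention behind the locking API declared above: filling in a struct
> * flock to describe a whole-file write lock of the kind fcntl_setlk()
> * receives from userspace.  The numeric constants stand in for F_WRLCK
> * (1) and SEEK_SET (0); those macros were stripped by preprocessing, so
> * the usual Linux ABI values are assumed here.
> */
>static inline __attribute__((always_inline)) void
>example_fill_whole_file_wrlock(struct flock *fl)
>{
> fl->l_type = 1; /* F_WRLCK (assumed ABI value) */
> fl->l_whence = 0; /* SEEK_SET: l_start is an absolute file offset */
> fl->l_start = 0; /* lock from the start of the file... */
> fl->l_len = 0; /* ...and l_len == 0 conventionally means "to EOF" */
> fl->l_pid = 0; /* filled in by the kernel when answering F_GETLK */
>}
>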
>extern int vfs_cancel_lock(struct file *filp, struct file_lock *fl); >extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl); >extern int __break_lease(struct inode *inode, unsigned int flags); >extern void lease_get_mtime(struct inode *, struct timespec *time); >extern int generic_setlease(struct file *, long, struct file_lock **); >extern int vfs_setlease(struct file *, long, struct file_lock **); >extern int lease_modify(struct file_lock **, int); >extern int lock_may_read(struct inode *, loff_t start, unsigned long count); >extern int lock_may_write(struct inode *, loff_t start, unsigned long count); >extern void locks_delete_block(struct file_lock *waiter); >extern void lock_flocks(void); >extern void unlock_flocks(void); > >struct fasync_struct { > spinlock_t fa_lock; > int magic; > int fa_fd; > struct fasync_struct *fa_next; > struct file *fa_file; > struct rcu_head fa_rcu; >}; > > > > >extern int fasync_helper(int, struct file *, int, struct fasync_struct **); >extern struct fasync_struct *fasync_insert_entry(int, struct file *, struct fasync_struct **, struct fasync_struct *); >extern int fasync_remove_entry(struct file *, struct fasync_struct **); >extern struct fasync_struct *fasync_alloc(void); >extern void fasync_free(struct fasync_struct *); > > >extern void kill_fasync(struct fasync_struct **, int, int); > >extern int __f_setown(struct file *filp, struct pid *, enum pid_type, int force); >extern int f_setown(struct file *filp, unsigned long arg, int force); >extern void f_delown(struct file *filp); >extern pid_t f_getown(struct file *filp); >extern int send_sigurg(struct fown_struct *fown); > >extern struct list_head super_blocks; >extern spinlock_t sb_lock; > >struct super_block { > struct list_head s_list; > dev_t s_dev; > unsigned char s_dirt; > unsigned char s_blocksize_bits; > unsigned long s_blocksize; > loff_t s_maxbytes; > struct file_system_type *s_type; > const struct super_operations *s_op; > const struct dquot_operations *dq_op; > const struct quotactl_ops *s_qcop; > const struct export_operations *s_export_op; > unsigned long s_flags; > unsigned long s_magic; > struct dentry *s_root; > struct rw_semaphore s_umount; > struct mutex s_lock; > int s_count; > atomic_t s_active; > > void *s_security; > > const struct xattr_handler **s_xattr; > > struct list_head s_inodes; > struct hlist_bl_head s_anon; > > struct list_head *s_files; > > > > struct list_head s_mounts; > > struct list_head s_dentry_lru; > int s_nr_dentry_unused; > > > spinlock_t s_inode_lru_lock __attribute__((__aligned__((1 << 6)))); > struct list_head s_inode_lru; > int s_nr_inodes_unused; > > struct block_device *s_bdev; > struct backing_dev_info *s_bdi; > struct mtd_info *s_mtd; > struct hlist_node s_instances; > struct quota_info s_dquot; > > int s_frozen; > wait_queue_head_t s_wait_unfrozen; > > char s_id[32]; > u8 s_uuid[16]; > > void *s_fs_info; > unsigned int s_max_links; > fmode_t s_mode; > > > > u32 s_time_gran; > > > > > > struct mutex s_vfs_rename_mutex; > > > > > > char *s_subtype; > > > > > > char *s_options; > const struct dentry_operations *s_d_op; > > > > > int cleancache_poolid; > > struct shrinker s_shrink; > > > atomic_long_t s_remove_count; > > > int s_readonly_remount; >}; > > >extern void prune_icache_sb(struct super_block *sb, int nr_to_scan); >extern void prune_dcache_sb(struct super_block *sb, int nr_to_scan); > >extern struct timespec current_fs_time(struct super_block *sb); > > > > >enum { > SB_UNFROZEN = 0, > SB_FREEZE_WRITE = 1, > SB_FREEZE_TRANS = 2, 
>}; > >extern struct user_namespace init_user_ns; > >extern bool inode_owner_or_capable(const struct inode *inode); > > >extern void lock_super(struct super_block *); >extern void unlock_super(struct super_block *); > > > > >extern int vfs_create(struct inode *, struct dentry *, umode_t, struct nameidata *); >extern int vfs_mkdir(struct inode *, struct dentry *, umode_t); >extern int vfs_mknod(struct inode *, struct dentry *, umode_t, dev_t); >extern int vfs_symlink(struct inode *, struct dentry *, const char *); >extern int vfs_link(struct dentry *, struct inode *, struct dentry *); >extern int vfs_rmdir(struct inode *, struct dentry *); >extern int vfs_unlink(struct inode *, struct dentry *); >extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); > > > > >extern void dentry_unhash(struct dentry *dentry); > > > > >extern void inode_init_owner(struct inode *inode, const struct inode *dir, > umode_t mode); > > > >struct fiemap_extent_info { > unsigned int fi_flags; > unsigned int fi_extents_mapped; > unsigned int fi_extents_max; > struct fiemap_extent *fi_extents_start; > >}; >int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, > u64 phys, u64 len, u32 flags); >int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); > >typedef int (*filldir_t)(void *, const char *, int, loff_t, u64, unsigned); >struct block_device_operations; > > > > > > > >struct file_operations { > struct module *owner; > loff_t (*llseek) (struct file *, loff_t, int); > ssize_t (*read) (struct file *, char *, size_t, loff_t *); > ssize_t (*write) (struct file *, const char *, size_t, loff_t *); > ssize_t (*aio_read) (struct kiocb *, const struct iovec *, unsigned long, loff_t); > ssize_t (*aio_write) (struct kiocb *, const struct iovec *, unsigned long, loff_t); > int (*readdir) (struct file *, void *, filldir_t); > unsigned int (*poll) (struct file *, struct poll_table_struct *); > long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); > long (*compat_ioctl) (struct file *, unsigned int, unsigned long); > int (*mmap) (struct file *, struct vm_area_struct *); > int (*open) (struct inode *, struct file *); > int (*flush) (struct file *, fl_owner_t id); > int (*release) (struct inode *, struct file *); > int (*fsync) (struct file *, loff_t, loff_t, int datasync); > int (*aio_fsync) (struct kiocb *, int datasync); > int (*fasync) (int, struct file *, int); > int (*lock) (struct file *, int, struct file_lock *); > ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); > unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); > int (*check_flags)(int); > int (*flock) (struct file *, int, struct file_lock *); > ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); > ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); > int (*setlease)(struct file *, long, struct file_lock **); > long (*fallocate)(struct file *file, int mode, loff_t offset, > loff_t len); >}; > >struct inode_operations { > struct dentry * (*lookup) (struct inode *,struct dentry *, struct nameidata *); > void * (*follow_link) (struct dentry *, struct nameidata *); > int (*permission) (struct inode *, int); > struct posix_acl * (*get_acl)(struct inode *, int); > > int (*readlink) (struct dentry *, char *,int); > void (*put_link) (struct dentry *, struct nameidata *, void *); > > int (*create) (struct inode 
*,struct dentry *,umode_t,struct nameidata *); > int (*link) (struct dentry *,struct inode *,struct dentry *); > int (*unlink) (struct inode *,struct dentry *); > int (*symlink) (struct inode *,struct dentry *,const char *); > int (*mkdir) (struct inode *,struct dentry *,umode_t); > int (*rmdir) (struct inode *,struct dentry *); > int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t); > int (*rename) (struct inode *, struct dentry *, > struct inode *, struct dentry *); > void (*truncate) (struct inode *); > int (*setattr) (struct dentry *, struct iattr *); > int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *); > int (*setxattr) (struct dentry *, const char *,const void *,size_t,int); > ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); > ssize_t (*listxattr) (struct dentry *, char *, size_t); > int (*removexattr) (struct dentry *, const char *); > void (*truncate_range)(struct inode *, loff_t, loff_t); > int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, > u64 len); >} __attribute__((__aligned__((1 << 6)))); > >struct seq_file; > >ssize_t rw_copy_check_uvector(int type, const struct iovec * uvector, > unsigned long nr_segs, unsigned long fast_segs, > struct iovec *fast_pointer, > struct iovec **ret_pointer, > int check_access); > >extern ssize_t vfs_read(struct file *, char *, size_t, loff_t *); >extern ssize_t vfs_write(struct file *, const char *, size_t, loff_t *); >extern ssize_t vfs_readv(struct file *, const struct iovec *, > unsigned long, loff_t *); >extern ssize_t vfs_writev(struct file *, const struct iovec *, > unsigned long, loff_t *); > >struct super_operations { > struct inode *(*alloc_inode)(struct super_block *sb); > void (*destroy_inode)(struct inode *); > > void (*dirty_inode) (struct inode *, int flags); > int (*write_inode) (struct inode *, struct writeback_control *wbc); > int (*drop_inode) (struct inode *); > void (*evict_inode) (struct inode *); > void (*put_super) (struct super_block *); > void (*write_super) (struct super_block *); > int (*sync_fs)(struct super_block *sb, int wait); > int (*freeze_fs) (struct super_block *); > int (*unfreeze_fs) (struct super_block *); > int (*statfs) (struct dentry *, struct kstatfs *); > int (*remount_fs) (struct super_block *, int *, char *); > void (*umount_begin) (struct super_block *); > > int (*show_options)(struct seq_file *, struct dentry *); > int (*show_devname)(struct seq_file *, struct dentry *); > int (*show_path)(struct seq_file *, struct dentry *); > int (*show_stats)(struct seq_file *, struct dentry *); > > > > > int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); > int (*nr_cached_objects)(struct super_block *); > void (*free_cached_objects)(struct super_block *, int); >}; > >extern void __mark_inode_dirty(struct inode *, int); >static inline __attribute__((always_inline)) void mark_inode_dirty(struct inode *inode) >{ > __mark_inode_dirty(inode, ((1 << 0) | (1 << 1) | (1 << 2))); >} > >static inline __attribute__((always_inline)) void mark_inode_dirty_sync(struct inode *inode) >{ > __mark_inode_dirty(inode, (1 << 0)); >} > >extern void inc_nlink(struct inode *inode); >extern void drop_nlink(struct inode *inode); >extern void clear_nlink(struct inode *inode); >extern void set_nlink(struct inode *inode, unsigned int nlink); > >static inline __attribute__((always_inline)) void inode_inc_link_count(struct inode *inode) >{ > inc_nlink(inode); > mark_inode_dirty(inode); >} > >static inline __attribute__((always_inline)) void 
inode_dec_link_count(struct inode *inode) >{ > drop_nlink(inode); > mark_inode_dirty(inode); >} > >static inline __attribute__((always_inline)) void inode_inc_iversion(struct inode *inode) >{ > spin_lock(&inode->i_lock); > inode->i_version++; > spin_unlock(&inode->i_lock); >} > >extern void touch_atime(struct path *); >static inline __attribute__((always_inline)) void file_accessed(struct file *file) >{ > if (!(file->f_flags & 01000000)) > touch_atime(&file->f_path); >} > >int sync_inode(struct inode *inode, struct writeback_control *wbc); >int sync_inode_metadata(struct inode *inode, int wait); > >struct file_system_type { > const char *name; > int fs_flags; > struct dentry *(*mount) (struct file_system_type *, int, > const char *, void *); > void (*kill_sb) (struct super_block *); > struct module *owner; > struct file_system_type * next; > struct hlist_head fs_supers; > > struct lock_class_key s_lock_key; > struct lock_class_key s_umount_key; > struct lock_class_key s_vfs_rename_key; > > struct lock_class_key i_lock_key; > struct lock_class_key i_mutex_key; > struct lock_class_key i_mutex_dir_key; >}; > >extern struct dentry *mount_ns(struct file_system_type *fs_type, int flags, > void *data, int (*fill_super)(struct super_block *, void *, int)); >extern struct dentry *mount_bdev(struct file_system_type *fs_type, > int flags, const char *dev_name, void *data, > int (*fill_super)(struct super_block *, void *, int)); >extern struct dentry *mount_single(struct file_system_type *fs_type, > int flags, void *data, > int (*fill_super)(struct super_block *, void *, int)); >extern struct dentry *mount_nodev(struct file_system_type *fs_type, > int flags, void *data, > int (*fill_super)(struct super_block *, void *, int)); >extern struct dentry *mount_subtree(struct vfsmount *mnt, const char *path); >void generic_shutdown_super(struct super_block *sb); >void kill_block_super(struct super_block *sb); >void kill_anon_super(struct super_block *sb); >void kill_litter_super(struct super_block *sb); >void deactivate_super(struct super_block *sb); >void deactivate_locked_super(struct super_block *sb); >int set_anon_super(struct super_block *s, void *data); >int get_anon_bdev(dev_t *); >void free_anon_bdev(dev_t); >struct super_block *sget(struct file_system_type *type, > int (*test)(struct super_block *,void *), > int (*set)(struct super_block *,void *), > void *data); >extern struct dentry *mount_pseudo(struct file_system_type *, char *, > const struct super_operations *ops, > const struct dentry_operations *dops, > unsigned long); > > > > > > > >extern int register_filesystem(struct file_system_type *); >extern int unregister_filesystem(struct file_system_type *); >extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); > >extern void kern_unmount(struct vfsmount *mnt); >extern int may_umount_tree(struct vfsmount *); >extern int may_umount(struct vfsmount *); >extern long do_mount(char *, char *, char *, unsigned long, void *); >extern struct vfsmount *collect_mounts(struct path *); >extern void drop_collected_mounts(struct vfsmount *); >extern int iterate_mounts(int (*)(struct vfsmount *, void *), void *, > struct vfsmount *); >extern int vfs_statfs(struct path *, struct kstatfs *); >extern int user_statfs(const char *, struct kstatfs *); >extern int fd_statfs(int, struct kstatfs *); >extern int vfs_ustat(dev_t, struct kstatfs *); >extern int freeze_super(struct super_block *super); >extern int thaw_super(struct super_block *super); >extern bool our_mnt(struct vfsmount *mnt); > 
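>/*
> * Illustrative sketch only, not part of the stripped test case: how the
> * file_system_type, mount_nodev() and register_filesystem() declarations
> * above fit together.  All examplefs_* names are hypothetical.
> */
>static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
>{
> /* a real fill_super would set sb->s_op, allocate a root inode, etc. */
> return 0;
>}
>
>static struct dentry *examplefs_mount(struct file_system_type *fs_type,
> int flags, const char *dev_name, void *data)
>{
> return mount_nodev(fs_type, flags, data, examplefs_fill_super);
>}
>
>static struct file_system_type examplefs_type = {
> .name = "examplefs",
> .mount = examplefs_mount,
> .kill_sb = kill_anon_super, /* declared above */
>};
>
>/* registration, e.g. from module init/exit code:
> * int err = register_filesystem(&examplefs_type);
> * ...
> * err = unregister_filesystem(&examplefs_type);
> */
>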
>extern int current_umask(void); > > >extern struct kobject *fs_kobj; > > >extern int rw_verify_area(int, struct file *, loff_t *, size_t); > > > > > >extern int locks_mandatory_locked(struct inode *); >extern int locks_mandatory_area(int, struct inode *, struct file *, loff_t, size_t); > > > > > > >static inline __attribute__((always_inline)) int __mandatory_lock(struct inode *ino) >{ > return (ino->i_mode & (0002000 | 00010)) == 0002000; >} > > > > > > >static inline __attribute__((always_inline)) int mandatory_lock(struct inode *ino) >{ > return ((ino)->i_sb->s_flags & (64)) && __mandatory_lock(ino); >} > >static inline __attribute__((always_inline)) int locks_verify_locked(struct inode *inode) >{ > if (mandatory_lock(inode)) > return locks_mandatory_locked(inode); > return 0; >} > >static inline __attribute__((always_inline)) int locks_verify_truncate(struct inode *inode, > struct file *filp, > loff_t size) >{ > if (inode->i_flock && mandatory_lock(inode)) > return locks_mandatory_area( > 2, inode, filp, > size < inode->i_size ? size : inode->i_size, > (size < inode->i_size ? inode->i_size - size > : size - inode->i_size) > ); > return 0; >} > >static inline __attribute__((always_inline)) int break_lease(struct inode *inode, unsigned int mode) >{ > if (inode->i_flock) > return __break_lease(inode, mode); > return 0; >} > >extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, > struct file *filp); >extern int do_fallocate(struct file *file, int mode, loff_t offset, > loff_t len); >extern long do_sys_open(int dfd, const char *filename, int flags, > umode_t mode); >extern struct file *filp_open(const char *, int, umode_t); >extern struct file *file_open_root(struct dentry *, struct vfsmount *, > const char *, int); >extern struct file * dentry_open(struct dentry *, struct vfsmount *, int, > const struct cred *); >extern int filp_close(struct file *, fl_owner_t id); >extern char * getname(const char *); > > > >extern int ioctl_preallocate(struct file *filp, void *argp); > > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init_early(void); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) vfs_caches_init(unsigned long); > >extern struct kmem_cache *names_cachep; > > > > > > > >extern void putname(const char *name); > > > >extern int register_blkdev(unsigned int, const char *); >extern void unregister_blkdev(unsigned int, const char *); >extern struct block_device *bdget(dev_t); >extern struct block_device *bdgrab(struct block_device *bdev); >extern void bd_set_size(struct block_device *, loff_t size); >extern sector_t blkdev_max_block(struct block_device *bdev); >extern void bd_forget(struct inode *inode); >extern void bdput(struct block_device *); >extern void invalidate_bdev(struct block_device *); >extern int sync_blockdev(struct block_device *bdev); >extern void kill_bdev(struct block_device *); >extern struct super_block *freeze_bdev(struct block_device *); >extern void emergency_thaw_all(void); >extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); >extern int fsync_bdev(struct block_device *); > >extern int sync_filesystem(struct super_block *); >extern const struct file_operations def_blk_fops; >extern const struct file_operations def_chr_fops; >extern const struct file_operations bad_sock_fops; >extern const struct file_operations def_fifo_fops; > >extern int ioctl_by_bdev(struct 
block_device *, unsigned, unsigned long); >extern int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); >extern long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); >extern int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder); >extern struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, > void *holder); >extern struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, > void *holder); >extern int blkdev_put(struct block_device *bdev, fmode_t mode); > >extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); >extern void bd_unlink_disk_holder(struct block_device *bdev, > struct gendisk *disk); > >extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); >extern int register_chrdev_region(dev_t, unsigned, const char *); >extern int __register_chrdev(unsigned int major, unsigned int baseminor, > unsigned int count, const char *name, > const struct file_operations *fops); >extern void __unregister_chrdev(unsigned int major, unsigned int baseminor, > unsigned int count, const char *name); >extern void unregister_chrdev_region(dev_t, unsigned); >extern void chrdev_show(struct seq_file *,off_t); > >static inline __attribute__((always_inline)) int register_chrdev(unsigned int major, const char *name, > const struct file_operations *fops) >{ > return __register_chrdev(major, 0, 256, name, fops); >} > >static inline __attribute__((always_inline)) void unregister_chrdev(unsigned int major, const char *name) >{ > __unregister_chrdev(major, 0, 256, name); >} > > > > > > > >extern const char *__bdevname(dev_t, char *buffer); >extern const char *bdevname(struct block_device *bdev, char *buffer); >extern struct block_device *lookup_bdev(const char *); >extern void blkdev_show(struct seq_file *,off_t); > > > > > >extern void init_special_inode(struct inode *, umode_t, dev_t); > > >extern void make_bad_inode(struct inode *); >extern int is_bad_inode(struct inode *); > >extern const struct file_operations read_pipefifo_fops; >extern const struct file_operations write_pipefifo_fops; >extern const struct file_operations rdwr_pipefifo_fops; > >extern void check_disk_size_change(struct gendisk *disk, > struct block_device *bdev); >extern int revalidate_disk(struct gendisk *); >extern int check_disk_change(struct block_device *); >extern int __invalidate_device(struct block_device *, bool); >extern int invalidate_partition(struct gendisk *, int); > >unsigned long invalidate_mapping_pages(struct address_space *mapping, > unsigned long start, unsigned long end); > >static inline __attribute__((always_inline)) void invalidate_remote_inode(struct inode *inode) >{ > if ((((inode->i_mode) & 00170000) == 0100000) || (((inode->i_mode) & 00170000) == 0040000) || > (((inode->i_mode) & 00170000) == 0120000)) > invalidate_mapping_pages(inode->i_mapping, 0, -1); >} >extern int invalidate_inode_pages2(struct address_space *mapping); >extern int invalidate_inode_pages2_range(struct address_space *mapping, > unsigned long start, unsigned long end); >extern int write_inode_now(struct inode *, int); >extern int filemap_fdatawrite(struct address_space *); >extern int filemap_flush(struct address_space *); >extern int filemap_fdatawait(struct address_space *); >extern int filemap_fdatawait_range(struct address_space *, loff_t lstart, > loff_t lend); >extern int filemap_write_and_wait(struct address_space *mapping); >extern int filemap_write_and_wait_range(struct address_space *mapping, > loff_t lstart, loff_t lend); 
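>
>/*
> * Illustrative sketch only, not part of the stripped test case: minimal
> * use of the register_chrdev()/unregister_chrdev() wrappers defined above.
> * The example_* names are hypothetical.
> */
>static ssize_t example_read(struct file *filp, char *buf, size_t len, loff_t *ppos)
>{
> /* a real driver would copy data out; returning 0 reports EOF */
> return 0;
>}
>
>static const struct file_operations example_fops = {
> .read = example_read,
>};
>
>/* e.g. from an init path (passing major 0 requests a dynamically
> * allocated major number, which register_chrdev() then returns):
> * int major = register_chrdev(0, "example", &example_fops);
> * ...
> * unregister_chrdev(major, "example");
> */
>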
>extern int __filemap_fdatawrite_range(struct address_space *mapping, > loff_t start, loff_t end, int sync_mode); >extern int filemap_fdatawrite_range(struct address_space *mapping, > loff_t start, loff_t end); > >extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, > int datasync); >extern int vfs_fsync(struct file *file, int datasync); >extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); >extern void sync_supers(void); >extern void emergency_sync(void); >extern void emergency_remount(void); > >extern sector_t bmap(struct inode *, sector_t); > >extern int notify_change(struct dentry *, struct iattr *); >extern int inode_permission(struct inode *, int); >extern int generic_permission(struct inode *, int); > >static inline __attribute__((always_inline)) bool execute_ok(struct inode *inode) >{ > return (inode->i_mode & (00100|00010|00001)) || (((inode->i_mode) & 00170000) == 0040000); >} > >static inline __attribute__((always_inline)) int get_write_access(struct inode *inode) >{ > return atomic_inc_unless_negative(&inode->i_writecount) ? 0 : -26; >} >static inline __attribute__((always_inline)) int deny_write_access(struct file *file) >{ > struct inode *inode = file->f_path.dentry->d_inode; > return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -26; >} >static inline __attribute__((always_inline)) void put_write_access(struct inode * inode) >{ > atomic_sub(1, &inode->i_writecount); >} >static inline __attribute__((always_inline)) void allow_write_access(struct file *file) >{ > if (file) > atomic_add(1, &file->f_path.dentry->d_inode->i_writecount); >} > >static inline __attribute__((always_inline)) void i_readcount_dec(struct inode *inode) >{ > return; >} >static inline __attribute__((always_inline)) void i_readcount_inc(struct inode *inode) >{ > return; >} > >extern int do_pipe_flags(int *, int); >extern struct file *create_read_pipe(struct file *f, int flags); >extern struct file *create_write_pipe(int flags); >extern void free_write_pipe(struct file *); > >extern int kernel_read(struct file *, loff_t, char *, unsigned long); >extern struct file * open_exec(const char *); > > >extern int is_subdir(struct dentry *, struct dentry *); >extern int path_is_under(struct path *, struct path *); >extern ino_t find_inode_number(struct dentry *, struct qstr *); > > > > > > > > > > >static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_PTR(long error) >{ > return (void *) error; >} > >static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) PTR_ERR(const void *ptr) >{ > return (long) ptr; >} > >static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR(const void *ptr) >{ > return __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0); >} > >static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) IS_ERR_OR_NULL(const void *ptr) >{ > return !ptr || __builtin_expect(!!(((unsigned long)ptr) >= (unsigned long)-4095), 0); >} > >static inline __attribute__((always_inline)) void * __attribute__((warn_unused_result)) ERR_CAST(const void *ptr) >{ > > return (void *) ptr; >} > >static inline __attribute__((always_inline)) int __attribute__((warn_unused_result)) PTR_RET(const void *ptr) >{ > if (IS_ERR(ptr)) > return PTR_ERR(ptr); > else > return 0; >} > > > >extern loff_t default_llseek(struct file *file, loff_t offset, int origin); > >extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); > >extern 
int inode_init_always(struct super_block *, struct inode *); >extern void inode_init_once(struct inode *); >extern void address_space_init_once(struct address_space *mapping); >extern void ihold(struct inode * inode); >extern void iput(struct inode *); >extern struct inode * igrab(struct inode *); >extern ino_t iunique(struct super_block *, ino_t); >extern int inode_needs_sync(struct inode *inode); >extern int generic_delete_inode(struct inode *inode); >static inline __attribute__((always_inline)) int generic_drop_inode(struct inode *inode) >{ > return !inode->i_nlink || inode_unhashed(inode); >} > >extern struct inode *ilookup5_nowait(struct super_block *sb, > unsigned long hashval, int (*test)(struct inode *, void *), > void *data); >extern struct inode *ilookup5(struct super_block *sb, unsigned long hashval, > int (*test)(struct inode *, void *), void *data); >extern struct inode *ilookup(struct super_block *sb, unsigned long ino); > >extern struct inode * iget5_locked(struct super_block *, unsigned long, int (*test)(struct inode *, void *), int (*set)(struct inode *, void *), void *); >extern struct inode * iget_locked(struct super_block *, unsigned long); >extern int insert_inode_locked4(struct inode *, unsigned long, int (*test)(struct inode *, void *), void *); >extern int insert_inode_locked(struct inode *); > > > >static inline __attribute__((always_inline)) void lockdep_annotate_inode_mutex_key(struct inode *inode) { }; > >extern void unlock_new_inode(struct inode *); >extern unsigned int get_next_ino(void); > >extern void __iget(struct inode * inode); >extern void iget_failed(struct inode *); >extern void end_writeback(struct inode *); >extern void __destroy_inode(struct inode *); >extern struct inode *new_inode_pseudo(struct super_block *sb); >extern struct inode *new_inode(struct super_block *sb); >extern void free_inode_nonrcu(struct inode *inode); >extern int should_remove_suid(struct dentry *); >extern int file_remove_suid(struct file *); > >extern void __insert_inode_hash(struct inode *, unsigned long hashval); >static inline __attribute__((always_inline)) void insert_inode_hash(struct inode *inode) >{ > __insert_inode_hash(inode, inode->i_ino); >} > >extern void __remove_inode_hash(struct inode *); >static inline __attribute__((always_inline)) void remove_inode_hash(struct inode *inode) >{ > if (!inode_unhashed(inode)) > __remove_inode_hash(inode); >} > >extern void inode_sb_list_add(struct inode *inode); > > >extern void submit_bio(int, struct bio *); >extern int bdev_read_only(struct block_device *); > >extern int set_blocksize(struct block_device *, int); >extern int sb_set_blocksize(struct super_block *, int); >extern int sb_min_blocksize(struct super_block *, int); > >extern int generic_file_mmap(struct file *, struct vm_area_struct *); >extern int generic_file_readonly_mmap(struct file *, struct vm_area_struct *); >extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); >int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk); >extern ssize_t generic_file_aio_read(struct kiocb *, const struct iovec *, unsigned long, loff_t); >extern ssize_t __generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, > loff_t *); >extern ssize_t generic_file_aio_write(struct kiocb *, const struct iovec *, unsigned long, loff_t); >extern ssize_t generic_file_direct_write(struct kiocb *, const struct iovec *, > unsigned long *, loff_t, loff_t *, size_t, size_t); >extern ssize_t 
generic_file_buffered_write(struct kiocb *, const struct iovec *, > unsigned long, loff_t, loff_t *, size_t, ssize_t); >extern ssize_t do_sync_read(struct file *filp, char *buf, size_t len, loff_t *ppos); >extern ssize_t do_sync_write(struct file *filp, const char *buf, size_t len, loff_t *ppos); >extern int generic_segment_checks(const struct iovec *iov, > unsigned long *nr_segs, size_t *count, int access_flags); > > >extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, > unsigned long nr_segs, loff_t pos); >extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, > int datasync); >extern void block_sync_page(struct page *page); > > >extern ssize_t generic_file_splice_read(struct file *, loff_t *, > struct pipe_inode_info *, size_t, unsigned int); >extern ssize_t default_file_splice_read(struct file *, loff_t *, > struct pipe_inode_info *, size_t, unsigned int); >extern ssize_t generic_file_splice_write(struct pipe_inode_info *, > struct file *, loff_t *, size_t, unsigned int); >extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, > struct file *out, loff_t *, size_t len, unsigned int flags); >extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out, > size_t len, unsigned int flags); > >extern void >file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); >extern loff_t noop_llseek(struct file *file, loff_t offset, int origin); >extern loff_t no_llseek(struct file *file, loff_t offset, int origin); >extern loff_t generic_file_llseek(struct file *file, loff_t offset, int origin); >extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, > int origin, loff_t maxsize); >extern int generic_file_open(struct inode * inode, struct file * filp); >extern int nonseekable_open(struct inode * inode, struct file * filp); > >static inline __attribute__((always_inline)) int xip_truncate_page(struct address_space *mapping, loff_t from) >{ > return 0; >} > > > >typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, > loff_t file_offset); > >enum { > > DIO_LOCKING = 0x01, > > > DIO_SKIP_HOLES = 0x02, >}; > >void dio_end_io(struct bio *bio, int error); >void inode_dio_wait(struct inode *inode); >void inode_dio_done(struct inode *inode); > >ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, > struct block_device *bdev, const struct iovec *iov, loff_t offset, > unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, > dio_submit_t submit_io, int flags); > >static inline __attribute__((always_inline)) ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, > struct inode *inode, const struct iovec *iov, loff_t offset, > unsigned long nr_segs, get_block_t get_block) >{ > return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, > offset, nr_segs, get_block, ((void *)0), ((void *)0), > DIO_LOCKING | DIO_SKIP_HOLES); >} > > > > > > >extern const struct file_operations generic_ro_fops; > > > >extern int vfs_readlink(struct dentry *, char *, int, const char *); >extern int vfs_follow_link(struct nameidata *, const char *); >extern int page_readlink(struct dentry *, char *, int); >extern void *page_follow_link_light(struct dentry *, struct nameidata *); >extern void page_put_link(struct dentry *, struct nameidata *, void *); >extern int __page_symlink(struct inode *inode, const char *symname, int len, > int nofs); >extern int page_symlink(struct inode *inode, const char *symname, int len); >extern const struct inode_operations 
page_symlink_inode_operations; >extern int generic_readlink(struct dentry *, char *, int); >extern void generic_fillattr(struct inode *, struct kstat *); >extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); >void __inode_add_bytes(struct inode *inode, loff_t bytes); >void inode_add_bytes(struct inode *inode, loff_t bytes); >void inode_sub_bytes(struct inode *inode, loff_t bytes); >loff_t inode_get_bytes(struct inode *inode); >void inode_set_bytes(struct inode *inode, loff_t bytes); > >extern int vfs_readdir(struct file *, filldir_t, void *); > >extern int vfs_stat(const char *, struct kstat *); >extern int vfs_lstat(const char *, struct kstat *); >extern int vfs_fstat(unsigned int, struct kstat *); >extern int vfs_fstatat(int , const char *, struct kstat *, int); > >extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, > unsigned long arg); >extern int __generic_block_fiemap(struct inode *inode, > struct fiemap_extent_info *fieinfo, > loff_t start, loff_t len, > get_block_t *get_block); >extern int generic_block_fiemap(struct inode *inode, > struct fiemap_extent_info *fieinfo, u64 start, > u64 len, get_block_t *get_block); > >extern void get_filesystem(struct file_system_type *fs); >extern void put_filesystem(struct file_system_type *fs); >extern struct file_system_type *get_fs_type(const char *name); >extern struct super_block *get_super(struct block_device *); >extern struct super_block *get_super_thawed(struct block_device *); >extern struct super_block *get_active_super(struct block_device *bdev); >extern void drop_super(struct super_block *sb); >extern void iterate_supers(void (*)(struct super_block *, void *), void *); >extern void iterate_supers_type(struct file_system_type *, > void (*)(struct super_block *, void *), void *); > >extern int dcache_dir_open(struct inode *, struct file *); >extern int dcache_dir_close(struct inode *, struct file *); >extern loff_t dcache_dir_lseek(struct file *, loff_t, int); >extern int dcache_readdir(struct file *, void *, filldir_t); >extern int simple_setattr(struct dentry *, struct iattr *); >extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *); >extern int simple_statfs(struct dentry *, struct kstatfs *); >extern int simple_open(struct inode *inode, struct file *file); >extern int simple_link(struct dentry *, struct inode *, struct dentry *); >extern int simple_unlink(struct inode *, struct dentry *); >extern int simple_rmdir(struct inode *, struct dentry *); >extern int simple_rename(struct inode *, struct dentry *, struct inode *, struct dentry *); >extern int noop_fsync(struct file *, loff_t, loff_t, int); >extern int simple_empty(struct dentry *); >extern int simple_readpage(struct file *file, struct page *page); >extern int simple_write_begin(struct file *file, struct address_space *mapping, > loff_t pos, unsigned len, unsigned flags, > struct page **pagep, void **fsdata); >extern int simple_write_end(struct file *file, struct address_space *mapping, > loff_t pos, unsigned len, unsigned copied, > struct page *page, void *fsdata); > >extern struct dentry *simple_lookup(struct inode *, struct dentry *, struct nameidata *); >extern ssize_t generic_read_dir(struct file *, char *, size_t, loff_t *); >extern const struct file_operations simple_dir_operations; >extern const struct inode_operations simple_dir_inode_operations; >struct tree_descr { char *name; const struct file_operations *ops; int mode; }; >struct dentry *d_alloc_name(struct dentry *, const char *); >extern 
int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); >extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); >extern void simple_release_fs(struct vfsmount **mount, int *count); > >extern ssize_t simple_read_from_buffer(void *to, size_t count, > loff_t *ppos, const void *from, size_t available); >extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, > const void *from, size_t count); > >extern int generic_file_fsync(struct file *, loff_t, loff_t, int); > >extern int generic_check_addressable(unsigned, u64); > > >extern int buffer_migrate_page(struct address_space *, > struct page *, struct page *, > enum migrate_mode); > > > > >extern int inode_change_ok(const struct inode *, struct iattr *); >extern int inode_newsize_ok(const struct inode *, loff_t offset); >extern void setattr_copy(struct inode *inode, const struct iattr *attr); > >extern void file_update_time(struct file *file); > >extern int generic_show_options(struct seq_file *m, struct dentry *root); >extern void save_mount_options(struct super_block *sb, char *options); >extern void replace_mount_options(struct super_block *sb, char *options); > >static inline __attribute__((always_inline)) ino_t parent_ino(struct dentry *dentry) >{ > ino_t res; > > > > > > spin_lock(&dentry->d_lock); > res = dentry->d_parent->d_inode->i_ino; > spin_unlock(&dentry->d_lock); > return res; >} > > > > > > > >struct simple_transaction_argresp { > ssize_t size; > char data[0]; >}; > > > >char *simple_transaction_get(struct file *file, const char *buf, > size_t size); >ssize_t simple_transaction_read(struct file *file, char *buf, > size_t size, loff_t *pos); >int simple_transaction_release(struct inode *inode, struct file *file); > >void simple_transaction_set(struct file *file, size_t n); > >static inline __attribute__((always_inline)) __attribute__((format(printf, 1, 2))) >void __simple_attr_check_format(const char *fmt, ...) 
>{ > >} > >int simple_attr_open(struct inode *inode, struct file *file, > int (*get)(void *, u64 *), int (*set)(void *, u64), > const char *fmt); >int simple_attr_release(struct inode *inode, struct file *file); >ssize_t simple_attr_read(struct file *file, char *buf, > size_t len, loff_t *ppos); >ssize_t simple_attr_write(struct file *file, const char *buf, > size_t len, loff_t *ppos); > >struct ctl_table; >int proc_nr_files(struct ctl_table *table, int write, > void *buffer, size_t *lenp, loff_t *ppos); >int proc_nr_dentry(struct ctl_table *table, int write, > void *buffer, size_t *lenp, loff_t *ppos); >int proc_nr_inodes(struct ctl_table *table, int write, > void *buffer, size_t *lenp, loff_t *ppos); >int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) get_filesystem_list(char *buf); > >static inline __attribute__((always_inline)) int is_sxid(umode_t mode) >{ > return (mode & 0004000) || ((mode & 0002000) && (mode & 00010)); >} > >static inline __attribute__((always_inline)) void inode_has_no_xattr(struct inode *inode) >{ > if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & (1<<28))) > inode->i_flags |= 4096; >} > > >struct trace_seq { > unsigned char buffer[((1UL) << 12)]; > unsigned int len; > unsigned int readpos; > int full; >}; > >static inline __attribute__((always_inline)) void >trace_seq_init(struct trace_seq *s) >{ > s->len = 0; > s->readpos = 0; > s->full = 0; >} > > > > > >extern __attribute__((format(printf, 2, 3))) >int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); >extern __attribute__((format(printf, 2, 0))) >int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); >extern int >trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); >extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); >extern ssize_t trace_seq_to_user(struct trace_seq *s, char *ubuf, > size_t cnt); >extern int trace_seq_puts(struct trace_seq *s, const char *str); >extern int trace_seq_putc(struct trace_seq *s, unsigned char c); >extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); >extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, > size_t len); >extern void *trace_seq_reserve(struct trace_seq *s, size_t len); >extern int trace_seq_path(struct trace_seq *s, const struct path *path); > > > > > > > > > > >static inline __attribute__((always_inline)) void ftrace_nmi_enter(void) { } >static inline __attribute__((always_inline)) void ftrace_nmi_exit(void) { } > > > > > > > > > > > > > > > > > > > > >struct irqaction; >struct pt_regs; >extern void migrate_irqs(void); > >extern void asm_do_IRQ(unsigned int, struct pt_regs *); >void handle_IRQ(unsigned int, struct pt_regs *); >void init_IRQ(void); > >void arch_trigger_all_cpu_backtrace(void); > > > > >typedef struct { > unsigned int __softirq_pending; > > unsigned int ipi_irqs[6]; > >} __attribute__((__aligned__((1 << 6)))) irq_cpustat_t; > > > >extern irq_cpustat_t irq_stat[]; > > > > > > >u64 smp_irq_stat_cpu(unsigned int cpu); > > >extern void synchronize_irq(unsigned int irq); > > > > >struct task_struct; > > >static inline __attribute__((always_inline)) void account_system_vtime(struct task_struct *tsk) >{ >} > >extern void rcu_nmi_enter(void); >extern void rcu_nmi_exit(void); > >extern void irq_enter(void); > >extern void irq_exit(void); > > > >enum perf_type_id { > PERF_TYPE_HARDWARE = 0, > PERF_TYPE_SOFTWARE = 1, > PERF_TYPE_TRACEPOINT = 2, > PERF_TYPE_HW_CACHE = 3, > 
PERF_TYPE_RAW = 4, > PERF_TYPE_BREAKPOINT = 5, > > PERF_TYPE_MAX, >}; > > > > > > >enum perf_hw_id { > > > > PERF_COUNT_HW_CPU_CYCLES = 0, > PERF_COUNT_HW_INSTRUCTIONS = 1, > PERF_COUNT_HW_CACHE_REFERENCES = 2, > PERF_COUNT_HW_CACHE_MISSES = 3, > PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4, > PERF_COUNT_HW_BRANCH_MISSES = 5, > PERF_COUNT_HW_BUS_CYCLES = 6, > PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7, > PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8, > PERF_COUNT_HW_REF_CPU_CYCLES = 9, > > PERF_COUNT_HW_MAX, >}; > >enum perf_hw_cache_id { > PERF_COUNT_HW_CACHE_L1D = 0, > PERF_COUNT_HW_CACHE_L1I = 1, > PERF_COUNT_HW_CACHE_LL = 2, > PERF_COUNT_HW_CACHE_DTLB = 3, > PERF_COUNT_HW_CACHE_ITLB = 4, > PERF_COUNT_HW_CACHE_BPU = 5, > PERF_COUNT_HW_CACHE_NODE = 6, > > PERF_COUNT_HW_CACHE_MAX, >}; > >enum perf_hw_cache_op_id { > PERF_COUNT_HW_CACHE_OP_READ = 0, > PERF_COUNT_HW_CACHE_OP_WRITE = 1, > PERF_COUNT_HW_CACHE_OP_PREFETCH = 2, > > PERF_COUNT_HW_CACHE_OP_MAX, >}; > >enum perf_hw_cache_op_result_id { > PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0, > PERF_COUNT_HW_CACHE_RESULT_MISS = 1, > > PERF_COUNT_HW_CACHE_RESULT_MAX, >}; > > > > > > > >enum perf_sw_ids { > PERF_COUNT_SW_CPU_CLOCK = 0, > PERF_COUNT_SW_TASK_CLOCK = 1, > PERF_COUNT_SW_PAGE_FAULTS = 2, > PERF_COUNT_SW_CONTEXT_SWITCHES = 3, > PERF_COUNT_SW_CPU_MIGRATIONS = 4, > PERF_COUNT_SW_PAGE_FAULTS_MIN = 5, > PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6, > PERF_COUNT_SW_ALIGNMENT_FAULTS = 7, > PERF_COUNT_SW_EMULATION_FAULTS = 8, > > PERF_COUNT_SW_MAX, >}; > > > > > >enum perf_event_sample_format { > PERF_SAMPLE_IP = 1U << 0, > PERF_SAMPLE_TID = 1U << 1, > PERF_SAMPLE_TIME = 1U << 2, > PERF_SAMPLE_ADDR = 1U << 3, > PERF_SAMPLE_READ = 1U << 4, > PERF_SAMPLE_CALLCHAIN = 1U << 5, > PERF_SAMPLE_ID = 1U << 6, > PERF_SAMPLE_CPU = 1U << 7, > PERF_SAMPLE_PERIOD = 1U << 8, > PERF_SAMPLE_STREAM_ID = 1U << 9, > PERF_SAMPLE_RAW = 1U << 10, > PERF_SAMPLE_BRANCH_STACK = 1U << 11, > > PERF_SAMPLE_MAX = 1U << 12, >}; > >enum perf_branch_sample_type { > PERF_SAMPLE_BRANCH_USER = 1U << 0, > PERF_SAMPLE_BRANCH_KERNEL = 1U << 1, > PERF_SAMPLE_BRANCH_HV = 1U << 2, > > PERF_SAMPLE_BRANCH_ANY = 1U << 3, > PERF_SAMPLE_BRANCH_ANY_CALL = 1U << 4, > PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5, > PERF_SAMPLE_BRANCH_IND_CALL = 1U << 6, > > PERF_SAMPLE_BRANCH_MAX = 1U << 7, >}; > >enum perf_event_read_format { > PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0, > PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1, > PERF_FORMAT_ID = 1U << 2, > PERF_FORMAT_GROUP = 1U << 3, > > PERF_FORMAT_MAX = 1U << 4, >}; > >struct perf_event_attr { > > > > > __u32 type; > > > > > __u32 size; > > > > > __u64 config; > > union { > __u64 sample_period; > __u64 sample_freq; > }; > > __u64 sample_type; > __u64 read_format; > > __u64 disabled : 1, > inherit : 1, > pinned : 1, > exclusive : 1, > exclude_user : 1, > exclude_kernel : 1, > exclude_hv : 1, > exclude_idle : 1, > mmap : 1, > comm : 1, > freq : 1, > inherit_stat : 1, > enable_on_exec : 1, > task : 1, > watermark : 1, > > precise_ip : 2, > mmap_data : 1, > sample_id_all : 1, > > exclude_host : 1, > exclude_guest : 1, > > __reserved_1 : 43; > > union { > __u32 wakeup_events; > __u32 wakeup_watermark; > }; > > __u32 bp_type; > union { > __u64 bp_addr; > __u64 config1; > }; > union { > __u64 bp_len; > __u64 config2; > }; > __u64 branch_sample_type; >}; > >enum perf_event_ioc_flags { > PERF_IOC_FLAG_GROUP = 1U << 0, >}; > > > > >struct perf_event_mmap_page { > __u32 version; > __u32 compat_version; > > __u32 lock; > __u32 index; > __s64 offset; > __u64 time_enabled; > __u64 time_running; > 
union { > __u64 capabilities; > __u64 cap_usr_time : 1, > cap_usr_rdpmc : 1, > cap_____res : 62; > }; > > __u16 pmc_width; > > __u16 time_shift; > __u32 time_mult; > __u64 time_offset; > > > > > > __u64 __reserved[120]; > > __u64 data_head; > __u64 data_tail; >}; > >struct perf_event_header { > __u32 type; > __u16 misc; > __u16 size; >}; > >enum perf_event_type { > > PERF_RECORD_MMAP = 1, > > PERF_RECORD_LOST = 2, > > PERF_RECORD_COMM = 3, > > PERF_RECORD_EXIT = 4, > > PERF_RECORD_THROTTLE = 5, > PERF_RECORD_UNTHROTTLE = 6, > > PERF_RECORD_FORK = 7, > > PERF_RECORD_READ = 8, > > PERF_RECORD_SAMPLE = 9, > > PERF_RECORD_MAX, >}; > >enum perf_callchain_context { > PERF_CONTEXT_HV = (__u64)-32, > PERF_CONTEXT_KERNEL = (__u64)-128, > PERF_CONTEXT_USER = (__u64)-512, > > PERF_CONTEXT_GUEST = (__u64)-2048, > PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176, > PERF_CONTEXT_GUEST_USER = (__u64)-2560, > > PERF_CONTEXT_MAX = (__u64)-4095, >}; > > > > > >struct sched_param { > int sched_priority; >}; > > > > > > > > > >typedef unsigned long cputime_t; > > > > > > >typedef u64 cputime64_t; > > > > > > > > > > > > > > > > >struct task_struct; > > >extern int print_fatal_signals; > > > > >struct sigqueue { > struct list_head list; > int flags; > siginfo_t info; > struct user_struct *user; >}; > > > > >struct sigpending { > struct list_head list; > sigset_t signal; >}; > >static inline __attribute__((always_inline)) void sigaddset(sigset_t *set, int _sig) >{ > unsigned long sig = _sig - 1; > if ((64 / 32) == 1) > set->sig[0] |= 1UL << sig; > else > set->sig[sig / 32] |= 1UL << (sig % 32); >} > >static inline __attribute__((always_inline)) void sigdelset(sigset_t *set, int _sig) >{ > unsigned long sig = _sig - 1; > if ((64 / 32) == 1) > set->sig[0] &= ~(1UL << sig); > else > set->sig[sig / 32] &= ~(1UL << (sig % 32)); >} > >static inline __attribute__((always_inline)) int sigismember(sigset_t *set, int _sig) >{ > unsigned long sig = _sig - 1; > if ((64 / 32) == 1) > return 1 & (set->sig[0] >> sig); > else > return 1 & (set->sig[sig / 32] >> (sig % 32)); >} > >static inline __attribute__((always_inline)) int sigfindinword(unsigned long word) >{ > return (({ unsigned long __t = (~(~word)); fls(__t & -__t); }) - 1); >} > > > >static inline __attribute__((always_inline)) int sigisemptyset(sigset_t *set) >{ > extern void _NSIG_WORDS_is_unsupported_size(void); > switch ((64 / 32)) { > case 4: > return (set->sig[3] | set->sig[2] | > set->sig[1] | set->sig[0]) == 0; > case 2: > return (set->sig[1] | set->sig[0]) == 0; > case 1: > return set->sig[0] == 0; > default: > _NSIG_WORDS_is_unsupported_size(); > return 0; > } >} > >static inline __attribute__((always_inline)) void sigorsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) | (b3)); r->sig[2] = ((a2) | (b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) | (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) | (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } > > >static inline __attribute__((always_inline)) void sigandsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & (b3)); r->sig[2] = ((a2) & (b2)); case 2: 
a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & (b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & (b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } > > >static inline __attribute__((always_inline)) void sigandnsets(sigset_t *r, const sigset_t *a, const sigset_t *b) { extern void _NSIG_WORDS_is_unsupported_size(void); unsigned long a0, a1, a2, a3, b0, b1, b2, b3; switch ((64 / 32)) { case 4: a3 = a->sig[3]; a2 = a->sig[2]; b3 = b->sig[3]; b2 = b->sig[2]; r->sig[3] = ((a3) & ~(b3)); r->sig[2] = ((a2) & ~(b2)); case 2: a1 = a->sig[1]; b1 = b->sig[1]; r->sig[1] = ((a1) & ~(b1)); case 1: a0 = a->sig[0]; b0 = b->sig[0]; r->sig[0] = ((a0) & ~(b0)); break; default: _NSIG_WORDS_is_unsupported_size(); } } > >static inline __attribute__((always_inline)) void signotset(sigset_t *set) { extern void _NSIG_WORDS_is_unsupported_size(void); switch ((64 / 32)) { case 4: set->sig[3] = (~(set->sig[3])); set->sig[2] = (~(set->sig[2])); case 2: set->sig[1] = (~(set->sig[1])); case 1: set->sig[0] = (~(set->sig[0])); break; default: _NSIG_WORDS_is_unsupported_size(); } } > > > > >static inline __attribute__((always_inline)) void sigemptyset(sigset_t *set) >{ > switch ((64 / 32)) { > default: > ({ void *__p = (set); size_t __n = sizeof(sigset_t); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > break; > case 2: set->sig[1] = 0; > case 1: set->sig[0] = 0; > break; > } >} > >static inline __attribute__((always_inline)) void sigfillset(sigset_t *set) >{ > switch ((64 / 32)) { > default: > ({ void *__p = (set); size_t __n = sizeof(sigset_t); if ((__n) != 0) { if (__builtin_constant_p((-1)) && (-1) == 0) __memzero((__p),(__n)); else memset((__p),(-1),(__n)); } (__p); }); > break; > case 2: set->sig[1] = -1; > case 1: set->sig[0] = -1; > break; > } >} > > > >static inline __attribute__((always_inline)) void sigaddsetmask(sigset_t *set, unsigned long mask) >{ > set->sig[0] |= mask; >} > >static inline __attribute__((always_inline)) void sigdelsetmask(sigset_t *set, unsigned long mask) >{ > set->sig[0] &= ~mask; >} > >static inline __attribute__((always_inline)) int sigtestsetmask(sigset_t *set, unsigned long mask) >{ > return (set->sig[0] & mask) != 0; >} > >static inline __attribute__((always_inline)) void siginitset(sigset_t *set, unsigned long mask) >{ > set->sig[0] = mask; > switch ((64 / 32)) { > default: > ({ void *__p = (&set->sig[1]); size_t __n = sizeof(long)*((64 / 32)-1); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > break; > case 2: set->sig[1] = 0; > case 1: ; > } >} > >static inline __attribute__((always_inline)) void siginitsetinv(sigset_t *set, unsigned long mask) >{ > set->sig[0] = ~mask; > switch ((64 / 32)) { > default: > ({ void *__p = (&set->sig[1]); size_t __n = sizeof(long)*((64 / 32)-1); if ((__n) != 0) { if (__builtin_constant_p((-1)) && (-1) == 0) __memzero((__p),(__n)); else memset((__p),(-1),(__n)); } (__p); }); > break; > case 2: set->sig[1] = -1; > case 1: ; > } >} > > > >static inline __attribute__((always_inline)) void init_sigpending(struct sigpending *sig) >{ > sigemptyset(&sig->signal); > INIT_LIST_HEAD(&sig->list); >} > >extern void flush_sigqueue(struct sigpending *queue); > > >static inline __attribute__((always_inline)) int valid_signal(unsigned long sig) >{ > return sig <= 64 ? 
1 : 0; >} > >struct timespec; >struct pt_regs; > >extern int next_signal(struct sigpending *pending, sigset_t *mask); >extern int do_send_sig_info(int sig, struct siginfo *info, > struct task_struct *p, bool group); >extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); >extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); >extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, > siginfo_t *info); >extern long do_sigpending(void *, unsigned long); >extern int do_sigtimedwait(const sigset_t *, siginfo_t *, > const struct timespec *); >extern int sigprocmask(int, sigset_t *, sigset_t *); >extern void set_current_blocked(const sigset_t *); >extern int show_unhandled_signals; > >extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); >extern void block_sigmask(struct k_sigaction *ka, int signr); >extern void exit_signals(struct task_struct *tsk); > >extern struct kmem_cache *sighand_cachep; > >int unhandled_signal(struct task_struct *tsk, int sig); > >void signals_init(void); > > > > > > > > >struct prop_global { > > > > > > int shift; > > > > > > > struct percpu_counter events; >}; > > > > > > >struct prop_descriptor { > int index; > struct prop_global pg[2]; > struct mutex mutex; >}; > >int prop_descriptor_init(struct prop_descriptor *pd, int shift); >void prop_change_shift(struct prop_descriptor *pd, int new_shift); > > > > > >struct prop_local_percpu { > > > > struct percpu_counter events; > > > > > int shift; > unsigned long period; > raw_spinlock_t lock; >}; > >int prop_local_init_percpu(struct prop_local_percpu *pl); >void prop_local_destroy_percpu(struct prop_local_percpu *pl); >void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl); >void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl, > long *numerator, long *denominator); > >static inline __attribute__((always_inline)) >void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl) >{ > unsigned long flags; > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > __prop_inc_percpu(pd, pl); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); >} > >void __prop_inc_percpu_max(struct prop_descriptor *pd, > struct prop_local_percpu *pl, long frac); > > > > > > >struct prop_local_single { > > > > unsigned long events; > > > > > > unsigned long period; > int shift; > raw_spinlock_t lock; >}; > > > > > >int prop_local_init_single(struct prop_local_single *pl); >void prop_local_destroy_single(struct prop_local_single *pl); >void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl); >void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl, > long *numerator, long *denominator); > >static inline __attribute__((always_inline)) >void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl) >{ > unsigned long flags; > > do { do { ({ unsigned long 
__dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = arch_local_irq_save(); } while (0); do { } while (0); } while (0); > __prop_inc_single(pd, pl); > do { if (({ ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_irqs_disabled_flags(flags); })) { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); do { } while (0); } else { do { } while (0); do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); arch_local_irq_restore(flags); } while (0); } } while (0); >} > > > >struct seccomp_data { > int nr; > __u32 arch; > __u64 instruction_pointer; > __u64 args[6]; >}; > > > > > > > > >struct seccomp_filter; > >struct seccomp { > int mode; > struct seccomp_filter *filter; >}; > >extern int __secure_computing(int); >static inline __attribute__((always_inline)) int secure_computing(int this_syscall) >{ > if (__builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 21)), 0)) > return __secure_computing(this_syscall); > return 0; >} > > >static inline __attribute__((always_inline)) void secure_computing_strict(int this_syscall) >{ > do { if (__builtin_expect(!!(secure_computing(this_syscall) != 0), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/seccomp.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "81" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); >} > >extern long prctl_get_seccomp(void); >extern long prctl_set_seccomp(unsigned long, char *); > >static inline __attribute__((always_inline)) int seccomp_mode(struct seccomp *s) >{ > return s->mode; >} > >extern void put_seccomp_filter(struct task_struct *tsk); >extern void get_seccomp_filter(struct task_struct *tsk); >extern u32 seccomp_bpf_load(int off); > > > > > > > >struct plist_head { > struct list_head node_list; >}; > >struct plist_node { > int prio; > struct list_head prio_list; > struct list_head node_list; >}; > >static inline __attribute__((always_inline)) void >plist_head_init(struct plist_head *head) >{ > INIT_LIST_HEAD(&head->node_list); >} > > > > > > >static inline __attribute__((always_inline)) void plist_node_init(struct plist_node *node, int prio) >{ > node->prio = prio; > INIT_LIST_HEAD(&node->prio_list); > INIT_LIST_HEAD(&node->node_list); >} > >extern void plist_add(struct plist_node *node, struct plist_head *head); >extern void plist_del(struct plist_node *node, struct plist_head *head); > >static inline __attribute__((always_inline)) int plist_head_empty(const struct plist_head *head) >{ > return list_empty(&head->node_list); >} > > > > > >static inline __attribute__((always_inline)) int plist_node_empty(const struct plist_node *node) >{ > return list_empty(&node->node_list); >} > >static inline __attribute__((always_inline)) struct plist_node *plist_first(const struct plist_head *head) >{ > return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.next); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct plist_node,node_list) );}) > ; >} > > > > > > > >static inline __attribute__((always_inline)) struct plist_node *plist_last(const struct plist_head *head) >{ > return ({ const typeof( ((struct plist_node *)0)->node_list ) *__mptr = (head->node_list.prev); (struct plist_node *)( (char *)__mptr - __builtin_offsetof(struct 
plist_node,node_list) );}) > ; >} > > > >extern int max_lock_depth; > >struct rt_mutex { > raw_spinlock_t wait_lock; > struct plist_head wait_list; > struct task_struct *owner; > > > > > > >}; > >struct rt_mutex_waiter; >struct hrtimer_sleeper; > > > > > > > static inline __attribute__((always_inline)) int rt_mutex_debug_check_no_locks_freed(const void *from, > unsigned long len) > { > return 0; > } > >static inline __attribute__((always_inline)) int rt_mutex_is_locked(struct rt_mutex *lock) >{ > return lock->owner != ((void *)0); >} > >extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); >extern void rt_mutex_destroy(struct rt_mutex *lock); > >extern void rt_mutex_lock(struct rt_mutex *lock); >extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, > int detect_deadlock); >extern int rt_mutex_timed_lock(struct rt_mutex *lock, > struct hrtimer_sleeper *timeout, > int detect_deadlock); > >extern int rt_mutex_trylock(struct rt_mutex *lock); > >extern void rt_mutex_unlock(struct rt_mutex *lock); > > > > > > >struct rusage { > struct timeval ru_utime; > struct timeval ru_stime; > long ru_maxrss; > long ru_ixrss; > long ru_idrss; > long ru_isrss; > long ru_minflt; > long ru_majflt; > long ru_nswap; > long ru_inblock; > long ru_oublock; > long ru_msgsnd; > long ru_msgrcv; > long ru_nsignals; > long ru_nvcsw; > long ru_nivcsw; >}; > >struct rlimit { > unsigned long rlim_cur; > unsigned long rlim_max; >}; > > > >struct rlimit64 { > __u64 rlim_cur; > __u64 rlim_max; >}; > > > > > > > > >struct task_struct; > >int getrusage(struct task_struct *p, int who, struct rusage *ru); >int do_prlimit(struct task_struct *tsk, unsigned int resource, > struct rlimit *new_rlim, struct rlimit *old_rlim); > > > > > > > > > > > > >struct timerqueue_node { > struct rb_node node; > ktime_t expires; >}; > >struct timerqueue_head { > struct rb_root head; > struct timerqueue_node *next; >}; > > >extern void timerqueue_add(struct timerqueue_head *head, > struct timerqueue_node *node); >extern void timerqueue_del(struct timerqueue_head *head, > struct timerqueue_node *node); >extern struct timerqueue_node *timerqueue_iterate_next( > struct timerqueue_node *node); > >static inline __attribute__((always_inline)) >struct timerqueue_node *timerqueue_getnext(struct timerqueue_head *head) >{ > return head->next; >} > >static inline __attribute__((always_inline)) void timerqueue_init(struct timerqueue_node *node) >{ > rb_init_node(&node->node); >} > >static inline __attribute__((always_inline)) void timerqueue_init_head(struct timerqueue_head *head) >{ > head->head = (struct rb_root) { ((void *)0), }; > head->next = ((void *)0); >} > > >struct hrtimer_clock_base; >struct hrtimer_cpu_base; > > > > >enum hrtimer_mode { > HRTIMER_MODE_ABS = 0x0, > HRTIMER_MODE_REL = 0x1, > HRTIMER_MODE_PINNED = 0x02, > HRTIMER_MODE_ABS_PINNED = 0x02, > HRTIMER_MODE_REL_PINNED = 0x03, >}; > > > > >enum hrtimer_restart { > HRTIMER_NORESTART, > HRTIMER_RESTART, >}; > >struct hrtimer { > struct timerqueue_node node; > ktime_t _softexpires; > enum hrtimer_restart (*function)(struct hrtimer *); > struct hrtimer_clock_base *base; > unsigned long state; > > > > > >}; > >struct hrtimer_sleeper { > struct hrtimer timer; > struct task_struct *task; >}; > >struct hrtimer_clock_base { > struct hrtimer_cpu_base *cpu_base; > int index; > clockid_t clockid; > struct timerqueue_head active; > ktime_t resolution; > ktime_t (*get_time)(void); > ktime_t softirq_time; > ktime_t offset; >}; > >enum hrtimer_base_type { > HRTIMER_BASE_MONOTONIC, > 
HRTIMER_BASE_REALTIME, > HRTIMER_BASE_BOOTTIME, > HRTIMER_MAX_CLOCK_BASES, >}; > >struct hrtimer_cpu_base { > raw_spinlock_t lock; > unsigned int active_bases; > unsigned int clock_was_set; > > ktime_t expires_next; > int hres_active; > int hang_detected; > unsigned long nr_events; > unsigned long nr_retries; > unsigned long nr_hangs; > ktime_t max_hang_time; > > struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; >}; > >static inline __attribute__((always_inline)) void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) >{ > timer->node.expires = time; > timer->_softexpires = time; >} > >static inline __attribute__((always_inline)) void hrtimer_set_expires_range(struct hrtimer *timer, ktime_t time, ktime_t delta) >{ > timer->_softexpires = time; > timer->node.expires = ktime_add_safe(time, delta); >} > >static inline __attribute__((always_inline)) void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t time, unsigned long delta) >{ > timer->_softexpires = time; > timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta)); >} > >static inline __attribute__((always_inline)) void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64) >{ > timer->node.expires.tv64 = tv64; > timer->_softexpires.tv64 = tv64; >} > >static inline __attribute__((always_inline)) void hrtimer_add_expires(struct hrtimer *timer, ktime_t time) >{ > timer->node.expires = ktime_add_safe(timer->node.expires, time); > timer->_softexpires = ktime_add_safe(timer->_softexpires, time); >} > >static inline __attribute__((always_inline)) void hrtimer_add_expires_ns(struct hrtimer *timer, u64 ns) >{ > timer->node.expires = ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 + (ns) }; }); > timer->_softexpires = ({ (ktime_t){ .tv64 = (timer->_softexpires).tv64 + (ns) }; }); >} > >static inline __attribute__((always_inline)) ktime_t hrtimer_get_expires(const struct hrtimer *timer) >{ > return timer->node.expires; >} > >static inline __attribute__((always_inline)) ktime_t hrtimer_get_softexpires(const struct hrtimer *timer) >{ > return timer->_softexpires; >} > >static inline __attribute__((always_inline)) s64 hrtimer_get_expires_tv64(const struct hrtimer *timer) >{ > return timer->node.expires.tv64; >} >static inline __attribute__((always_inline)) s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer) >{ > return timer->_softexpires.tv64; >} > >static inline __attribute__((always_inline)) s64 hrtimer_get_expires_ns(const struct hrtimer *timer) >{ > return ((timer->node.expires).tv64); >} > >static inline __attribute__((always_inline)) ktime_t hrtimer_expires_remaining(const struct hrtimer *timer) >{ > return ({ (ktime_t){ .tv64 = (timer->node.expires).tv64 - (timer->base->get_time()).tv64 }; }); >} > > >struct clock_event_device; > >extern void hrtimer_interrupt(struct clock_event_device *dev); > > > > >static inline __attribute__((always_inline)) ktime_t hrtimer_cb_get_time(struct hrtimer *timer) >{ > return timer->base->get_time(); >} > >static inline __attribute__((always_inline)) int hrtimer_is_hres_active(struct hrtimer *timer) >{ > return timer->base->cpu_base->hres_active; >} > >extern void hrtimer_peek_ahead_timers(void); > >extern void clock_was_set_delayed(void); > >extern void clock_was_set(void); > >extern void timerfd_clock_was_set(void); > > > >extern void hrtimers_resume(void); > >extern ktime_t ktime_get(void); >extern ktime_t ktime_get_real(void); >extern ktime_t ktime_get_boottime(void); >extern ktime_t ktime_get_monotonic_offset(void); >extern ktime_t 
ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot); > >extern __attribute__((section(".data..percpu" ""))) __typeof__(struct tick_device) tick_cpu_device; > > > > > >extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, > enum hrtimer_mode mode); > > > > > > > >static inline __attribute__((always_inline)) void hrtimer_init_on_stack(struct hrtimer *timer, > clockid_t which_clock, > enum hrtimer_mode mode) >{ > hrtimer_init(timer, which_clock, mode); >} >static inline __attribute__((always_inline)) void destroy_hrtimer_on_stack(struct hrtimer *timer) { } > > > >extern int hrtimer_start(struct hrtimer *timer, ktime_t tim, > const enum hrtimer_mode mode); >extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, > unsigned long range_ns, const enum hrtimer_mode mode); >extern int >__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, > unsigned long delta_ns, > const enum hrtimer_mode mode, int wakeup); > >extern int hrtimer_cancel(struct hrtimer *timer); >extern int hrtimer_try_to_cancel(struct hrtimer *timer); > >static inline __attribute__((always_inline)) int hrtimer_start_expires(struct hrtimer *timer, > enum hrtimer_mode mode) >{ > unsigned long delta; > ktime_t soft, hard; > soft = hrtimer_get_softexpires(timer); > hard = hrtimer_get_expires(timer); > delta = ((({ (ktime_t){ .tv64 = (hard).tv64 - (soft).tv64 }; })).tv64); > return hrtimer_start_range_ns(timer, soft, delta, mode); >} > >static inline __attribute__((always_inline)) int hrtimer_restart(struct hrtimer *timer) >{ > return hrtimer_start_expires(timer, HRTIMER_MODE_ABS); >} > > >extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer); >extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp); > >extern ktime_t hrtimer_get_next_event(void); > > > > > > >static inline __attribute__((always_inline)) int hrtimer_active(const struct hrtimer *timer) >{ > return timer->state != 0x00; >} > > > > >static inline __attribute__((always_inline)) int hrtimer_is_queued(struct hrtimer *timer) >{ > return timer->state & 0x01; >} > > > > > >static inline __attribute__((always_inline)) int hrtimer_callback_running(struct hrtimer *timer) >{ > return timer->state & 0x02; >} > > >extern u64 >hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); > > >static inline __attribute__((always_inline)) u64 hrtimer_forward_now(struct hrtimer *timer, > ktime_t interval) >{ > return hrtimer_forward(timer, timer->base->get_time(), interval); >} > > >extern long hrtimer_nanosleep(struct timespec *rqtp, > struct timespec *rmtp, > const enum hrtimer_mode mode, > const clockid_t clockid); >extern long hrtimer_nanosleep_restart(struct restart_block *restart_block); > >extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, > struct task_struct *tsk); > >extern int schedule_hrtimeout_range(ktime_t *expires, unsigned long delta, > const enum hrtimer_mode mode); >extern int schedule_hrtimeout_range_clock(ktime_t *expires, > unsigned long delta, const enum hrtimer_mode mode, int clock); >extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); > > >extern void hrtimer_run_queues(void); >extern void hrtimer_run_pending(void); > > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) hrtimers_init(void); > > >extern u64 ktime_divns(const ktime_t kt, s64 div); > > > > > >extern void sysrq_timer_list_show(void); > > > >struct task_io_accounting { > >}; > > > >struct task_struct; > 
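>/*
> * Illustrative sketch only, not part of the stripped test case: a
> * self-rearming timer built on the hrtimer interfaces declared above.
> * example_timer, example_cb and the 1 ms period are hypothetical;
> * the bare 1 below is CLOCK_MONOTONIC with the macro name stripped,
> * and ns_to_ktime() is the helper already used by
> * hrtimer_set_expires_range_ns() above.
> */
>static struct hrtimer example_timer;
>
>static enum hrtimer_restart example_cb(struct hrtimer *t)
>{
> hrtimer_forward_now(t, ns_to_ktime(1000000)); /* re-arm 1 ms out */
> return HRTIMER_RESTART;
>}
>
>/* e.g. from setup code:
> * hrtimer_init(&example_timer, 1, HRTIMER_MODE_REL);
> * example_timer.function = example_cb;
> * hrtimer_start(&example_timer, ns_to_ktime(1000000), HRTIMER_MODE_REL);
> */
>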
>static inline __attribute__((always_inline)) void >account_scheduler_latency(struct task_struct *task, int usecs, int inter) >{ >} > >static inline __attribute__((always_inline)) void clear_all_latency_tracing(struct task_struct *p) >{ >} > > > > > >struct selinux_audit_rule; >struct audit_context; >struct kern_ipc_perm; > > > > > > >bool selinux_is_enabled(void); > > > >struct user_struct; >struct cred; >struct inode; > > > > > > > >struct group_info { > atomic_t usage; > int ngroups; > int nblocks; > gid_t small_block[32]; > gid_t *blocks[0]; >}; > >static inline __attribute__((always_inline)) struct group_info *get_group_info(struct group_info *gi) >{ > atomic_add(1, &gi->usage); > return gi; >} > >extern struct group_info *groups_alloc(int); >extern struct group_info init_groups; >extern void groups_free(struct group_info *); >extern int set_current_groups(struct group_info *); >extern int set_groups(struct cred *, struct group_info *); >extern int groups_search(const struct group_info *, gid_t); > > > > > >extern int in_group_p(gid_t); >extern int in_egroup_p(gid_t); > >struct cred { > atomic_t usage; > > > > > > > > uid_t uid; > gid_t gid; > uid_t suid; > gid_t sgid; > uid_t euid; > gid_t egid; > uid_t fsuid; > gid_t fsgid; > unsigned securebits; > kernel_cap_t cap_inheritable; > kernel_cap_t cap_permitted; > kernel_cap_t cap_effective; > kernel_cap_t cap_bset; > > void *security; > > struct user_struct *user; > struct user_namespace *user_ns; > struct group_info *group_info; > struct rcu_head rcu; >}; > >extern void __put_cred(struct cred *); >extern void exit_creds(struct task_struct *); >extern int copy_creds(struct task_struct *, unsigned long); >extern const struct cred *get_task_cred(struct task_struct *); >extern struct cred *cred_alloc_blank(void); >extern struct cred *prepare_creds(void); >extern struct cred *prepare_exec_creds(void); >extern int commit_creds(struct cred *); >extern void abort_creds(struct cred *); >extern const struct cred *override_creds(const struct cred *); >extern void revert_creds(const struct cred *); >extern struct cred *prepare_kernel_cred(struct task_struct *); >extern int change_create_files_as(struct cred *, struct inode *); >extern int set_security_override(struct cred *, u32); >extern int set_security_override_from_ctx(struct cred *, const char *); >extern int set_create_files_as(struct cred *, struct inode *); >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) cred_init(void); > >static inline __attribute__((always_inline)) void validate_creds(const struct cred *cred) >{ >} >static inline __attribute__((always_inline)) void validate_creds_for_do_exit(struct task_struct *tsk) >{ >} >static inline __attribute__((always_inline)) void validate_process_creds(void) >{ >} > >static inline __attribute__((always_inline)) struct cred *get_new_cred(struct cred *cred) >{ > atomic_add(1, &cred->usage); > return cred; >} > >static inline __attribute__((always_inline)) const struct cred *get_cred(const struct cred *cred) >{ > struct cred *nonconst_cred = (struct cred *) cred; > validate_creds(cred); > return get_new_cred(nonconst_cred); >} > >static inline __attribute__((always_inline)) void put_cred(const struct cred *_cred) >{ > struct cred *cred = (struct cred *) _cred; > > validate_creds(cred); > if ((atomic_sub_return(1, &(cred)->usage) == 0)) > __put_cred(cred); >} > >extern struct user_namespace init_user_ns; > > > >struct llist_head { > struct llist_node *first; >}; > >struct 
llist_node { > struct llist_node *next; >}; > >static inline __attribute__((always_inline)) void init_llist_head(struct llist_head *list) >{ > list->first = ((void *)0); >} > >static inline __attribute__((always_inline)) bool llist_empty(const struct llist_head *head) >{ > return (*(volatile typeof(head->first) *)&(head->first)) == ((void *)0); >} > >static inline __attribute__((always_inline)) struct llist_node *llist_next(struct llist_node *node) >{ > return node->next; >} > >static inline __attribute__((always_inline)) bool llist_add(struct llist_node *new, struct llist_head *head) >{ > struct llist_node *entry, *old_entry; > > entry = head->first; > for (;;) { > old_entry = entry; > new->next = entry; > entry = ((__typeof__(*(&head->first)))__cmpxchg_mb((&head->first), (unsigned long)(old_entry), (unsigned long)(new), sizeof(*(&head->first)))); > if (entry == old_entry) > break; > } > > return old_entry == ((void *)0); >} > >static inline __attribute__((always_inline)) struct llist_node *llist_del_all(struct llist_head *head) >{ > return ((__typeof__(*(&head->first)))__xchg((unsigned long)(((void *)0)),(&head->first),sizeof(*(&head->first)))); >} > >extern bool llist_add_batch(struct llist_node *new_first, > struct llist_node *new_last, > struct llist_head *head); >extern struct llist_node *llist_del_first(struct llist_head *head); > > > > >struct exec_domain; >struct futex_pi_state; >struct robust_list_head; >struct bio_list; >struct fs_struct; >struct perf_event_context; >struct blk_plug; > >extern unsigned long avenrun[]; >extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); > >extern unsigned long total_forks; >extern int nr_threads; >extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned long) process_counts; >extern int nr_processes(void); >extern unsigned long nr_running(void); >extern unsigned long nr_uninterruptible(void); >extern unsigned long nr_iowait(void); >extern unsigned long nr_iowait_cpu(int cpu); >extern unsigned long this_cpu_load(void); > > >extern void calc_global_load(unsigned long ticks); > >extern unsigned long get_parent_ip(unsigned long addr); > >struct seq_file; >struct cfs_rq; >struct task_group; > >extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m); >extern void proc_sched_set_task(struct task_struct *p); >extern void >print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq); > >extern char ___assert_task_state[1 - 2*!!( > sizeof("RSDTtZXxKW")-1 != ( __builtin_constant_p(512) ? ( (512) < 1 ? ____ilog2_NaN() : (512) & (1ULL << 63) ? 63 : (512) & (1ULL << 62) ? 62 : (512) & (1ULL << 61) ? 61 : (512) & (1ULL << 60) ? 60 : (512) & (1ULL << 59) ? 59 : (512) & (1ULL << 58) ? 58 : (512) & (1ULL << 57) ? 57 : (512) & (1ULL << 56) ? 56 : (512) & (1ULL << 55) ? 55 : (512) & (1ULL << 54) ? 54 : (512) & (1ULL << 53) ? 53 : (512) & (1ULL << 52) ? 52 : (512) & (1ULL << 51) ? 51 : (512) & (1ULL << 50) ? 50 : (512) & (1ULL << 49) ? 49 : (512) & (1ULL << 48) ? 48 : (512) & (1ULL << 47) ? 47 : (512) & (1ULL << 46) ? 46 : (512) & (1ULL << 45) ? 45 : (512) & (1ULL << 44) ? 44 : (512) & (1ULL << 43) ? 43 : (512) & (1ULL << 42) ? 42 : (512) & (1ULL << 41) ? 41 : (512) & (1ULL << 40) ? 40 : (512) & (1ULL << 39) ? 39 : (512) & (1ULL << 38) ? 38 : (512) & (1ULL << 37) ? 37 : (512) & (1ULL << 36) ? 36 : (512) & (1ULL << 35) ? 35 : (512) & (1ULL << 34) ? 34 : (512) & (1ULL << 33) ? 33 : (512) & (1ULL << 32) ? 32 : (512) & (1ULL << 31) ? 31 : (512) & (1ULL << 30) ? 30 : (512) & (1ULL << 29) ? 
29 : (512) & (1ULL << 28) ? 28 : (512) & (1ULL << 27) ? 27 : (512) & (1ULL << 26) ? 26 : (512) & (1ULL << 25) ? 25 : (512) & (1ULL << 24) ? 24 : (512) & (1ULL << 23) ? 23 : (512) & (1ULL << 22) ? 22 : (512) & (1ULL << 21) ? 21 : (512) & (1ULL << 20) ? 20 : (512) & (1ULL << 19) ? 19 : (512) & (1ULL << 18) ? 18 : (512) & (1ULL << 17) ? 17 : (512) & (1ULL << 16) ? 16 : (512) & (1ULL << 15) ? 15 : (512) & (1ULL << 14) ? 14 : (512) & (1ULL << 13) ? 13 : (512) & (1ULL << 12) ? 12 : (512) & (1ULL << 11) ? 11 : (512) & (1ULL << 10) ? 10 : (512) & (1ULL << 9) ? 9 : (512) & (1ULL << 8) ? 8 : (512) & (1ULL << 7) ? 7 : (512) & (1ULL << 6) ? 6 : (512) & (1ULL << 5) ? 5 : (512) & (1ULL << 4) ? 4 : (512) & (1ULL << 3) ? 3 : (512) & (1ULL << 2) ? 2 : (512) & (1ULL << 1) ? 1 : (512) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof(512) <= 4) ? __ilog2_u32(512) : __ilog2_u64(512) )+1)]; > >extern rwlock_t tasklist_lock; >extern spinlock_t mmlist_lock; > >struct task_struct; > > > > > >extern void sched_init(void); >extern void sched_init_smp(void); >extern void schedule_tail(struct task_struct *prev); >extern void init_idle(struct task_struct *idle, int cpu); >extern void init_idle_bootup_task(struct task_struct *idle); > >extern int runqueue_is_locked(int cpu); > > >extern void select_nohz_load_balancer(int stop_tick); >extern void set_cpu_sd_state_idle(void); >extern int get_nohz_timer_target(void); > >extern void show_state_filter(unsigned long state_filter); > >static inline __attribute__((always_inline)) void show_state(void) >{ > show_state_filter(0); >} > >extern void show_regs(struct pt_regs *); > > > > > > >extern void show_stack(struct task_struct *task, unsigned long *sp); > >void io_schedule(void); >long io_schedule_timeout(long timeout); > >extern void cpu_init (void); >extern void trap_init(void); >extern void update_process_times(int user); >extern void scheduler_tick(void); > >extern void sched_show_task(struct task_struct *p); > > >extern void touch_softlockup_watchdog(void); >extern void touch_softlockup_watchdog_sync(void); >extern void touch_all_softlockup_watchdogs(void); >extern int proc_dowatchdog_thresh(struct ctl_table *table, int write, > void *buffer, > size_t *lenp, loff_t *ppos); >extern unsigned int softlockup_panic; >void lockup_detector_init(void); > >extern unsigned int sysctl_hung_task_panic; >extern unsigned long sysctl_hung_task_check_count; >extern unsigned long sysctl_hung_task_timeout_secs; >extern unsigned long sysctl_hung_task_warnings; >extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, > void *buffer, > size_t *lenp, loff_t *ppos); > >extern char __sched_text_start[], __sched_text_end[]; > > >extern int in_sched_functions(unsigned long addr); > > >extern signed long schedule_timeout(signed long timeout); >extern signed long schedule_timeout_interruptible(signed long timeout); >extern signed long schedule_timeout_killable(signed long timeout); >extern signed long schedule_timeout_uninterruptible(signed long timeout); > void schedule(void); >extern void schedule_preempt_disabled(void); >extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner); > >struct nsproxy; >struct user_namespace; > >extern int sysctl_max_map_count; > > > > > > > > > > >struct iovec >{ > void *iov_base; > __kernel_size_t iov_len; >}; > >struct kvec { > void *iov_base; > size_t iov_len; >}; > >static inline __attribute__((always_inline)) size_t iov_length(const struct iovec *iov, unsigned long nr_segs) >{ > unsigned long seg; > size_t ret = 0; > > for 
(seg = 0; seg < nr_segs; seg++) > ret += iov[seg].iov_len; > return ret; >} > >unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to); > > > > > > > > >struct kioctx; > >struct kiocb { > struct list_head ki_run_list; > unsigned long ki_flags; > int ki_users; > unsigned ki_key; > > struct file *ki_filp; > struct kioctx *ki_ctx; > int (*ki_cancel)(struct kiocb *, struct io_event *); > ssize_t (*ki_retry)(struct kiocb *); > void (*ki_dtor)(struct kiocb *); > > union { > void *user; > struct task_struct *tsk; > } ki_obj; > > __u64 ki_user_data; > loff_t ki_pos; > > void *private; > > unsigned short ki_opcode; > size_t ki_nbytes; > char *ki_buf; > size_t ki_left; > struct iovec ki_inline_vec; > struct iovec *ki_iovec; > unsigned long ki_nr_segs; > unsigned long ki_cur_seg; > > struct list_head ki_list; > > struct list_head ki_batch; > > > > > > struct eventfd_ctx *ki_eventfd; >}; > >struct aio_ring { > unsigned id; > unsigned nr; > unsigned head; > unsigned tail; > > unsigned magic; > unsigned compat_features; > unsigned incompat_features; > unsigned header_length; > > > struct io_event io_events[0]; >}; > > > > >struct aio_ring_info { > unsigned long mmap_base; > unsigned long mmap_size; > > struct page **ring_pages; > spinlock_t ring_lock; > long nr_pages; > > unsigned nr, tail; > > struct page *internal_pages[8]; >}; > >struct kioctx { > atomic_t users; > int dead; > struct mm_struct *mm; > > > unsigned long user_id; > struct hlist_node list; > > wait_queue_head_t wait; > > spinlock_t ctx_lock; > > int reqs_active; > struct list_head active_reqs; > struct list_head run_list; > > > unsigned max_reqs; > > struct aio_ring_info ring_info; > > struct delayed_work wq; > > struct rcu_head rcu_head; >}; > > >extern unsigned aio_max_size; > >static inline __attribute__((always_inline)) ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } >static inline __attribute__((always_inline)) int aio_put_req(struct kiocb *iocb) { return 0; } >static inline __attribute__((always_inline)) void kick_iocb(struct kiocb *iocb) { } >static inline __attribute__((always_inline)) int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } >struct mm_struct; >static inline __attribute__((always_inline)) void exit_aio(struct mm_struct *mm) { } >static inline __attribute__((always_inline)) long do_io_submit(aio_context_t ctx_id, long nr, > struct iocb * *iocbpp, > bool compat) { return 0; } > > >static inline __attribute__((always_inline)) struct kiocb *list_kiocb(struct list_head *h) >{ > return ({ const typeof( ((struct kiocb *)0)->ki_list ) *__mptr = (h); (struct kiocb *)( (char *)__mptr - __builtin_offsetof(struct kiocb,ki_list) );}); >} > > >extern unsigned long aio_nr; >extern unsigned long aio_max_nr; > > > >extern void arch_pick_mmap_layout(struct mm_struct *mm); >extern unsigned long >arch_get_unmapped_area(struct file *, unsigned long, unsigned long, > unsigned long, unsigned long); >extern unsigned long >arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, > unsigned long len, unsigned long pgoff, > unsigned long flags); >extern void arch_unmap_area(struct mm_struct *, unsigned long); >extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long); > > > > > >extern void set_dumpable(struct mm_struct *mm, int value); >extern int get_dumpable(struct mm_struct *mm); > >struct sighand_struct { > atomic_t count; > struct k_sigaction action[64]; > spinlock_t siglock; > wait_queue_head_t signalfd_wqh; >}; > >struct pacct_struct { > int ac_flag; > 
long ac_exitcode; > unsigned long ac_mem; > cputime_t ac_utime, ac_stime; > unsigned long ac_minflt, ac_majflt; >}; > >struct cpu_itimer { > cputime_t expires; > cputime_t incr; > u32 error; > u32 incr_error; >}; > >struct task_cputime { > cputime_t utime; > cputime_t stime; > unsigned long long sum_exec_runtime; >}; > >struct thread_group_cputimer { > struct task_cputime cputime; > int running; > raw_spinlock_t lock; >}; > > >struct autogroup; > >struct signal_struct { > atomic_t sigcnt; > atomic_t live; > int nr_threads; > > wait_queue_head_t wait_chldexit; > > > struct task_struct *curr_target; > > > struct sigpending shared_pending; > > > int group_exit_code; > > > > > > int notify_count; > struct task_struct *group_exit_task; > > > int group_stop_count; > unsigned int flags; > > unsigned int is_child_subreaper:1; > unsigned int has_child_subreaper:1; > > > struct list_head posix_timers; > > > struct hrtimer real_timer; > struct pid *leader_pid; > ktime_t it_real_incr; > > > > > > > struct cpu_itimer it[2]; > > > > > > struct thread_group_cputimer cputimer; > > > struct task_cputime cputime_expires; > > struct list_head cpu_timers[3]; > > struct pid *tty_old_pgrp; > > > int leader; > > struct tty_struct *tty; > > cputime_t utime, stime, cutime, cstime; > cputime_t gtime; > cputime_t cgtime; > > cputime_t prev_utime, prev_stime; > > unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw; > unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt; > unsigned long inblock, oublock, cinblock, coublock; > unsigned long maxrss, cmaxrss; > struct task_io_accounting ioac; > > > > > > > > unsigned long long sum_sched_runtime; > > struct rlimit rlim[16]; > > unsigned audit_tty; > struct tty_audit_buf *tty_audit_buf; > > struct rw_semaphore group_rwsem; > > > int oom_adj; > int oom_score_adj; > int oom_score_adj_min; > > > struct mutex cred_guard_mutex; > > >}; > >static inline __attribute__((always_inline)) int signal_group_exit(const struct signal_struct *sig) >{ > return (sig->flags & 0x00000004) || > (sig->group_exit_task != ((void *)0)); >} > > > > >struct user_struct { > atomic_t __count; > atomic_t processes; > atomic_t files; > atomic_t sigpending; > > atomic_t inotify_watches; > atomic_t inotify_devs; > > > > > > atomic_long_t epoll_watches; > > > > > > unsigned long locked_shm; > > > > > > > > struct hlist_node uidhash_node; > uid_t uid; > struct user_namespace *user_ns; > > > atomic_long_t locked_vm; > >}; > >extern int uids_sysfs_init(void); > >extern struct user_struct *find_user(uid_t); > >extern struct user_struct root_user; > > > >struct backing_dev_info; >struct reclaim_state; > > >struct sched_info { > > unsigned long pcount; > unsigned long long run_delay; > > > unsigned long long last_arrival, > last_queued; >}; > >static inline __attribute__((always_inline)) int sched_info_on(void) >{ > > return 1; > > > > > > >} > >enum cpu_idle_type { > CPU_IDLE, > CPU_NOT_IDLE, > CPU_NEWLY_IDLE, > CPU_MAX_IDLE_TYPES >}; > >enum powersavings_balance_level { > POWERSAVINGS_BALANCE_NONE = 0, > POWERSAVINGS_BALANCE_BASIC, > > > POWERSAVINGS_BALANCE_WAKEUP, > > > MAX_POWERSAVINGS_BALANCE_LEVELS >}; > >extern int sched_mc_power_savings, sched_smt_power_savings; > >static inline __attribute__((always_inline)) int sd_balance_for_mc_power(void) >{ > if (sched_smt_power_savings) > return 0x0100; > > if (!sched_mc_power_savings) > return 0x1000; > > return 0; >} > >static inline __attribute__((always_inline)) int sd_balance_for_package_power(void) >{ > if (sched_mc_power_savings | sched_smt_power_savings) > return 
0x0100; > > return 0x1000; >} > >extern int __attribute__((weak)) arch_sd_sibiling_asym_packing(void); > > > > > > > >static inline __attribute__((always_inline)) int sd_power_saving_flags(void) >{ > if (sched_mc_power_savings | sched_smt_power_savings) > return 0x0002; > > return 0; >} > >struct sched_group_power { > atomic_t ref; > > > > > unsigned int power, power_orig; > unsigned long next_update; > > > > atomic_t nr_busy_cpus; >}; > >struct sched_group { > struct sched_group *next; > atomic_t ref; > > unsigned int group_weight; > struct sched_group_power *sgp; > > unsigned long cpumask[0]; >}; > >static inline __attribute__((always_inline)) struct cpumask *sched_group_cpus(struct sched_group *sg) >{ > return ((struct cpumask *)(1 ? (sg->cpumask) : (void *)sizeof(__check_is_bitmap(sg->cpumask)))); >} > > > > > >static inline __attribute__((always_inline)) unsigned int group_first_cpu(struct sched_group *group) >{ > return cpumask_first(sched_group_cpus(group)); >} > >struct sched_domain_attr { > int relax_domain_level; >}; > > > > > >extern int sched_domain_level_max; > >struct sched_domain { > > struct sched_domain *parent; > struct sched_domain *child; > struct sched_group *groups; > unsigned long min_interval; > unsigned long max_interval; > unsigned int busy_factor; > unsigned int imbalance_pct; > unsigned int cache_nice_tries; > unsigned int busy_idx; > unsigned int idle_idx; > unsigned int newidle_idx; > unsigned int wake_idx; > unsigned int forkexec_idx; > unsigned int smt_gain; > int flags; > int level; > > > unsigned long last_balance; > unsigned int balance_interval; > unsigned int nr_balance_failed; > > u64 last_update; > > > > unsigned int lb_count[CPU_MAX_IDLE_TYPES]; > unsigned int lb_failed[CPU_MAX_IDLE_TYPES]; > unsigned int lb_balanced[CPU_MAX_IDLE_TYPES]; > unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES]; > unsigned int lb_gained[CPU_MAX_IDLE_TYPES]; > unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES]; > unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES]; > unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES]; > > > unsigned int alb_count; > unsigned int alb_failed; > unsigned int alb_pushed; > > > unsigned int sbe_count; > unsigned int sbe_balanced; > unsigned int sbe_pushed; > > > unsigned int sbf_count; > unsigned int sbf_balanced; > unsigned int sbf_pushed; > > > unsigned int ttwu_wake_remote; > unsigned int ttwu_move_affine; > unsigned int ttwu_move_balance; > > > char *name; > > union { > void *private; > struct rcu_head rcu; > }; > > unsigned int span_weight; > > > > > > > > unsigned long span[0]; >}; > >static inline __attribute__((always_inline)) struct cpumask *sched_domain_span(struct sched_domain *sd) >{ > return ((struct cpumask *)(1 ? 
(sd->span) : (void *)sizeof(__check_is_bitmap(sd->span)))); >} > >extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], > struct sched_domain_attr *dattr_new); > > >cpumask_var_t *alloc_sched_domains(unsigned int ndoms); >void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms); > > >static inline __attribute__((always_inline)) int test_sd_parent(struct sched_domain *sd, int flag) >{ > if (sd->parent && (sd->parent->flags & flag)) > return 1; > > return 0; >} > >unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu); >unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu); > >bool cpus_share_cache(int this_cpu, int that_cpu); > >struct io_context; > > > > > >static inline __attribute__((always_inline)) void prefetch_stack(struct task_struct *t) { } > > >struct audit_context; >struct mempolicy; >struct pipe_inode_info; >struct uts_namespace; > >struct rq; >struct sched_domain; > >struct sched_class { > const struct sched_class *next; > > void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); > void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); > void (*yield_task) (struct rq *rq); > bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt); > > void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags); > > struct task_struct * (*pick_next_task) (struct rq *rq); > void (*put_prev_task) (struct rq *rq, struct task_struct *p); > > > int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags); > > void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); > void (*post_schedule) (struct rq *this_rq); > void (*task_waking) (struct task_struct *task); > void (*task_woken) (struct rq *this_rq, struct task_struct *task); > > void (*set_cpus_allowed)(struct task_struct *p, > const struct cpumask *newmask); > > void (*rq_online)(struct rq *rq); > void (*rq_offline)(struct rq *rq); > > > void (*set_curr_task) (struct rq *rq); > void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); > void (*task_fork) (struct task_struct *p); > > void (*switched_from) (struct rq *this_rq, struct task_struct *task); > void (*switched_to) (struct rq *this_rq, struct task_struct *task); > void (*prio_changed) (struct rq *this_rq, struct task_struct *task, > int oldprio); > > unsigned int (*get_rr_interval) (struct rq *rq, > struct task_struct *task); > > > void (*task_move_group) (struct task_struct *p, int on_rq); > >}; > >struct load_weight { > unsigned long weight, inv_weight; >}; > > >struct sched_statistics { > u64 wait_start; > u64 wait_max; > u64 wait_count; > u64 wait_sum; > u64 iowait_count; > u64 iowait_sum; > > u64 sleep_start; > u64 sleep_max; > s64 sum_sleep_runtime; > > u64 block_start; > u64 block_max; > u64 exec_max; > u64 slice_max; > > u64 nr_migrations_cold; > u64 nr_failed_migrations_affine; > u64 nr_failed_migrations_running; > u64 nr_failed_migrations_hot; > u64 nr_forced_migrations; > > u64 nr_wakeups; > u64 nr_wakeups_sync; > u64 nr_wakeups_migrate; > u64 nr_wakeups_local; > u64 nr_wakeups_remote; > u64 nr_wakeups_affine; > u64 nr_wakeups_affine_attempts; > u64 nr_wakeups_passive; > u64 nr_wakeups_idle; >}; > > >struct sched_entity { > struct load_weight load; > struct rb_node run_node; > struct list_head group_node; > unsigned int on_rq; > > u64 exec_start; > u64 sum_exec_runtime; > u64 vruntime; > u64 prev_sum_exec_runtime; > > u64 nr_migrations; > > > struct sched_statistics statistics; > > > > struct sched_entity *parent; 
> > struct cfs_rq *cfs_rq; > > struct cfs_rq *my_q; > >}; > >struct sched_rt_entity { > struct list_head run_list; > unsigned long timeout; > unsigned int time_slice; > int nr_cpus_allowed; > > struct sched_rt_entity *back; > > struct sched_rt_entity *parent; > > struct rt_rq *rt_rq; > > struct rt_rq *my_q; > >}; > > > > > > > >struct rcu_node; > >enum perf_event_task_context { > perf_invalid_context = -1, > perf_hw_context = 0, > perf_sw_context, > perf_nr_task_contexts, >}; > >struct task_struct { > volatile long state; > void *stack; > atomic_t usage; > unsigned int flags; > unsigned int ptrace; > > > struct llist_node wake_entry; > int on_cpu; > > int on_rq; > > int prio, static_prio, normal_prio; > unsigned int rt_priority; > const struct sched_class *sched_class; > struct sched_entity se; > struct sched_rt_entity rt; > > struct task_group *sched_task_group; > > unsigned char fpu_counter; > > > > > unsigned int policy; > cpumask_t cpus_allowed; > > > int rcu_read_lock_nesting; > char rcu_read_unlock_special; > struct list_head rcu_node_entry; > > > struct rcu_node *rcu_blocked_node; > > > > > > > struct sched_info sched_info; > > > struct list_head tasks; > > struct plist_node pushable_tasks; > > > struct mm_struct *mm, *active_mm; > > unsigned brk_randomized:1; > > > > > > int exit_state; > int exit_code, exit_signal; > int pdeath_signal; > unsigned int jobctl; > > unsigned int personality; > unsigned did_exec:1; > unsigned in_execve:1; > > unsigned in_iowait:1; > > > unsigned no_new_privs:1; > > > unsigned sched_reset_on_fork:1; > unsigned sched_contributes_to_load:1; > > > > unsigned irq_thread:1; > > > pid_t pid; > pid_t tgid; > > struct task_struct *real_parent; > struct task_struct *parent; > > > > struct list_head children; > struct list_head sibling; > struct task_struct *group_leader; > > > > > > > struct list_head ptraced; > struct list_head ptrace_entry; > > > struct pid_link pids[PIDTYPE_MAX]; > struct list_head thread_group; > > struct completion *vfork_done; > int *set_child_tid; > int *clear_child_tid; > > cputime_t utime, stime, utimescaled, stimescaled; > cputime_t gtime; > > cputime_t prev_utime, prev_stime; > > unsigned long nvcsw, nivcsw; > struct timespec start_time; > struct timespec real_start_time; > > unsigned long min_flt, maj_flt; > > struct task_cputime cputime_expires; > struct list_head cpu_timers[3]; > > > const struct cred *real_cred; > > const struct cred *cred; > > struct cred *replacement_session_keyring; > > char comm[16]; > > > > > int link_count, total_link_count; > > > struct sysv_sem sysvsem; > > > > unsigned long last_switch_count; > > > struct thread_struct thread; > > struct fs_struct *fs; > > struct files_struct *files; > > struct nsproxy *nsproxy; > > struct signal_struct *signal; > struct sighand_struct *sighand; > > sigset_t blocked, real_blocked; > sigset_t saved_sigmask; > struct sigpending pending; > > unsigned long sas_ss_sp; > size_t sas_ss_size; > int (*notifier)(void *priv); > void *notifier_data; > sigset_t *notifier_mask; > struct audit_context *audit_context; > > uid_t loginuid; > unsigned int sessionid; > > struct seccomp seccomp; > > > u32 parent_exec_id; > u32 self_exec_id; > > > spinlock_t alloc_lock; > > > raw_spinlock_t pi_lock; > > > > struct plist_head pi_waiters; > > struct rt_mutex_waiter *pi_blocked_on; > > void *journal_info; > > > struct bio_list *bio_list; > > > > struct blk_plug *plug; > > > > struct reclaim_state *reclaim_state; > > struct backing_dev_info *backing_dev_info; > > struct io_context *io_context; > > 
unsigned long ptrace_message; > siginfo_t *last_siginfo; > struct task_io_accounting ioac; > > struct css_set *cgroups; > > struct list_head cg_list; > > > struct robust_list_head *robust_list; > > > > struct list_head pi_state_list; > struct futex_pi_state *pi_state_cache; > > > struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; > struct mutex perf_event_mutex; > struct list_head perf_event_list; > > > > > > > struct rcu_head rcu; > > > > > struct pipe_inode_info *splice_pipe; > > int nr_dirtied; > int nr_dirtied_pause; > unsigned long dirty_paused_when; > > unsigned long timer_slack_ns; > unsigned long default_timer_slack_ns; > > struct list_head *scm_work_list; > > unsigned long trace; > > unsigned long trace_recursion; > > > struct memcg_batch_info { > int do_batch; > struct mem_cgroup *memcg; > unsigned long nr_pages; > unsigned long memsw_nr_pages; > } memcg_batch; > > > atomic_t ptrace_bp_refcnt; > >}; > >static inline __attribute__((always_inline)) int rt_prio(int prio) >{ > if (__builtin_expect(!!(prio < 100), 0)) > return 1; > return 0; >} > >static inline __attribute__((always_inline)) int rt_task(struct task_struct *p) >{ > return rt_prio(p->prio); >} > >static inline __attribute__((always_inline)) struct pid *task_pid(struct task_struct *task) >{ > return task->pids[PIDTYPE_PID].pid; >} > >static inline __attribute__((always_inline)) struct pid *task_tgid(struct task_struct *task) >{ > return task->group_leader->pids[PIDTYPE_PID].pid; >} > > > > > > >static inline __attribute__((always_inline)) struct pid *task_pgrp(struct task_struct *task) >{ > return task->group_leader->pids[PIDTYPE_PGID].pid; >} > >static inline __attribute__((always_inline)) struct pid *task_session(struct task_struct *task) >{ > return task->group_leader->pids[PIDTYPE_SID].pid; >} > >struct pid_namespace; > >pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, > struct pid_namespace *ns); > >static inline __attribute__((always_inline)) pid_t task_pid_nr(struct task_struct *tsk) >{ > return tsk->pid; >} > >static inline __attribute__((always_inline)) pid_t task_pid_nr_ns(struct task_struct *tsk, > struct pid_namespace *ns) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); >} > >static inline __attribute__((always_inline)) pid_t task_pid_vnr(struct task_struct *tsk) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_PID, ((void *)0)); >} > > >static inline __attribute__((always_inline)) pid_t task_tgid_nr(struct task_struct *tsk) >{ > return tsk->tgid; >} > >pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns); > >static inline __attribute__((always_inline)) pid_t task_tgid_vnr(struct task_struct *tsk) >{ > return pid_vnr(task_tgid(tsk)); >} > > >static inline __attribute__((always_inline)) pid_t task_pgrp_nr_ns(struct task_struct *tsk, > struct pid_namespace *ns) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); >} > >static inline __attribute__((always_inline)) pid_t task_pgrp_vnr(struct task_struct *tsk) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ((void *)0)); >} > > >static inline __attribute__((always_inline)) pid_t task_session_nr_ns(struct task_struct *tsk, > struct pid_namespace *ns) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); >} > >static inline __attribute__((always_inline)) pid_t task_session_vnr(struct task_struct *tsk) >{ > return __task_pid_nr_ns(tsk, PIDTYPE_SID, ((void *)0)); >} > > >static inline __attribute__((always_inline)) pid_t task_pgrp_nr(struct task_struct *tsk) >{ > return task_pgrp_nr_ns(tsk, &init_pid_ns); >} > >static 
inline __attribute__((always_inline)) int pid_alive(struct task_struct *p) >{ > return p->pids[PIDTYPE_PID].pid != ((void *)0); >} > > > > > > > >static inline __attribute__((always_inline)) int is_global_init(struct task_struct *tsk) >{ > return tsk->pid == 1; >} > > > > > >extern int is_container_init(struct task_struct *tsk); > >extern struct pid *cad_pid; > >extern void free_task(struct task_struct *tsk); > > >extern void __put_task_struct(struct task_struct *t); > >static inline __attribute__((always_inline)) void put_task_struct(struct task_struct *t) >{ > if ((atomic_sub_return(1, &t->usage) == 0)) > __put_task_struct(t); >} > >extern void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st); >extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st); > >extern int task_free_register(struct notifier_block *n); >extern int task_free_unregister(struct notifier_block *n); > >extern bool task_set_jobctl_pending(struct task_struct *task, > unsigned int mask); >extern void task_clear_jobctl_trapping(struct task_struct *task); >extern void task_clear_jobctl_pending(struct task_struct *task, > unsigned int mask); > > > > > > >static inline __attribute__((always_inline)) void rcu_copy_process(struct task_struct *p) >{ > p->rcu_read_lock_nesting = 0; > p->rcu_read_unlock_special = 0; > > p->rcu_blocked_node = ((void *)0); > > > > > INIT_LIST_HEAD(&p->rcu_node_entry); >} > >extern void do_set_cpus_allowed(struct task_struct *p, > const struct cpumask *new_mask); > >extern int set_cpus_allowed_ptr(struct task_struct *p, > const struct cpumask *new_mask); > >void calc_load_enter_idle(void); >void calc_load_exit_idle(void); > > > > > > >static inline __attribute__((always_inline)) int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask) >{ > return set_cpus_allowed_ptr(p, &new_mask); >} > >extern unsigned long long __attribute__((no_instrument_function)) sched_clock(void); > > > >extern u64 cpu_clock(int cpu); >extern u64 local_clock(void); >extern u64 sched_clock_cpu(int cpu); > > >extern void sched_clock_init(void); > > >static inline __attribute__((always_inline)) void sched_clock_tick(void) >{ >} > >static inline __attribute__((always_inline)) void sched_clock_idle_sleep_event(void) >{ >} > >static inline __attribute__((always_inline)) void sched_clock_idle_wakeup_event(u64 delta_ns) >{ >} > >static inline __attribute__((always_inline)) void enable_sched_clock_irqtime(void) {} >static inline __attribute__((always_inline)) void disable_sched_clock_irqtime(void) {} > > >extern unsigned long long >task_sched_runtime(struct task_struct *task); > > > >extern void sched_exec(void); > > > > >extern void sched_clock_idle_sleep_event(void); >extern void sched_clock_idle_wakeup_event(u64 delta_ns); > > >extern void idle_task_exit(void); > > > > > >extern void wake_up_idle_cpu(int cpu); > > > > >extern unsigned int sysctl_sched_latency; >extern unsigned int sysctl_sched_min_granularity; >extern unsigned int sysctl_sched_wakeup_granularity; >extern unsigned int sysctl_sched_child_runs_first; > >enum sched_tunable_scaling { > SCHED_TUNABLESCALING_NONE, > SCHED_TUNABLESCALING_LOG, > SCHED_TUNABLESCALING_LINEAR, > SCHED_TUNABLESCALING_END, >}; >extern enum sched_tunable_scaling sysctl_sched_tunable_scaling; > > >extern unsigned int sysctl_sched_migration_cost; >extern unsigned int sysctl_sched_nr_migrate; >extern unsigned int sysctl_sched_time_avg; >extern unsigned int sysctl_timer_migration; >extern unsigned int sysctl_sched_shares_window; > >int 
sched_proc_update_handler(struct ctl_table *table, int write, > void *buffer, size_t *length, > loff_t *ppos); > > >static inline __attribute__((always_inline)) unsigned int get_sysctl_timer_migration(void) >{ > return sysctl_timer_migration; >} > > > > > > >extern unsigned int sysctl_sched_rt_period; >extern int sysctl_sched_rt_runtime; > >int sched_rt_handler(struct ctl_table *table, int write, > void *buffer, size_t *lenp, > loff_t *ppos); > >static inline __attribute__((always_inline)) void sched_autogroup_create_attach(struct task_struct *p) { } >static inline __attribute__((always_inline)) void sched_autogroup_detach(struct task_struct *p) { } >static inline __attribute__((always_inline)) void sched_autogroup_fork(struct signal_struct *sig) { } >static inline __attribute__((always_inline)) void sched_autogroup_exit(struct signal_struct *sig) { } > > > > > > > >extern int rt_mutex_getprio(struct task_struct *p); >extern void rt_mutex_setprio(struct task_struct *p, int prio); >extern void rt_mutex_adjust_pi(struct task_struct *p); >static inline __attribute__((always_inline)) bool tsk_is_pi_blocked(struct task_struct *tsk) >{ > return tsk->pi_blocked_on != ((void *)0); >} > >extern bool yield_to(struct task_struct *p, bool preempt); >extern void set_user_nice(struct task_struct *p, long nice); >extern int task_prio(const struct task_struct *p); >extern int task_nice(const struct task_struct *p); >extern int can_nice(const struct task_struct *p, const int nice); >extern int task_curr(const struct task_struct *p); >extern int idle_cpu(int cpu); >extern int sched_setscheduler(struct task_struct *, int, > const struct sched_param *); >extern int sched_setscheduler_nocheck(struct task_struct *, int, > const struct sched_param *); >extern struct task_struct *idle_task(int cpu); > > > > >static inline __attribute__((always_inline)) bool is_idle_task(const struct task_struct *p) >{ > return p->pid == 0; >} >extern struct task_struct *curr_task(int cpu); >extern void set_curr_task(int cpu, struct task_struct *p); > >void yield(void); > > > > >extern struct exec_domain default_exec_domain; > >union thread_union { > struct thread_info thread_info; > unsigned long stack[8192/sizeof(long)]; >}; > > >static inline __attribute__((always_inline)) int kstack_end(void *addr) >{ > > > > return !(((unsigned long)addr+sizeof(void*)-1) & (8192 -sizeof(void*))); >} > > >extern union thread_union init_thread_union; >extern struct task_struct init_task; > >extern struct mm_struct init_mm; > >extern struct pid_namespace init_pid_ns; > >extern struct task_struct *find_task_by_vpid(pid_t nr); >extern struct task_struct *find_task_by_pid_ns(pid_t nr, > struct pid_namespace *ns); > >extern void __set_special_pids(struct pid *pid); > > >extern struct user_struct * alloc_uid(struct user_namespace *, uid_t); >static inline __attribute__((always_inline)) struct user_struct *get_uid(struct user_struct *u) >{ > atomic_add(1, &u->__count); > return u; >} >extern void free_uid(struct user_struct *); >extern void release_uids(struct user_namespace *ns); > > > >extern void xtime_update(unsigned long ticks); > >extern int wake_up_state(struct task_struct *tsk, unsigned int state); >extern int wake_up_process(struct task_struct *tsk); >extern void wake_up_new_task(struct task_struct *tsk); > > extern void kick_process(struct task_struct *tsk); > > > >extern void sched_fork(struct task_struct *p); >extern void sched_dead(struct task_struct *p); > >extern void proc_caches_init(void); >extern void flush_signals(struct task_struct 
*); >extern void __flush_signals(struct task_struct *); >extern void ignore_signals(struct task_struct *); >extern void flush_signal_handlers(struct task_struct *, int force_default); >extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info); > >static inline __attribute__((always_inline)) int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) >{ > unsigned long flags; > int ret; > > do { do { ({ unsigned long __dummy; typeof(flags) __dummy2; (void)(&__dummy == &__dummy2); 1; }); flags = _raw_spin_lock_irqsave(spinlock_check(&tsk->sighand->siglock)); } while (0); } while (0); > ret = dequeue_signal(tsk, mask, info); > spin_unlock_irqrestore(&tsk->sighand->siglock, flags); > > return ret; >} > >extern void block_all_signals(int (*notifier)(void *priv), void *priv, > sigset_t *mask); >extern void unblock_all_signals(void); >extern void release_task(struct task_struct * p); >extern int send_sig_info(int, struct siginfo *, struct task_struct *); >extern int force_sigsegv(int, struct task_struct *); >extern int force_sig_info(int, struct siginfo *, struct task_struct *); >extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp); >extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid); >extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *, > const struct cred *, u32); >extern int kill_pgrp(struct pid *pid, int sig, int priv); >extern int kill_pid(struct pid *pid, int sig, int priv); >extern int kill_proc_info(int, struct siginfo *, pid_t); >extern __attribute__((warn_unused_result)) bool do_notify_parent(struct task_struct *, int); >extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent); >extern void force_sig(int, struct task_struct *); >extern int send_sig(int, struct task_struct *, int); >extern int zap_other_threads(struct task_struct *p); >extern struct sigqueue *sigqueue_alloc(void); >extern void sigqueue_free(struct sigqueue *); >extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group); >extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); >extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long); > >static inline __attribute__((always_inline)) int kill_cad_pid(int sig, int priv) >{ > return kill_pid(cad_pid, sig, priv); >} > >static inline __attribute__((always_inline)) int on_sig_stack(unsigned long sp) >{ > > > > > return sp > (get_current())->sas_ss_sp && > sp - (get_current())->sas_ss_sp <= (get_current())->sas_ss_size; > >} > >static inline __attribute__((always_inline)) int sas_ss_flags(unsigned long sp) >{ > return ((get_current())->sas_ss_size == 0 ? 2 > : on_sig_stack(sp) ? 
1 : 0); >} > > > > >extern struct mm_struct * mm_alloc(void); > > >extern void __mmdrop(struct mm_struct *); >static inline __attribute__((always_inline)) void mmdrop(struct mm_struct * mm) >{ > if (__builtin_expect(!!((atomic_sub_return(1, &mm->mm_count) == 0)), 0)) > __mmdrop(mm); >} > > >extern void mmput(struct mm_struct *); > >extern struct mm_struct *get_task_mm(struct task_struct *task); > > > > > >extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode); > >extern void mm_release(struct task_struct *, struct mm_struct *); > >extern struct mm_struct *dup_mm(struct task_struct *tsk); > >extern int copy_thread(unsigned long, unsigned long, unsigned long, > struct task_struct *, struct pt_regs *); >extern void flush_thread(void); >extern void exit_thread(void); > >extern void exit_files(struct task_struct *); >extern void __cleanup_sighand(struct sighand_struct *); > >extern void exit_itimers(struct signal_struct *); >extern void flush_itimer_signals(void); > >extern void do_group_exit(int); > >extern void daemonize(const char *, ...); >extern int allow_signal(int); >extern int disallow_signal(int); > >extern int do_execve(const char *, > const char * const *, > const char * const *, struct pt_regs *); >extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int *, int *); >struct task_struct *fork_idle(int); > >extern void set_task_comm(struct task_struct *tsk, char *from); >extern char *get_task_comm(char *to, struct task_struct *tsk); > > >void scheduler_ipi(void); >extern unsigned long wait_task_inactive(struct task_struct *, long match_state); > >extern bool current_is_single_threaded(void); > >static inline __attribute__((always_inline)) int get_nr_threads(struct task_struct *tsk) >{ > return tsk->signal->nr_threads; >} > >static inline __attribute__((always_inline)) bool thread_group_leader(struct task_struct *p) >{ > return p->exit_signal >= 0; >} > > > > > > > >static inline __attribute__((always_inline)) int has_group_leader_pid(struct task_struct *p) >{ > return p->pid == p->tgid; >} > >static inline __attribute__((always_inline)) >int same_thread_group(struct task_struct *p1, struct task_struct *p2) >{ > return p1->tgid == p2->tgid; >} > >static inline __attribute__((always_inline)) struct task_struct *next_thread(const struct task_struct *p) >{ > return ({typeof (*p->thread_group.next) *__ptr = (typeof (*p->thread_group.next) *)p->thread_group.next; ({ const typeof( ((struct task_struct *)0)->thread_group ) *__mptr = ((typeof(p->thread_group.next))({ typeof(*(__ptr)) *_________p1 = (typeof(*(__ptr))* )(*(volatile typeof((__ptr)) *)&((__ptr))); do { } while (0); ; do { } while(0); ((typeof(*(__ptr)) *)(_________p1)); })); (struct task_struct *)( (char *)__mptr - __builtin_offsetof(struct task_struct,thread_group) );}); }) > ; >} > >static inline __attribute__((always_inline)) int thread_group_empty(struct task_struct *p) >{ > return list_empty(&p->thread_group); >} > >static inline __attribute__((always_inline)) void task_lock(struct task_struct *p) >{ > spin_lock(&p->alloc_lock); >} > >static inline __attribute__((always_inline)) void task_unlock(struct task_struct *p) >{ > spin_unlock(&p->alloc_lock); >} > >extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, > unsigned long *flags); > >static inline __attribute__((always_inline)) struct sighand_struct *lock_task_sighand(struct task_struct *tsk, > unsigned long *flags) >{ > struct sighand_struct *ret; > > ret = __lock_task_sighand(tsk, flags); > 
(void)(ret); > return ret; >} > >static inline __attribute__((always_inline)) void unlock_task_sighand(struct task_struct *tsk, > unsigned long *flags) >{ > spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); >} > > >static inline __attribute__((always_inline)) void threadgroup_change_begin(struct task_struct *tsk) >{ > down_read(&tsk->signal->group_rwsem); >} >static inline __attribute__((always_inline)) void threadgroup_change_end(struct task_struct *tsk) >{ > up_read(&tsk->signal->group_rwsem); >} > >static inline __attribute__((always_inline)) void threadgroup_lock(struct task_struct *tsk) >{ > > > > > mutex_lock(&tsk->signal->cred_guard_mutex); > down_write(&tsk->signal->group_rwsem); >} > > > > > > > >static inline __attribute__((always_inline)) void threadgroup_unlock(struct task_struct *tsk) >{ > up_write(&tsk->signal->group_rwsem); > mutex_unlock(&tsk->signal->cred_guard_mutex); >} > >static inline __attribute__((always_inline)) void setup_thread_stack(struct task_struct *p, struct task_struct *org) >{ > *((struct thread_info *)(p)->stack) = *((struct thread_info *)(org)->stack); > ((struct thread_info *)(p)->stack)->task = p; >} > >static inline __attribute__((always_inline)) unsigned long *end_of_stack(struct task_struct *p) >{ > return (unsigned long *)(((struct thread_info *)(p)->stack) + 1); >} > > > >static inline __attribute__((always_inline)) int object_is_on_stack(void *obj) >{ > void *stack = (((get_current()))->stack); > > return (obj >= stack) && (obj < (stack + 8192)); >} > >extern void thread_info_cache_init(void); > >static inline __attribute__((always_inline)) void set_tsk_thread_flag(struct task_struct *tsk, int flag) >{ > set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); >} > >static inline __attribute__((always_inline)) void clear_tsk_thread_flag(struct task_struct *tsk, int flag) >{ > clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); >} > >static inline __attribute__((always_inline)) int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag) >{ > return test_and_set_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); >} > >static inline __attribute__((always_inline)) int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag) >{ > return test_and_clear_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); >} > >static inline __attribute__((always_inline)) int test_tsk_thread_flag(struct task_struct *tsk, int flag) >{ > return test_ti_thread_flag(((struct thread_info *)(tsk)->stack), flag); >} > >static inline __attribute__((always_inline)) void set_tsk_need_resched(struct task_struct *tsk) >{ > set_tsk_thread_flag(tsk,1); >} > >static inline __attribute__((always_inline)) void clear_tsk_need_resched(struct task_struct *tsk) >{ > clear_tsk_thread_flag(tsk,1); >} > >static inline __attribute__((always_inline)) int test_tsk_need_resched(struct task_struct *tsk) >{ > return __builtin_expect(!!(test_tsk_thread_flag(tsk,1)), 0); >} > >static inline __attribute__((always_inline)) int restart_syscall(void) >{ > set_tsk_thread_flag((get_current()), 0); > return -513; >} > >static inline __attribute__((always_inline)) int signal_pending(struct task_struct *p) >{ > return __builtin_expect(!!(test_tsk_thread_flag(p,0)), 0); >} > >static inline __attribute__((always_inline)) int __fatal_signal_pending(struct task_struct *p) >{ > return __builtin_expect(!!(sigismember(&p->pending.signal, 9)), 0); >} > >static inline __attribute__((always_inline)) int fatal_signal_pending(struct task_struct *p) >{ > return 
signal_pending(p) && __fatal_signal_pending(p); >} > >static inline __attribute__((always_inline)) int signal_pending_state(long state, struct task_struct *p) >{ > if (!(state & (1 | 128))) > return 0; > if (!signal_pending(p)) > return 0; > > return (state & 1) || __fatal_signal_pending(p); >} > >static inline __attribute__((always_inline)) int need_resched(void) >{ > return __builtin_expect(!!(test_ti_thread_flag(current_thread_info(), 1)), 0); >} > >extern int _cond_resched(void); > > > > > > >extern int __cond_resched_lock(spinlock_t *lock); > >extern int __cond_resched_softirq(void); > >static inline __attribute__((always_inline)) int spin_needbreak(spinlock_t *lock) >{ > > return spin_is_contended(lock); > > > >} > > > > >void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times); >void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times); > >static inline __attribute__((always_inline)) void thread_group_cputime_init(struct signal_struct *sig) >{ > do { *(&sig->cputimer.lock) = (raw_spinlock_t) { .raw_lock = { 0 }, }; } while (0); >} > > > > > > > >extern void recalc_sigpending_and_wake(struct task_struct *t); >extern void recalc_sigpending(void); > >extern void signal_wake_up_state(struct task_struct *t, unsigned int state); > >static inline __attribute__((always_inline)) void signal_wake_up(struct task_struct *t, bool resume) >{ > signal_wake_up_state(t, resume ? 128 : 0); >} >static inline __attribute__((always_inline)) void ptrace_signal_wake_up(struct task_struct *t, bool resume) >{ > signal_wake_up_state(t, resume ? 8 : 0); >} > > > > > > >static inline __attribute__((always_inline)) unsigned int task_cpu(const struct task_struct *p) >{ > return ((struct thread_info *)(p)->stack)->cpu; >} > >extern void set_task_cpu(struct task_struct *p, unsigned int cpu); > >extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); >extern long sched_getaffinity(pid_t pid, struct cpumask *mask); > >extern void normalize_rt_tasks(void); > > > >extern struct task_group root_task_group; > >extern struct task_group *sched_create_group(struct task_group *parent); >extern void sched_destroy_group(struct task_group *tg); >extern void sched_move_task(struct task_struct *tsk); > >extern int sched_group_set_shares(struct task_group *tg, unsigned long shares); >extern unsigned long sched_group_shares(struct task_group *tg); > > >extern int sched_group_set_rt_runtime(struct task_group *tg, > long rt_runtime_us); >extern long sched_group_rt_runtime(struct task_group *tg); >extern int sched_group_set_rt_period(struct task_group *tg, > long rt_period_us); >extern long sched_group_rt_period(struct task_group *tg); >extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk); > > > >extern int task_can_switch_user(struct user_struct *up, > struct task_struct *tsk); > >static inline __attribute__((always_inline)) void add_rchar(struct task_struct *tsk, ssize_t amt) >{ >} > >static inline __attribute__((always_inline)) void add_wchar(struct task_struct *tsk, ssize_t amt) >{ >} > >static inline __attribute__((always_inline)) void inc_syscr(struct task_struct *tsk) >{ >} > >static inline __attribute__((always_inline)) void inc_syscw(struct task_struct *tsk) >{ >} > > > > > > > >extern void mm_update_next_owner(struct mm_struct *mm); >extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); > >static inline __attribute__((always_inline)) unsigned long task_rlimit(const struct task_struct *tsk, > unsigned int limit) >{ > 
return (*(volatile typeof(tsk->signal->rlim[limit].rlim_cur) *)&(tsk->signal->rlim[limit].rlim_cur)); >} > >static inline __attribute__((always_inline)) unsigned long task_rlimit_max(const struct task_struct *tsk, > unsigned int limit) >{ > return (*(volatile typeof(tsk->signal->rlim[limit].rlim_max) *)&(tsk->signal->rlim[limit].rlim_max)); >} > >static inline __attribute__((always_inline)) unsigned long rlimit(unsigned int limit) >{ > return task_rlimit((get_current()), limit); >} > >static inline __attribute__((always_inline)) unsigned long rlimit_max(unsigned int limit) >{ > return task_rlimit_max((get_current()), limit); >} > > > > > > > > >struct taskstats { > > > > > > __u16 version; > __u32 ac_exitcode; > > > > > __u8 ac_flag; > __u8 ac_nice; > > __u64 cpu_count __attribute__((aligned(8))); > __u64 cpu_delay_total; > > > > > > > __u64 blkio_count; > __u64 blkio_delay_total; > > > __u64 swapin_count; > __u64 swapin_delay_total; > > > > > > > > __u64 cpu_run_real_total; > > > > > > > > __u64 cpu_run_virtual_total; > > > > > char ac_comm[32]; > __u8 ac_sched __attribute__((aligned(8))); > > __u8 ac_pad[3]; > __u32 ac_uid __attribute__((aligned(8))); > > __u32 ac_gid; > __u32 ac_pid; > __u32 ac_ppid; > __u32 ac_btime; > __u64 ac_etime __attribute__((aligned(8))); > > __u64 ac_utime; > __u64 ac_stime; > __u64 ac_minflt; > __u64 ac_majflt; > > __u64 coremem; > > > > __u64 virtmem; > > > > > __u64 hiwater_rss; > __u64 hiwater_vm; > > > __u64 read_char; > __u64 write_char; > __u64 read_syscalls; > __u64 write_syscalls; > > > > > __u64 read_bytes; > __u64 write_bytes; > __u64 cancelled_write_bytes; > > __u64 nvcsw; > __u64 nivcsw; > > > __u64 ac_utimescaled; > __u64 ac_stimescaled; > __u64 cpu_scaled_run_real_total; > > > __u64 freepages_count; > __u64 freepages_delay_total; >}; > >enum { > TASKSTATS_CMD_UNSPEC = 0, > TASKSTATS_CMD_GET, > TASKSTATS_CMD_NEW, > __TASKSTATS_CMD_MAX, >}; > > > >enum { > TASKSTATS_TYPE_UNSPEC = 0, > TASKSTATS_TYPE_PID, > TASKSTATS_TYPE_TGID, > TASKSTATS_TYPE_STATS, > TASKSTATS_TYPE_AGGR_PID, > TASKSTATS_TYPE_AGGR_TGID, > TASKSTATS_TYPE_NULL, > __TASKSTATS_TYPE_MAX, >}; > > > >enum { > TASKSTATS_CMD_ATTR_UNSPEC = 0, > TASKSTATS_CMD_ATTR_PID, > TASKSTATS_CMD_ATTR_TGID, > TASKSTATS_CMD_ATTR_REGISTER_CPUMASK, > TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK, > __TASKSTATS_CMD_ATTR_MAX, >}; > > >struct cgroupstats { > __u64 nr_sleeping; > __u64 nr_running; > __u64 nr_stopped; > __u64 nr_uninterruptible; > > __u64 nr_io_wait; >}; > > > > > > > >enum { > CGROUPSTATS_CMD_UNSPEC = __TASKSTATS_CMD_MAX, > CGROUPSTATS_CMD_GET, > CGROUPSTATS_CMD_NEW, > __CGROUPSTATS_CMD_MAX, >}; > > > >enum { > CGROUPSTATS_TYPE_UNSPEC = 0, > CGROUPSTATS_TYPE_CGROUP_STATS, > __CGROUPSTATS_TYPE_MAX, >}; > > > >enum { > CGROUPSTATS_CMD_ATTR_UNSPEC = 0, > CGROUPSTATS_CMD_ATTR_FD, > __CGROUPSTATS_CMD_ATTR_MAX, >}; > > > >struct ptr_heap { > void **ptrs; > int max; > int size; > int (*gt)(void *, void *); >}; > >extern int heap_init(struct ptr_heap *heap, size_t size, gfp_t gfp_mask, > int (*gt)(void *, void *)); > > > > > >void heap_free(struct ptr_heap *heap); > >extern void *heap_insert(struct ptr_heap *heap, void *p); > > > > >struct idr_layer { > unsigned long bitmap; > struct idr_layer *ary[1<<5]; > int count; > int layer; > struct rcu_head rcu_head; >}; > >struct idr { > struct idr_layer *top; > struct idr_layer *id_free; > int layers; > int id_free_cnt; > spinlock_t lock; >}; > >void *idr_find(struct idr *idp, int id); >int idr_pre_get(struct idr *idp, gfp_t gfp_mask); >int idr_get_new(struct idr 
*idp, void *ptr, int *id); >int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id); >int idr_for_each(struct idr *idp, > int (*fn)(int id, void *p, void *data), void *data); >void *idr_get_next(struct idr *idp, int *nextid); >void *idr_replace(struct idr *idp, void *ptr, int id); >void idr_remove(struct idr *idp, int id); >void idr_remove_all(struct idr *idp); >void idr_destroy(struct idr *idp); >void idr_init(struct idr *idp); > >struct ida_bitmap { > long nr_busy; > unsigned long bitmap[(128 / sizeof(long) - 1)]; >}; > >struct ida { > struct idr idr; > struct ida_bitmap *free_bitmap; >}; > > > > >int ida_pre_get(struct ida *ida, gfp_t gfp_mask); >int ida_get_new_above(struct ida *ida, int starting_id, int *p_id); >int ida_get_new(struct ida *ida, int *p_id); >void ida_remove(struct ida *ida, int id); >void ida_destroy(struct ida *ida); >void ida_init(struct ida *ida); > >int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end, > gfp_t gfp_mask); >void ida_simple_remove(struct ida *ida, unsigned int id); > >void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) idr_init_cache(void); > > > > >struct cgroupfs_root; >struct cgroup_subsys; >struct inode; >struct cgroup; >struct css_id; > >extern int cgroup_init_early(void); >extern int cgroup_init(void); >extern void cgroup_lock(void); >extern int cgroup_lock_is_held(void); >extern bool cgroup_lock_live_group(struct cgroup *cgrp); >extern void cgroup_unlock(void); >extern void cgroup_fork(struct task_struct *p); >extern void cgroup_fork_callbacks(struct task_struct *p); >extern void cgroup_post_fork(struct task_struct *p); >extern void cgroup_exit(struct task_struct *p, int run_callbacks); >extern int cgroupstats_build(struct cgroupstats *stats, > struct dentry *dentry); >extern int cgroup_load_subsys(struct cgroup_subsys *ss); >extern void cgroup_unload_subsys(struct cgroup_subsys *ss); > >extern const struct file_operations proc_cgroup_operations; > > > >enum cgroup_subsys_id { > > >debug_subsys_id, > > > > > >cpu_cgroup_subsys_id, > > > > > >cpuacct_subsys_id, > > > > > >mem_cgroup_subsys_id, > >freezer_subsys_id, > > CGROUP_BUILTIN_SUBSYS_COUNT >}; > >struct cgroup_subsys_state { > > > > > > struct cgroup *cgroup; > > > > > > > > atomic_t refcnt; > > unsigned long flags; > > struct css_id *id; >}; > > >enum { > CSS_ROOT, > CSS_REMOVED, >}; > >extern void __css_get(struct cgroup_subsys_state *css, int count); >static inline __attribute__((always_inline)) void css_get(struct cgroup_subsys_state *css) >{ > > if (!test_bit(CSS_ROOT, &css->flags)) > __css_get(css, 1); >} > >static inline __attribute__((always_inline)) bool css_is_removed(struct cgroup_subsys_state *css) >{ > return test_bit(CSS_REMOVED, &css->flags); >} > > > > > > > >static inline __attribute__((always_inline)) bool css_tryget(struct cgroup_subsys_state *css) >{ > if (test_bit(CSS_ROOT, &css->flags)) > return true; > while (!atomic_add_unless((&css->refcnt), 1, 0)) { > if (test_bit(CSS_REMOVED, &css->flags)) > return false; > __asm__ __volatile__("": : :"memory"); > } > return true; >} > > > > > > >extern void __css_put(struct cgroup_subsys_state *css, int count); >static inline __attribute__((always_inline)) void css_put(struct cgroup_subsys_state *css) >{ > if (!test_bit(CSS_ROOT, &css->flags)) > __css_put(css, 1); >} > > >enum { > > CGRP_REMOVED, > > CGRP_RELEASABLE, > > CGRP_NOTIFY_ON_RELEASE, > > > > CGRP_WAIT_ON_RMDIR, > > > > CGRP_CLONE_CHILDREN, >}; > >struct cgroup { > 
unsigned long flags; > > > > > > atomic_t count; > > > > > > struct list_head sibling; > struct list_head children; > > struct cgroup *parent; > struct dentry *dentry; > > > struct cgroup_subsys_state *subsys[(8*sizeof(unsigned long))]; > > struct cgroupfs_root *root; > struct cgroup *top_cgroup; > > > > > > struct list_head css_sets; > > > > > > > struct list_head release_list; > > > > > > struct list_head pidlists; > struct mutex pidlist_mutex; > > > struct rcu_head rcu_head; > > > struct list_head event_list; > spinlock_t event_list_lock; >}; > >struct css_set { > > > atomic_t refcount; > > > > > > struct hlist_node hlist; > > > > > > struct list_head tasks; > > > > > > > struct list_head cg_links; > > > > > > > > struct cgroup_subsys_state *subsys[(8*sizeof(unsigned long))]; > > > struct rcu_head rcu_head; > struct work_struct work; >}; > > > > > > >struct cgroup_map_cb { > int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value); > void *state; >}; > >struct cftype { > > > > > char name[64]; > int private; > > > > > umode_t mode; > > > > > > size_t max_write_len; > > int (*open)(struct inode *inode, struct file *file); > ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft, > struct file *file, > char *buf, size_t nbytes, loff_t *ppos); > > > > > u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft); > > > > s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft); > > > > > > > int (*read_map)(struct cgroup *cont, struct cftype *cft, > struct cgroup_map_cb *cb); > > > > > int (*read_seq_string)(struct cgroup *cont, struct cftype *cft, > struct seq_file *m); > > ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft, > struct file *file, > const char *buf, size_t nbytes, loff_t *ppos); > > > > > > > int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val); > > > > int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val); > > > > > > > int (*write_string)(struct cgroup *cgrp, struct cftype *cft, > const char *buffer); > > > > > > > int (*trigger)(struct cgroup *cgrp, unsigned int event); > > int (*release)(struct inode *inode, struct file *file); > > > > > > > > int (*register_event)(struct cgroup *cgrp, struct cftype *cft, > struct eventfd_ctx *eventfd, const char *args); > > > > > > > void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft, > struct eventfd_ctx *eventfd); >}; > >struct cgroup_scanner { > struct cgroup *cg; > int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan); > void (*process_task)(struct task_struct *p, > struct cgroup_scanner *scan); > struct ptr_heap *heap; > void *data; >}; > > > > > >int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys, > const struct cftype *cft); > > > > > >int cgroup_add_files(struct cgroup *cgrp, > struct cgroup_subsys *subsys, > const struct cftype cft[], > int count); > >int cgroup_is_removed(const struct cgroup *cgrp); > >int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen); > >int cgroup_task_count(const struct cgroup *cgrp); > > >int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task); > >void cgroup_exclude_rmdir(struct cgroup_subsys_state *css); >void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css); > > > > > >struct cgroup_taskset; >struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset); >struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset); >struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset); >int cgroup_taskset_size(struct cgroup_taskset *tset); > 
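>/*
> * Illustrative sketch, not part of the original stripped source: the
> * css_tryget()/css_put() pair declared above is the usual way a caller
> * takes a temporary reference on a cgroup_subsys_state, roughly:
> *
> *	if (css_tryget(css)) {
> *		... use css->cgroup here ...
> *		css_put(css);
> *	}
> *
> * css_tryget() spins only while the refcount has dropped to zero and
> * CSS_REMOVED has not yet been set; once CSS_REMOVED is observed it
> * returns false, and root states (CSS_ROOT) always succeed uncounted.
> */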
>struct cgroup_subsys { > struct cgroup_subsys_state *(*create)(struct cgroup *cgrp); > int (*pre_destroy)(struct cgroup *cgrp); > void (*destroy)(struct cgroup *cgrp); > int (*allow_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); > int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); > void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); > void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset); > void (*fork)(struct task_struct *task); > void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp, > struct task_struct *task); > int (*populate)(struct cgroup_subsys *ss, struct cgroup *cgrp); > void (*post_clone)(struct cgroup *cgrp); > void (*bind)(struct cgroup *root); > > int subsys_id; > int active; > int disabled; > int early_init; > > > > > bool use_id; > > const char *name; > > struct mutex hierarchy_mutex; > struct lock_class_key subsys_key; > > > > > > struct cgroupfs_root *root; > struct list_head sibling; > > struct idr idr; > spinlock_t id_lock; > > > struct module *module; >}; > > > > >extern struct cgroup_subsys debug_subsys; > > > > > >extern struct cgroup_subsys cpu_cgroup_subsys; > > > > > >extern struct cgroup_subsys cpuacct_subsys; > > > > > >extern struct cgroup_subsys mem_cgroup_subsys; > >extern struct cgroup_subsys freezer_subsys; > > > >static inline __attribute__((always_inline)) struct cgroup_subsys_state *cgroup_subsys_state( > struct cgroup *cgrp, int subsys_id) >{ > return cgrp->subsys[subsys_id]; >} > >static inline __attribute__((always_inline)) struct cgroup_subsys_state * >task_subsys_state(struct task_struct *task, int subsys_id) >{ > return ({ typeof(*(task->cgroups->subsys[subsys_id])) *_________p1 = (typeof(*(task->cgroups->subsys[subsys_id]))* )(*(volatile typeof((task->cgroups->subsys[subsys_id])) *)&((task->cgroups->subsys[subsys_id]))); do { } while (0); ; do { } while(0); ((typeof(*(task->cgroups->subsys[subsys_id])) *)(_________p1)); }); >} > >static inline __attribute__((always_inline)) struct cgroup* task_cgroup(struct task_struct *task, > int subsys_id) >{ > return task_subsys_state(task, subsys_id)->cgroup; >} > > >struct cgroup_iter { > struct list_head *cg_link; > struct list_head *task; >}; > >void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it); >struct task_struct *cgroup_iter_next(struct cgroup *cgrp, > struct cgroup_iter *it); >void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); >int cgroup_scan_tasks(struct cgroup_scanner *scan); >int cgroup_attach_task(struct cgroup *, struct task_struct *); >int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); > >void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css); > > > >struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id); > > > > > >struct cgroup_subsys_state *css_get_next(struct cgroup_subsys *ss, int id, > struct cgroup_subsys_state *root, int *foundid); > > >bool css_is_ancestor(struct cgroup_subsys_state *cg, > const struct cgroup_subsys_state *root); > > >unsigned short css_id(struct cgroup_subsys_state *css); >unsigned short css_depth(struct cgroup_subsys_state *css); >struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id); > > > >enum arm_perf_pmu_ids { > ARM_PERF_PMU_ID_XSCALE1 = 0, > ARM_PERF_PMU_ID_XSCALE2, > ARM_PERF_PMU_ID_V6, > ARM_PERF_PMU_ID_V6MP, > ARM_PERF_PMU_ID_CA8, > ARM_PERF_PMU_ID_CA9, > ARM_PERF_PMU_ID_CA5, > ARM_PERF_PMU_ID_CA15, > ARM_PERF_PMU_ID_CA7, > ARM_NUM_PMU_IDS, >}; > >extern enum arm_perf_pmu_ids 
>armpmu_get_pmu_id(void); > > > > >typedef struct { > atomic64_t a; >} local64_t; > > > > >struct perf_guest_info_callbacks { > int (*is_in_guest)(void); > int (*is_user_mode)(void); > unsigned long (*get_guest_ip)(void); >}; > > > > > > > > > > > > > > > > >struct mnt_namespace; >struct uts_namespace; >struct ipc_namespace; >struct pid_namespace; >struct fs_struct; > >struct nsproxy { > atomic_t count; > struct uts_namespace *uts_ns; > struct ipc_namespace *ipc_ns; > struct mnt_namespace *mnt_ns; > struct pid_namespace *pid_ns; > struct net *net_ns; >}; >extern struct nsproxy init_nsproxy; > >static inline __attribute__((always_inline)) struct nsproxy *task_nsproxy(struct task_struct *tsk) >{ > return ({ typeof(*(tsk->nsproxy)) *_________p1 = (typeof(*(tsk->nsproxy))* )(*(volatile typeof((tsk->nsproxy)) *)&((tsk->nsproxy))); do { } while (0); ; do { } while(0); ((typeof(*(tsk->nsproxy)) *)(_________p1)); }); >} > >int copy_namespaces(unsigned long flags, struct task_struct *tsk); >void exit_task_namespaces(struct task_struct *tsk); >void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new); >void free_nsproxy(struct nsproxy *ns); >int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **, > struct fs_struct *); >int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) nsproxy_cache_init(void); > >static inline __attribute__((always_inline)) void put_nsproxy(struct nsproxy *ns) >{ > if ((atomic_sub_return(1, &ns->count) == 0)) { > free_nsproxy(ns); > } >} > >static inline __attribute__((always_inline)) void get_nsproxy(struct nsproxy *ns) >{ > atomic_add(1, &ns->count); >} > > > >struct kref { > atomic_t refcount; >}; > > > > > >static inline __attribute__((always_inline)) void kref_init(struct kref *kref) >{ > (((&kref->refcount)->counter) = (1)); >} > > > > > >static inline __attribute__((always_inline)) void kref_get(struct kref *kref) >{ > ({ int __ret_warn_on = !!(!(*(volatile int *)&(&kref->refcount)->counter)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/kref.h", 41); __builtin_expect(!!(__ret_warn_on), 0); }); > atomic_add(1, &kref->refcount); >} > >static inline __attribute__((always_inline)) int kref_sub(struct kref *kref, unsigned int count, > void (*release)(struct kref *kref)) >{ > ({ int __ret_warn_on = !!(release == ((void *)0)); if (__builtin_expect(!!(__ret_warn_on), 0)) warn_slowpath_null("include/linux/kref.h", 66); __builtin_expect(!!(__ret_warn_on), 0); }); > > if ((atomic_sub_return((int) count, &kref->refcount) == 0)) { > release(kref); > return 1; > } > return 0; >} > >static inline __attribute__((always_inline)) int kref_put(struct kref *kref, void (*release)(struct kref *kref)) >{ > return kref_sub(kref, 1, release); >} > > >struct pidmap { > atomic_t nr_free; > void *page; >}; > > > >struct bsd_acct_struct; > >struct pid_namespace { > struct kref kref; > struct pidmap pidmap[(((0 ? ((1UL) << 12) * 8 : (sizeof(long) > 4 ? 4 * 1024 * 1024 : (0 ? 
0x1000 : 0x8000))) + 8*((1UL) << 12) - 1)/((1UL) << 12)/8)]; > int last_pid; > struct task_struct *child_reaper; > struct kmem_cache *pid_cachep; > unsigned int level; > struct pid_namespace *parent; > > struct vfsmount *proc_mnt; > > > > > gid_t pid_gid; > int hide_pid; > int reboot; >}; > >extern struct pid_namespace init_pid_ns; > >static inline __attribute__((always_inline)) struct pid_namespace *get_pid_ns(struct pid_namespace *ns) >{ > return ns; >} > >static inline __attribute__((always_inline)) struct pid_namespace * >copy_pid_ns(unsigned long flags, struct pid_namespace *ns) >{ > if (flags & 0x20000000) > ns = ERR_PTR(-22); > return ns; >} > >static inline __attribute__((always_inline)) void put_pid_ns(struct pid_namespace *ns) >{ >} > >static inline __attribute__((always_inline)) void zap_pid_ns_processes(struct pid_namespace *ns) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/pid_namespace.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "82" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); >} > >static inline __attribute__((always_inline)) int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) >{ > return 0; >} > > >extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk); >void pidhash_init(void); >void pidmap_init(void); > > > > > > >extern u64 __attribute__((no_instrument_function)) trace_clock_local(void); >extern u64 __attribute__((no_instrument_function)) trace_clock(void); >extern u64 __attribute__((no_instrument_function)) trace_clock_global(void); >extern u64 __attribute__((no_instrument_function)) trace_clock_counter(void); > > > >struct module; > > > >unsigned long kallsyms_lookup_name(const char *name); > > >int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *, > unsigned long), > void *data); > >extern int kallsyms_lookup_size_offset(unsigned long addr, > unsigned long *symbolsize, > unsigned long *offset); > > >const char *kallsyms_lookup(unsigned long addr, > unsigned long *symbolsize, > unsigned long *offset, > char **modname, char *namebuf); > > >extern int sprint_symbol(char *buffer, unsigned long address); >extern int sprint_backtrace(char *buffer, unsigned long address); > > >extern void __print_symbol(const char *fmt, unsigned long address); > >int lookup_symbol_name(unsigned long addr, char *symname); >int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); > >static __attribute__((format(printf, 1, 2))) >void __check_printsym_format(const char *fmt, ...) 
>{ >} > >static inline __attribute__((always_inline)) void print_symbol(const char *fmt, unsigned long addr) >{ > __check_printsym_format(fmt, ""); > __print_symbol(fmt, (unsigned long) > __builtin_extract_return_addr((void *)addr)); >} > >static inline __attribute__((always_inline)) void print_ip_sym(unsigned long ip) >{ > printk("[<%p>] %pS\n", (void *) ip, (void *) ip); >} > > > > >void *return_address(unsigned int); > > >struct module; >struct ftrace_hash; > >static inline __attribute__((always_inline)) void clear_ftrace_function(void) { } >static inline __attribute__((always_inline)) void ftrace_kill(void) { } >static inline __attribute__((always_inline)) void ftrace_stop(void) { } >static inline __attribute__((always_inline)) void ftrace_start(void) { } > >struct ftrace_func_command { > struct list_head list; > char *name; > int (*func)(struct ftrace_hash *hash, > char *func, char *cmd, > char *params, int enable); >}; > >static inline __attribute__((always_inline)) int skip_trace(unsigned long ip) { return 0; } >static inline __attribute__((always_inline)) int ftrace_force_update(void) { return 0; } >static inline __attribute__((always_inline)) void ftrace_disable_daemon(void) { } >static inline __attribute__((always_inline)) void ftrace_enable_daemon(void) { } >static inline __attribute__((always_inline)) void ftrace_release_mod(struct module *mod) {} >static inline __attribute__((always_inline)) int register_ftrace_command(struct ftrace_func_command *cmd) >{ > return -22; >} >static inline __attribute__((always_inline)) int unregister_ftrace_command(char *cmd_name) >{ > return -22; >} >static inline __attribute__((always_inline)) int ftrace_text_reserved(void *start, void *end) >{ > return 0; >} > >static inline __attribute__((always_inline)) ssize_t ftrace_filter_write(struct file *file, const char *ubuf, > size_t cnt, loff_t *ppos) { return -19; } >static inline __attribute__((always_inline)) ssize_t ftrace_notrace_write(struct file *file, const char *ubuf, > size_t cnt, loff_t *ppos) { return -19; } >static inline __attribute__((always_inline)) loff_t ftrace_regex_lseek(struct file *file, loff_t offset, int origin) >{ > return -19; >} >static inline __attribute__((always_inline)) int >ftrace_regex_release(struct inode *inode, struct file *file) { return -19; } > > > >void ftrace_kill(void); > >static inline __attribute__((always_inline)) void tracer_disable(void) >{ > > > >} > > > > > > >static inline __attribute__((always_inline)) int __ftrace_enabled_save(void) >{ > > > > > > return 0; > >} > >static inline __attribute__((always_inline)) void __ftrace_enabled_restore(int enabled) >{ > > > >} > > static inline __attribute__((always_inline)) void time_hardirqs_on(unsigned long a0, unsigned long a1) { } > static inline __attribute__((always_inline)) void time_hardirqs_off(unsigned long a0, unsigned long a1) { } > > > > > > > static inline __attribute__((always_inline)) void trace_preempt_on(unsigned long a0, unsigned long a1) { } > static inline __attribute__((always_inline)) void trace_preempt_off(unsigned long a0, unsigned long a1) { } > > > > > >static inline __attribute__((always_inline)) void ftrace_init(void) { } > > > > > >struct ftrace_graph_ent { > unsigned long func; > int depth; >}; > > > > >struct ftrace_graph_ret { > unsigned long func; > unsigned long long calltime; > unsigned long long rettime; > > unsigned long overrun; > int depth; >}; > > >typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); >typedef int (*trace_func_graph_ent_t)(struct 
ftrace_graph_ent *); > >static inline __attribute__((always_inline)) void ftrace_graph_init_task(struct task_struct *t) { } >static inline __attribute__((always_inline)) void ftrace_graph_exit_task(struct task_struct *t) { } >static inline __attribute__((always_inline)) void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { } > >static inline __attribute__((always_inline)) int register_ftrace_graph(trace_func_graph_ret_t retfunc, > trace_func_graph_ent_t entryfunc) >{ > return -1; >} >static inline __attribute__((always_inline)) void unregister_ftrace_graph(void) { } > >static inline __attribute__((always_inline)) int task_curr_ret_stack(struct task_struct *tsk) >{ > return -1; >} > >static inline __attribute__((always_inline)) void pause_graph_tracing(void) { } >static inline __attribute__((always_inline)) void unpause_graph_tracing(void) { } > > > > > >enum { > TSK_TRACE_FL_TRACE_BIT = 0, > TSK_TRACE_FL_GRAPH_BIT = 1, >}; >enum { > TSK_TRACE_FL_TRACE = 1 << TSK_TRACE_FL_TRACE_BIT, > TSK_TRACE_FL_GRAPH = 1 << TSK_TRACE_FL_GRAPH_BIT, >}; > >static inline __attribute__((always_inline)) void set_tsk_trace_trace(struct task_struct *tsk) >{ > _set_bit(TSK_TRACE_FL_TRACE_BIT,&tsk->trace); >} > >static inline __attribute__((always_inline)) void clear_tsk_trace_trace(struct task_struct *tsk) >{ > _clear_bit(TSK_TRACE_FL_TRACE_BIT,&tsk->trace); >} > >static inline __attribute__((always_inline)) int test_tsk_trace_trace(struct task_struct *tsk) >{ > return tsk->trace & TSK_TRACE_FL_TRACE; >} > >static inline __attribute__((always_inline)) void set_tsk_trace_graph(struct task_struct *tsk) >{ > _set_bit(TSK_TRACE_FL_GRAPH_BIT,&tsk->trace); >} > >static inline __attribute__((always_inline)) void clear_tsk_trace_graph(struct task_struct *tsk) >{ > _clear_bit(TSK_TRACE_FL_GRAPH_BIT,&tsk->trace); >} > >static inline __attribute__((always_inline)) int test_tsk_trace_graph(struct task_struct *tsk) >{ > return tsk->trace & TSK_TRACE_FL_GRAPH; >} > >enum ftrace_dump_mode; > >extern enum ftrace_dump_mode ftrace_dump_on_oops; > > > > > > > > > >struct resource { > resource_size_t start; > resource_size_t end; > const char *name; > unsigned long flags; > struct resource *parent, *sibling, *child; >}; > >extern struct resource ioport_resource; >extern struct resource iomem_resource; > >extern struct resource *request_resource_conflict(struct resource *root, struct resource *new); >extern int request_resource(struct resource *root, struct resource *new); >extern int release_resource(struct resource *new); >void release_child_resources(struct resource *new); >extern void reserve_region_with_split(struct resource *root, > resource_size_t start, resource_size_t end, > const char *name); >extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new); >extern int insert_resource(struct resource *parent, struct resource *new); >extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new); >extern void arch_remove_reservations(struct resource *avail); >extern int allocate_resource(struct resource *root, struct resource *new, > resource_size_t size, resource_size_t min, > resource_size_t max, resource_size_t align, > resource_size_t (*alignf)(void *, > const struct resource *, > resource_size_t, > resource_size_t), > void *alignf_data); >struct resource *lookup_resource(struct resource *root, resource_size_t start); >int adjust_resource(struct resource *res, resource_size_t start, > resource_size_t size); >resource_size_t resource_alignment(struct 
resource *res); >static inline __attribute__((always_inline)) resource_size_t resource_size(const struct resource *res) >{ > return res->end - res->start + 1; >} >static inline __attribute__((always_inline)) unsigned long resource_type(const struct resource *res) >{ > return res->flags & 0x00001f00; >} > >extern struct resource * __request_region(struct resource *, > resource_size_t start, > resource_size_t n, > const char *name, int flags); > > > > > > >extern int __check_region(struct resource *, resource_size_t, resource_size_t); >extern void __release_region(struct resource *, resource_size_t, > resource_size_t); > >static inline __attribute__((always_inline)) int __attribute__((deprecated)) check_region(resource_size_t s, > resource_size_t n) >{ > return __check_region(&ioport_resource, s, n); >} > > >struct device; > > > > > >extern struct resource * __devm_request_region(struct device *dev, > struct resource *parent, resource_size_t start, > resource_size_t n, const char *name); > > > > > > >extern void __devm_release_region(struct device *dev, struct resource *parent, > resource_size_t start, resource_size_t n); >extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); >extern int iomem_is_exclusive(u64 addr); > >extern int >walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, > void *arg, int (*func)(unsigned long, unsigned long, void *)); > > > > > > > >struct sock; >struct kobject; > > > > > >enum kobj_ns_type { > KOBJ_NS_TYPE_NONE = 0, > KOBJ_NS_TYPE_NET, > KOBJ_NS_TYPES >}; > >struct kobj_ns_type_operations { > enum kobj_ns_type type; > void *(*grab_current_ns)(void); > const void *(*netlink_ns)(struct sock *sk); > const void *(*initial_ns)(void); > void (*drop_ns)(void *); >}; > >int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); >int kobj_ns_type_registered(enum kobj_ns_type type); >const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); >const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); > >void *kobj_ns_grab_current(enum kobj_ns_type type); >const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); >const void *kobj_ns_initial(enum kobj_ns_type type); >void kobj_ns_drop(enum kobj_ns_type type, void *ns); > > > >struct kobject; >struct module; >enum kobj_ns_type; > >struct attribute { > const char *name; > umode_t mode; > > > > >}; > >struct attribute_group { > const char *name; > umode_t (*is_visible)(struct kobject *, > struct attribute *, int); > struct attribute **attrs; >}; > >struct file; >struct vm_area_struct; > >struct bin_attribute { > struct attribute attr; > size_t size; > void *private; > ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, > char *, loff_t, size_t); > ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *, > char *, loff_t, size_t); > int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, > struct vm_area_struct *vma); >}; > >struct sysfs_ops { > ssize_t (*show)(struct kobject *, struct attribute *,char *); > ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); > const void *(*namespace)(struct kobject *, const struct attribute *); >}; > >struct sysfs_dirent; > > > >int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *), > void *data, struct module *owner); > >int __attribute__((warn_unused_result)) sysfs_create_dir(struct kobject *kobj); >void sysfs_remove_dir(struct kobject *kobj); >int __attribute__((warn_unused_result)) 
sysfs_rename_dir(struct kobject *kobj, const char *new_name); >int __attribute__((warn_unused_result)) sysfs_move_dir(struct kobject *kobj, > struct kobject *new_parent_kobj); > >int __attribute__((warn_unused_result)) sysfs_create_file(struct kobject *kobj, > const struct attribute *attr); >int __attribute__((warn_unused_result)) sysfs_create_files(struct kobject *kobj, > const struct attribute **attr); >int __attribute__((warn_unused_result)) sysfs_chmod_file(struct kobject *kobj, > const struct attribute *attr, umode_t mode); >void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr); >void sysfs_remove_files(struct kobject *kobj, const struct attribute **attr); > >int __attribute__((warn_unused_result)) sysfs_create_bin_file(struct kobject *kobj, > const struct bin_attribute *attr); >void sysfs_remove_bin_file(struct kobject *kobj, > const struct bin_attribute *attr); > >int __attribute__((warn_unused_result)) sysfs_create_link(struct kobject *kobj, struct kobject *target, > const char *name); >int __attribute__((warn_unused_result)) sysfs_create_link_nowarn(struct kobject *kobj, > struct kobject *target, > const char *name); >void sysfs_remove_link(struct kobject *kobj, const char *name); > >int sysfs_rename_link(struct kobject *kobj, struct kobject *target, > const char *old_name, const char *new_name); > >void sysfs_delete_link(struct kobject *dir, struct kobject *targ, > const char *name); > >int __attribute__((warn_unused_result)) sysfs_create_group(struct kobject *kobj, > const struct attribute_group *grp); >int sysfs_update_group(struct kobject *kobj, > const struct attribute_group *grp); >void sysfs_remove_group(struct kobject *kobj, > const struct attribute_group *grp); >int sysfs_add_file_to_group(struct kobject *kobj, > const struct attribute *attr, const char *group); >void sysfs_remove_file_from_group(struct kobject *kobj, > const struct attribute *attr, const char *group); >int sysfs_merge_group(struct kobject *kobj, > const struct attribute_group *grp); >void sysfs_unmerge_group(struct kobject *kobj, > const struct attribute_group *grp); > >void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); >void sysfs_notify_dirent(struct sysfs_dirent *sd); >struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd, > const void *ns, > const unsigned char *name); >struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); >void sysfs_put(struct sysfs_dirent *sd); > >int __attribute__((warn_unused_result)) sysfs_init(void); > > >extern char uevent_helper[]; > > >extern u64 uevent_seqnum; > >enum kobject_action { > KOBJ_ADD, > KOBJ_REMOVE, > KOBJ_CHANGE, > KOBJ_MOVE, > KOBJ_ONLINE, > KOBJ_OFFLINE, > KOBJ_MAX >}; > >struct kobject { > const char *name; > struct list_head entry; > struct kobject *parent; > struct kset *kset; > struct kobj_type *ktype; > struct sysfs_dirent *sd; > struct kref kref; > unsigned int state_initialized:1; > unsigned int state_in_sysfs:1; > unsigned int state_add_uevent_sent:1; > unsigned int state_remove_uevent_sent:1; > unsigned int uevent_suppress:1; >}; > >extern __attribute__((format(printf, 2, 3))) >int kobject_set_name(struct kobject *kobj, const char *name, ...); >extern int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, > va_list vargs); > >static inline __attribute__((always_inline)) const char *kobject_name(const struct kobject *kobj) >{ > return kobj->name; >} > >extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype); >extern __attribute__((format(printf, 3, 4))) 
__attribute__((warn_unused_result)) >int kobject_add(struct kobject *kobj, struct kobject *parent, > const char *fmt, ...); >extern __attribute__((format(printf, 4, 5))) __attribute__((warn_unused_result)) >int kobject_init_and_add(struct kobject *kobj, > struct kobj_type *ktype, struct kobject *parent, > const char *fmt, ...); > >extern void kobject_del(struct kobject *kobj); > >extern struct kobject * __attribute__((warn_unused_result)) kobject_create(void); >extern struct kobject * __attribute__((warn_unused_result)) kobject_create_and_add(const char *name, > struct kobject *parent); > >extern int __attribute__((warn_unused_result)) kobject_rename(struct kobject *, const char *new_name); >extern int __attribute__((warn_unused_result)) kobject_move(struct kobject *, struct kobject *); > >extern struct kobject *kobject_get(struct kobject *kobj); >extern void kobject_put(struct kobject *kobj); > >extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); > >struct kobj_type { > void (*release)(struct kobject *kobj); > const struct sysfs_ops *sysfs_ops; > struct attribute **default_attrs; > const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj); > const void *(*namespace)(struct kobject *kobj); >}; > >struct kobj_uevent_env { > char *envp[32]; > int envp_idx; > char buf[2048]; > int buflen; >}; > >struct kset_uevent_ops { > int (* const filter)(struct kset *kset, struct kobject *kobj); > const char *(* const name)(struct kset *kset, struct kobject *kobj); > int (* const uevent)(struct kset *kset, struct kobject *kobj, > struct kobj_uevent_env *env); >}; > >struct kobj_attribute { > struct attribute attr; > ssize_t (*show)(struct kobject *kobj, struct kobj_attribute *attr, > char *buf); > ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr, > const char *buf, size_t count); >}; > >extern const struct sysfs_ops kobj_sysfs_ops; > >struct sock; > >struct kset { > struct list_head list; > spinlock_t list_lock; > struct kobject kobj; > const struct kset_uevent_ops *uevent_ops; >}; > >extern void kset_init(struct kset *kset); >extern int __attribute__((warn_unused_result)) kset_register(struct kset *kset); >extern void kset_unregister(struct kset *kset); >extern struct kset * __attribute__((warn_unused_result)) kset_create_and_add(const char *name, > const struct kset_uevent_ops *u, > struct kobject *parent_kobj); > >static inline __attribute__((always_inline)) struct kset *to_kset(struct kobject *kobj) >{ > return kobj ? ({ const typeof( ((struct kset *)0)->kobj ) *__mptr = (kobj); (struct kset *)( (char *)__mptr - __builtin_offsetof(struct kset,kobj) );}) : ((void *)0); >} > >static inline __attribute__((always_inline)) struct kset *kset_get(struct kset *k) >{ > return k ? 
to_kset(kobject_get(&k->kobj)) : ((void *)0); >} > >static inline __attribute__((always_inline)) void kset_put(struct kset *k) >{ > kobject_put(&k->kobj); >} > >static inline __attribute__((always_inline)) struct kobj_type *get_ktype(struct kobject *kobj) >{ > return kobj->ktype; >} > >extern struct kobject *kset_find_obj(struct kset *, const char *); > > >extern struct kobject *kernel_kobj; > >extern struct kobject *mm_kobj; > >extern struct kobject *hypervisor_kobj; > >extern struct kobject *power_kobj; > >extern struct kobject *firmware_kobj; > > >int kobject_uevent(struct kobject *kobj, enum kobject_action action); >int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, > char *envp[]); > >__attribute__((format(printf, 2, 3))) >int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...); > >int kobject_action_type(const char *buf, size_t count, > enum kobject_action *type); > > > >struct klist_node; >struct klist { > spinlock_t k_lock; > struct list_head k_list; > void (*get)(struct klist_node *); > void (*put)(struct klist_node *); >} __attribute__ ((aligned (sizeof(void *)))); > >extern void klist_init(struct klist *k, void (*get)(struct klist_node *), > void (*put)(struct klist_node *)); > >struct klist_node { > void *n_klist; > struct list_head n_node; > struct kref n_ref; >}; > >extern void klist_add_tail(struct klist_node *n, struct klist *k); >extern void klist_add_head(struct klist_node *n, struct klist *k); >extern void klist_add_after(struct klist_node *n, struct klist_node *pos); >extern void klist_add_before(struct klist_node *n, struct klist_node *pos); > >extern void klist_del(struct klist_node *n); >extern void klist_remove(struct klist_node *n); > >extern int klist_node_attached(struct klist_node *n); > > >struct klist_iter { > struct klist *i_klist; > struct klist_node *i_cur; >}; > > >extern void klist_iter_init(struct klist *k, struct klist_iter *i); >extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, > struct klist_node *n); >extern void klist_iter_exit(struct klist_iter *i); >extern struct klist_node *klist_next(struct klist_iter *i); > > > > > > > > >extern void (*pm_idle)(void); >extern void (*pm_power_off)(void); >extern void (*pm_power_off_prepare)(void); > > > > > >struct device; > > >extern const char power_group_name[]; > > > > >typedef struct pm_message { > int event; >} pm_message_t; > >struct dev_pm_ops { > int (*prepare)(struct device *dev); > void (*complete)(struct device *dev); > int (*suspend)(struct device *dev); > int (*resume)(struct device *dev); > int (*freeze)(struct device *dev); > int (*thaw)(struct device *dev); > int (*poweroff)(struct device *dev); > int (*restore)(struct device *dev); > int (*suspend_late)(struct device *dev); > int (*resume_early)(struct device *dev); > int (*freeze_late)(struct device *dev); > int (*thaw_early)(struct device *dev); > int (*poweroff_late)(struct device *dev); > int (*restore_early)(struct device *dev); > int (*suspend_noirq)(struct device *dev); > int (*resume_noirq)(struct device *dev); > int (*freeze_noirq)(struct device *dev); > int (*thaw_noirq)(struct device *dev); > int (*poweroff_noirq)(struct device *dev); > int (*restore_noirq)(struct device *dev); > int (*runtime_suspend)(struct device *dev); > int (*runtime_resume)(struct device *dev); > int (*runtime_idle)(struct device *dev); >}; > >enum rpm_status { > RPM_ACTIVE = 0, > RPM_RESUMING, > RPM_SUSPENDED, > RPM_SUSPENDING, >}; > >enum rpm_request { > RPM_REQ_NONE = 0, > RPM_REQ_IDLE, > 
RPM_REQ_SUSPEND, > RPM_REQ_AUTOSUSPEND, > RPM_REQ_RESUME, >}; > >struct wakeup_source; > >struct pm_domain_data { > struct list_head list_node; > struct device *dev; >}; > >struct pm_subsys_data { > spinlock_t lock; > unsigned int refcount; > > struct list_head clock_list; > > > struct pm_domain_data *domain_data; > >}; > >struct dev_pm_info { > pm_message_t power_state; > unsigned int can_wakeup:1; > unsigned int async_suspend:1; > bool is_prepared:1; > bool is_suspended:1; > bool ignore_children:1; > spinlock_t lock; > > struct list_head entry; > struct completion completion; > struct wakeup_source *wakeup; > bool wakeup_path:1; > > > > > struct timer_list suspend_timer; > unsigned long timer_expires; > struct work_struct work; > wait_queue_head_t wait_queue; > atomic_t usage_count; > atomic_t child_count; > unsigned int disable_depth:3; > unsigned int idle_notification:1; > unsigned int request_pending:1; > unsigned int deferred_resume:1; > unsigned int run_wake:1; > unsigned int runtime_auto:1; > unsigned int no_callbacks:1; > unsigned int irq_safe:1; > unsigned int use_autosuspend:1; > unsigned int timer_autosuspends:1; > enum rpm_request request; > enum rpm_status runtime_status; > int runtime_error; > int autosuspend_delay; > unsigned long last_busy; > unsigned long active_jiffies; > unsigned long suspended_jiffies; > unsigned long accounting_timestamp; > struct dev_pm_qos_request *pq_req; > > struct pm_subsys_data *subsys_data; > struct pm_qos_constraints *constraints; >}; > >extern void update_pm_runtime_accounting(struct device *dev); >extern int dev_pm_get_subsys_data(struct device *dev); >extern int dev_pm_put_subsys_data(struct device *dev); > > > > > > >struct dev_pm_domain { > struct dev_pm_ops ops; >}; > >extern void device_pm_lock(void); >extern void dpm_resume_start(pm_message_t state); >extern void dpm_resume_end(pm_message_t state); >extern void dpm_resume(pm_message_t state); >extern void dpm_complete(pm_message_t state); > >extern void device_pm_unlock(void); >extern int dpm_suspend_end(pm_message_t state); >extern int dpm_suspend_start(pm_message_t state); >extern int dpm_suspend(pm_message_t state); >extern int dpm_prepare(pm_message_t state); > >extern void __suspend_report_result(const char *function, void *fn, int ret); > > > > > > >extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); > >extern int pm_generic_prepare(struct device *dev); >extern int pm_generic_suspend_late(struct device *dev); >extern int pm_generic_suspend_noirq(struct device *dev); >extern int pm_generic_suspend(struct device *dev); >extern int pm_generic_resume_early(struct device *dev); >extern int pm_generic_resume_noirq(struct device *dev); >extern int pm_generic_resume(struct device *dev); >extern int pm_generic_freeze_noirq(struct device *dev); >extern int pm_generic_freeze_late(struct device *dev); >extern int pm_generic_freeze(struct device *dev); >extern int pm_generic_thaw_noirq(struct device *dev); >extern int pm_generic_thaw_early(struct device *dev); >extern int pm_generic_thaw(struct device *dev); >extern int pm_generic_restore_noirq(struct device *dev); >extern int pm_generic_restore_early(struct device *dev); >extern int pm_generic_restore(struct device *dev); >extern int pm_generic_poweroff_noirq(struct device *dev); >extern int pm_generic_poweroff_late(struct device *dev); >extern int pm_generic_poweroff(struct device *dev); >extern void pm_generic_complete(struct device *dev); > >enum dpm_order { > DPM_ORDER_NONE, > DPM_ORDER_DEV_AFTER_PARENT, > 
DPM_ORDER_PARENT_BEFORE_DEV, > DPM_ORDER_DEV_LAST, >}; > > > > >struct dev_archdata { > > > > > void *iommu; > >}; > >struct omap_device; > >struct pdev_archdata { > > > >}; > > >struct device; >struct device_private; >struct device_driver; >struct driver_private; >struct module; >struct class; >struct subsys_private; >struct bus_type; >struct device_node; >struct iommu_ops; > >struct bus_attribute { > struct attribute attr; > ssize_t (*show)(struct bus_type *bus, char *buf); > ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); >}; > > > > >extern int __attribute__((warn_unused_result)) bus_create_file(struct bus_type *, > struct bus_attribute *); >extern void bus_remove_file(struct bus_type *, struct bus_attribute *); > >struct bus_type { > const char *name; > const char *dev_name; > struct device *dev_root; > struct bus_attribute *bus_attrs; > struct device_attribute *dev_attrs; > struct driver_attribute *drv_attrs; > > int (*match)(struct device *dev, struct device_driver *drv); > int (*uevent)(struct device *dev, struct kobj_uevent_env *env); > int (*probe)(struct device *dev); > int (*remove)(struct device *dev); > void (*shutdown)(struct device *dev); > > int (*suspend)(struct device *dev, pm_message_t state); > int (*resume)(struct device *dev); > > const struct dev_pm_ops *pm; > > struct iommu_ops *iommu_ops; > > struct subsys_private *p; >}; > >extern int __attribute__((warn_unused_result)) __bus_register(struct bus_type *bus, > struct lock_class_key *key); >extern void bus_unregister(struct bus_type *bus); > >extern int __attribute__((warn_unused_result)) bus_rescan_devices(struct bus_type *bus); > > >struct subsys_dev_iter { > struct klist_iter ki; > const struct device_type *type; >}; >void subsys_dev_iter_init(struct subsys_dev_iter *iter, > struct bus_type *subsys, > struct device *start, > const struct device_type *type); >struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter); >void subsys_dev_iter_exit(struct subsys_dev_iter *iter); > >int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, > int (*fn)(struct device *dev, void *data)); >struct device *bus_find_device(struct bus_type *bus, struct device *start, > void *data, > int (*match)(struct device *dev, void *data)); >struct device *bus_find_device_by_name(struct bus_type *bus, > struct device *start, > const char *name); >struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id, > struct device *hint); >int bus_for_each_drv(struct bus_type *bus, struct device_driver *start, > void *data, int (*fn)(struct device_driver *, void *)); >void bus_sort_breadthfirst(struct bus_type *bus, > int (*compare)(const struct device *a, > const struct device *b)); > > > > > > >struct notifier_block; > >extern int bus_register_notifier(struct bus_type *bus, > struct notifier_block *nb); >extern int bus_unregister_notifier(struct bus_type *bus, > struct notifier_block *nb); > >extern struct kset *bus_get_kset(struct bus_type *bus); >extern struct klist *bus_get_device_klist(struct bus_type *bus); > >struct device_driver { > const char *name; > struct bus_type *bus; > > struct module *owner; > const char *mod_name; > > bool suppress_bind_attrs; > > const struct of_device_id *of_match_table; > > int (*probe) (struct device *dev); > int (*remove) (struct device *dev); > void (*shutdown) (struct device *dev); > int (*suspend) (struct device *dev, pm_message_t state); > int (*resume) (struct device *dev); > const struct attribute_group **groups; > > const struct 
dev_pm_ops *pm; > > struct driver_private *p; >}; > > >extern int __attribute__((warn_unused_result)) driver_register(struct device_driver *drv); >extern void driver_unregister(struct device_driver *drv); > >extern struct device_driver *driver_find(const char *name, > struct bus_type *bus); >extern int driver_probe_done(void); >extern void wait_for_device_probe(void); > > > > >struct driver_attribute { > struct attribute attr; > ssize_t (*show)(struct device_driver *driver, char *buf); > ssize_t (*store)(struct device_driver *driver, const char *buf, > size_t count); >}; > > > > > >extern int __attribute__((warn_unused_result)) driver_create_file(struct device_driver *driver, > const struct driver_attribute *attr); >extern void driver_remove_file(struct device_driver *driver, > const struct driver_attribute *attr); > >extern int __attribute__((warn_unused_result)) driver_for_each_device(struct device_driver *drv, > struct device *start, > void *data, > int (*fn)(struct device *dev, > void *)); >struct device *driver_find_device(struct device_driver *drv, > struct device *start, void *data, > int (*match)(struct device *dev, void *data)); > >struct subsys_interface { > const char *name; > struct bus_type *subsys; > struct list_head node; > int (*add_dev)(struct device *dev, struct subsys_interface *sif); > int (*remove_dev)(struct device *dev, struct subsys_interface *sif); >}; > >int subsys_interface_register(struct subsys_interface *sif); >void subsys_interface_unregister(struct subsys_interface *sif); > >int subsys_system_register(struct bus_type *subsys, > const struct attribute_group **groups); > >struct class { > const char *name; > struct module *owner; > > struct class_attribute *class_attrs; > struct device_attribute *dev_attrs; > struct bin_attribute *dev_bin_attrs; > struct kobject *dev_kobj; > > int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env); > char *(*devnode)(struct device *dev, umode_t *mode); > > void (*class_release)(struct class *class); > void (*dev_release)(struct device *dev); > > int (*suspend)(struct device *dev, pm_message_t state); > int (*resume)(struct device *dev); > > const struct kobj_ns_type_operations *ns_type; > const void *(*namespace)(struct device *dev); > > const struct dev_pm_ops *pm; > > struct subsys_private *p; >}; > >struct class_dev_iter { > struct klist_iter ki; > const struct device_type *type; >}; > >extern struct kobject *sysfs_dev_block_kobj; >extern struct kobject *sysfs_dev_char_kobj; >extern int __attribute__((warn_unused_result)) __class_register(struct class *class, > struct lock_class_key *key); >extern void class_unregister(struct class *class); > >struct class_compat; >struct class_compat *class_compat_register(const char *name); >void class_compat_unregister(struct class_compat *cls); >int class_compat_create_link(struct class_compat *cls, struct device *dev, > struct device *device_link); >void class_compat_remove_link(struct class_compat *cls, struct device *dev, > struct device *device_link); > >extern void class_dev_iter_init(struct class_dev_iter *iter, > struct class *class, > struct device *start, > const struct device_type *type); >extern struct device *class_dev_iter_next(struct class_dev_iter *iter); >extern void class_dev_iter_exit(struct class_dev_iter *iter); > >extern int class_for_each_device(struct class *class, struct device *start, > void *data, > int (*fn)(struct device *dev, void *data)); >extern struct device *class_find_device(struct class *class, > struct device *start, void *data, > int 
(*match)(struct device *, void *)); > >struct class_attribute { > struct attribute attr; > ssize_t (*show)(struct class *class, struct class_attribute *attr, > char *buf); > ssize_t (*store)(struct class *class, struct class_attribute *attr, > const char *buf, size_t count); > const void *(*namespace)(struct class *class, > const struct class_attribute *attr); >}; > > > > >extern int __attribute__((warn_unused_result)) class_create_file(struct class *class, > const struct class_attribute *attr); >extern void class_remove_file(struct class *class, > const struct class_attribute *attr); > > > >struct class_attribute_string { > struct class_attribute attr; > char *str; >}; > >extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, > char *buf); > >struct class_interface { > struct list_head node; > struct class *class; > > int (*add_dev) (struct device *, struct class_interface *); > void (*remove_dev) (struct device *, struct class_interface *); >}; > >extern int __attribute__((warn_unused_result)) class_interface_register(struct class_interface *); >extern void class_interface_unregister(struct class_interface *); > >extern struct class * __attribute__((warn_unused_result)) __class_create(struct module *owner, > const char *name, > struct lock_class_key *key); >extern void class_destroy(struct class *cls); > >struct device_type { > const char *name; > const struct attribute_group **groups; > int (*uevent)(struct device *dev, struct kobj_uevent_env *env); > char *(*devnode)(struct device *dev, umode_t *mode); > void (*release)(struct device *dev); > > const struct dev_pm_ops *pm; >}; > > >struct device_attribute { > struct attribute attr; > ssize_t (*show)(struct device *dev, struct device_attribute *attr, > char *buf); > ssize_t (*store)(struct device *dev, struct device_attribute *attr, > const char *buf, size_t count); >}; > >struct dev_ext_attribute { > struct device_attribute attr; > void *var; >}; > >ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr, > char *buf); >ssize_t device_store_ulong(struct device *dev, struct device_attribute *attr, > const char *buf, size_t count); >ssize_t device_show_int(struct device *dev, struct device_attribute *attr, > char *buf); >ssize_t device_store_int(struct device *dev, struct device_attribute *attr, > const char *buf, size_t count); > >extern int device_create_file(struct device *device, > const struct device_attribute *entry); >extern void device_remove_file(struct device *dev, > const struct device_attribute *attr); >extern int __attribute__((warn_unused_result)) device_create_bin_file(struct device *dev, > const struct bin_attribute *attr); >extern void device_remove_bin_file(struct device *dev, > const struct bin_attribute *attr); >extern int device_schedule_callback_owner(struct device *dev, > void (*func)(struct device *dev), struct module *owner); > > > > > > >typedef void (*dr_release_t)(struct device *dev, void *res); >typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data); > > > > > > > >extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp); > >extern void devres_free(void *res); >extern void devres_add(struct device *dev, void *res); >extern void *devres_find(struct device *dev, dr_release_t release, > dr_match_t match, void *match_data); >extern void *devres_get(struct device *dev, void *new_res, > dr_match_t match, void *match_data); >extern void *devres_remove(struct device *dev, dr_release_t release, > dr_match_t match, void 
*match_data); >extern int devres_destroy(struct device *dev, dr_release_t release, > dr_match_t match, void *match_data); > > >extern void * __attribute__((warn_unused_result)) devres_open_group(struct device *dev, void *id, > gfp_t gfp); >extern void devres_close_group(struct device *dev, void *id); >extern void devres_remove_group(struct device *dev, void *id); >extern int devres_release_group(struct device *dev, void *id); > > >extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp); >extern void devm_kfree(struct device *dev, void *p); > >void *devm_request_and_ioremap(struct device *dev, > struct resource *res); > >struct device_dma_parameters { > > > > > unsigned int max_segment_size; > unsigned long segment_boundary_mask; >}; > >struct device { > struct device *parent; > > struct device_private *p; > > struct kobject kobj; > const char *init_name; > const struct device_type *type; > > struct mutex mutex; > > > > struct bus_type *bus; > struct device_driver *driver; > > void *platform_data; > > struct dev_pm_info power; > struct dev_pm_domain *pm_domain; > > > > > u64 *dma_mask; > u64 coherent_dma_mask; > > > > > > struct device_dma_parameters *dma_parms; > > struct list_head dma_pools; > > struct dma_coherent_mem *dma_mem; > > > struct dev_archdata archdata; > > struct device_node *of_node; > > dev_t devt; > u32 id; > > spinlock_t devres_lock; > struct list_head devres_head; > > struct klist_node knode_class; > struct class *class; > const struct attribute_group **groups; > > void (*release)(struct device *dev); >}; > > > > >struct wakeup_source { > const char *name; > struct list_head entry; > spinlock_t lock; > struct timer_list timer; > unsigned long timer_expires; > ktime_t total_time; > ktime_t max_time; > ktime_t last_time; > ktime_t start_prevent_time; > ktime_t prevent_sleep_time; > unsigned long event_count; > unsigned long active_count; > unsigned long relax_count; > unsigned long expire_count; > unsigned long wakeup_count; > bool active:1; > bool autosleep_enabled:1; >}; > > > > > > > >static inline __attribute__((always_inline)) bool device_can_wakeup(struct device *dev) >{ > return dev->power.can_wakeup; >} > >static inline __attribute__((always_inline)) bool device_may_wakeup(struct device *dev) >{ > return dev->power.can_wakeup && !!dev->power.wakeup; >} > > >extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); >extern struct wakeup_source *wakeup_source_create(const char *name); >extern void wakeup_source_drop(struct wakeup_source *ws); >extern void wakeup_source_destroy(struct wakeup_source *ws); >extern void wakeup_source_add(struct wakeup_source *ws); >extern void wakeup_source_remove(struct wakeup_source *ws); >extern struct wakeup_source *wakeup_source_register(const char *name); >extern void wakeup_source_unregister(struct wakeup_source *ws); >extern int device_wakeup_enable(struct device *dev); >extern int device_wakeup_disable(struct device *dev); >extern void device_set_wakeup_capable(struct device *dev, bool capable); >extern int device_init_wakeup(struct device *dev, bool val); >extern int device_set_wakeup_enable(struct device *dev, bool enable); >extern void __pm_stay_awake(struct wakeup_source *ws); >extern void pm_stay_awake(struct device *dev); >extern void __pm_relax(struct wakeup_source *ws); >extern void pm_relax(struct device *dev); >extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); >extern void pm_wakeup_event(struct device *dev, unsigned int msec); > >static inline 
__attribute__((always_inline)) void wakeup_source_init(struct wakeup_source *ws, > const char *name) >{ > wakeup_source_prepare(ws, name); > wakeup_source_add(ws); >} > >static inline __attribute__((always_inline)) void wakeup_source_trash(struct wakeup_source *ws) >{ > wakeup_source_remove(ws); > wakeup_source_drop(ws); >} > > >static inline __attribute__((always_inline)) const char *dev_name(const struct device *dev) >{ > > if (dev->init_name) > return dev->init_name; > > return kobject_name(&dev->kobj); >} > >extern __attribute__((format(printf, 2, 3))) >int dev_set_name(struct device *dev, const char *name, ...); > >static inline __attribute__((always_inline)) int dev_to_node(struct device *dev) >{ > return -1; >} >static inline __attribute__((always_inline)) void set_dev_node(struct device *dev, int node) >{ >} > > >static inline __attribute__((always_inline)) struct pm_subsys_data *dev_to_psd(struct device *dev) >{ > return dev ? dev->power.subsys_data : ((void *)0); >} > >static inline __attribute__((always_inline)) unsigned int dev_get_uevent_suppress(const struct device *dev) >{ > return dev->kobj.uevent_suppress; >} > >static inline __attribute__((always_inline)) void dev_set_uevent_suppress(struct device *dev, int val) >{ > dev->kobj.uevent_suppress = val; >} > >static inline __attribute__((always_inline)) int device_is_registered(struct device *dev) >{ > return dev->kobj.state_in_sysfs; >} > >static inline __attribute__((always_inline)) void device_enable_async_suspend(struct device *dev) >{ > if (!dev->power.is_prepared) > dev->power.async_suspend = true; >} > >static inline __attribute__((always_inline)) void device_disable_async_suspend(struct device *dev) >{ > if (!dev->power.is_prepared) > dev->power.async_suspend = false; >} > >static inline __attribute__((always_inline)) bool device_async_suspend_enabled(struct device *dev) >{ > return !!dev->power.async_suspend; >} > >static inline __attribute__((always_inline)) void pm_suspend_ignore_children(struct device *dev, bool enable) >{ > dev->power.ignore_children = enable; >} > >static inline __attribute__((always_inline)) void device_lock(struct device *dev) >{ > mutex_lock(&dev->mutex); >} > >static inline __attribute__((always_inline)) int device_trylock(struct device *dev) >{ > return mutex_trylock(&dev->mutex); >} > >static inline __attribute__((always_inline)) void device_unlock(struct device *dev) >{ > mutex_unlock(&dev->mutex); >} > >void driver_init(void); > > > > >extern int __attribute__((warn_unused_result)) device_register(struct device *dev); >extern void device_unregister(struct device *dev); >extern void device_initialize(struct device *dev); >extern int __attribute__((warn_unused_result)) device_add(struct device *dev); >extern void device_del(struct device *dev); >extern int device_for_each_child(struct device *dev, void *data, > int (*fn)(struct device *dev, void *data)); >extern struct device *device_find_child(struct device *dev, void *data, > int (*match)(struct device *dev, void *data)); >extern int device_rename(struct device *dev, const char *new_name); >extern int device_move(struct device *dev, struct device *new_parent, > enum dpm_order dpm_order); >extern const char *device_get_devnode(struct device *dev, > umode_t *mode, const char **tmp); >extern void *dev_get_drvdata(const struct device *dev); >extern int dev_set_drvdata(struct device *dev, void *data); > > > > >extern struct device *__root_device_register(const char *name, > struct module *owner); > >extern void root_device_unregister(struct 
device *root); > >static inline __attribute__((always_inline)) void *dev_get_platdata(const struct device *dev) >{ > return dev->platform_data; >} > > > > > >extern int __attribute__((warn_unused_result)) device_bind_driver(struct device *dev); >extern void device_release_driver(struct device *dev); >extern int __attribute__((warn_unused_result)) device_attach(struct device *dev); >extern int __attribute__((warn_unused_result)) driver_attach(struct device_driver *drv); >extern int __attribute__((warn_unused_result)) device_reprobe(struct device *dev); > > > > >extern struct device *device_create_vargs(struct class *cls, > struct device *parent, > dev_t devt, > void *drvdata, > const char *fmt, > va_list vargs); >extern __attribute__((format(printf, 5, 6))) >struct device *device_create(struct class *cls, struct device *parent, > dev_t devt, void *drvdata, > const char *fmt, ...); >extern void device_destroy(struct class *cls, dev_t devt); > > > > > > > >extern int (*platform_notify)(struct device *dev); > >extern int (*platform_notify_remove)(struct device *dev); > > > > > > >extern struct device *get_device(struct device *dev); >extern void put_device(struct device *dev); > >extern void wait_for_device_probe(void); > > > > > > >static inline __attribute__((always_inline)) int devtmpfs_create_node(struct device *dev) { return 0; } >static inline __attribute__((always_inline)) int devtmpfs_delete_node(struct device *dev) { return 0; } >static inline __attribute__((always_inline)) int devtmpfs_mount(const char *mountpoint) { return 0; } > > > >extern void device_shutdown(void); > > >extern const char *dev_driver_string(const struct device *dev); > > > > >extern int __dev_printk(const char *level, const struct device *dev, > struct va_format *vaf); >extern __attribute__((format(printf, 3, 4))) >int dev_printk(const char *level, const struct device *dev, > const char *fmt, ...) 
> ; >extern __attribute__((format(printf, 2, 3))) >int dev_emerg(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int dev_alert(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int dev_crit(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int dev_err(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int dev_warn(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int dev_notice(const struct device *dev, const char *fmt, ...); >extern __attribute__((format(printf, 2, 3))) >int _dev_info(const struct device *dev, const char *fmt, ...); > > > > >struct node { > struct device dev; > > > > >}; > >struct memory_block; >extern struct node node_devices[]; >typedef void (*node_registration_func_t)(struct node *); > >extern int register_node(struct node *, int, struct node *); >extern void unregister_node(struct node *node); > >static inline __attribute__((always_inline)) int register_one_node(int nid) >{ > return 0; >} >static inline __attribute__((always_inline)) int unregister_one_node(int nid) >{ > return 0; >} >static inline __attribute__((always_inline)) int register_cpu_under_node(unsigned int cpu, unsigned int nid) >{ > return 0; >} >static inline __attribute__((always_inline)) int unregister_cpu_under_node(unsigned int cpu, unsigned int nid) >{ > return 0; >} >static inline __attribute__((always_inline)) int register_mem_sect_under_node(struct memory_block *mem_blk, > int nid) >{ > return 0; >} >static inline __attribute__((always_inline)) int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, > unsigned long phys_index) >{ > return 0; >} > >static inline __attribute__((always_inline)) void register_hugetlbfs_with_node(node_registration_func_t reg, > node_registration_func_t unreg) >{ >} > > > > >struct device; > >struct cpu { > int node_id; > int hotpluggable; > struct device dev; >}; > >extern int register_cpu(struct cpu *cpu, int num); >extern struct device *get_cpu_device(unsigned cpu); >extern bool cpu_is_hotpluggable(unsigned cpu); > >extern int cpu_add_dev_attr(struct device_attribute *attr); >extern void cpu_remove_dev_attr(struct device_attribute *attr); > >extern int cpu_add_dev_attr_group(struct attribute_group *attrs); >extern void cpu_remove_dev_attr_group(struct attribute_group *attrs); > >extern int sched_create_sysfs_power_savings_entries(struct device *dev); > > >extern void unregister_cpu(struct cpu *cpu); >extern ssize_t arch_cpu_probe(const char *, size_t); >extern ssize_t arch_cpu_release(const char *, size_t); > >struct notifier_block; > >enum { > > CPU_PRI_SCHED_ACTIVE = ((int)(~0U>>1)), > CPU_PRI_CPUSET_ACTIVE = ((int)(~0U>>1)) - 1, > CPU_PRI_SCHED_INACTIVE = (-((int)(~0U>>1)) - 1) + 1, > CPU_PRI_CPUSET_INACTIVE = (-((int)(~0U>>1)) - 1), > > > CPU_PRI_PERF = 20, > CPU_PRI_MIGRATION = 10, > > CPU_PRI_WORKQUEUE_UP = 5, > CPU_PRI_WORKQUEUE_DOWN = -5, >}; > >extern int register_cpu_notifier(struct notifier_block *nb); >extern void unregister_cpu_notifier(struct notifier_block *nb); > >int cpu_up(unsigned int cpu); >void notify_cpu_starting(unsigned int cpu); >extern void cpu_maps_update_begin(void); >extern void cpu_maps_update_done(void); > >extern struct bus_type cpu_subsys; > > > > >extern void get_online_cpus(void); >extern void put_online_cpus(void); > > > >int cpu_down(unsigned int cpu); > > > > > >static inline 
__attribute__((always_inline)) void cpu_hotplug_driver_lock(void) >{ >} > >static inline __attribute__((always_inline)) void cpu_hotplug_driver_unlock(void) >{ >} > >extern int disable_nonboot_cpus(void); >extern void enable_nonboot_cpus(void); > >void idle_notifier_register(struct notifier_block *n); >void idle_notifier_unregister(struct notifier_block *n); >void idle_notifier_call_chain(unsigned long val); > > > > > > > >struct irq_work { > unsigned long flags; > struct llist_node llnode; > void (*func)(struct irq_work *); >}; > >static inline __attribute__((always_inline)) >void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) >{ > work->flags = 0; > work->func = func; >} > >bool irq_work_queue(struct irq_work *work); >void irq_work_run(void); >void irq_work_sync(struct irq_work *work); > > > > > > > > >typedef struct >{ > atomic_long_t a; >} local_t; > > > > > >struct perf_callchain_entry { > __u64 nr; > __u64 ip[255]; >}; > >struct perf_raw_record { > u32 size; > void *data; >}; > >struct perf_branch_entry { > __u64 from; > __u64 to; > __u64 mispred:1, > predicted:1, > reserved:62; >}; > >struct perf_branch_stack { > __u64 nr; > struct perf_branch_entry entries[0]; >}; > >struct task_struct; > > > > >struct hw_perf_event_extra { > u64 config; > unsigned int reg; > int alloc; > int idx; >}; > > > > >struct hw_perf_event { > > union { > struct { > u64 config; > u64 last_tag; > unsigned long config_base; > unsigned long event_base; > int idx; > int last_cpu; > > struct hw_perf_event_extra extra_reg; > struct hw_perf_event_extra branch_reg; > }; > struct { > struct hrtimer hrtimer; > }; > > struct { > struct arch_hw_breakpoint info; > struct list_head bp_list; > > > > > > struct task_struct *bp_target; > }; > > }; > int state; > local64_t prev_count; > u64 sample_period; > u64 last_period; > local64_t period_left; > u64 interrupts_seq; > u64 interrupts; > > u64 freq_time_stamp; > u64 freq_count_stamp; > >}; > >struct perf_event; > >struct pmu { > struct list_head entry; > > struct device *dev; > const struct attribute_group **attr_groups; > char *name; > int type; > > int * pmu_disable_count; > struct perf_cpu_context * pmu_cpu_context; > int task_ctx_nr; > > > > > > void (*pmu_enable) (struct pmu *pmu); > void (*pmu_disable) (struct pmu *pmu); > > > > > > int (*event_init) (struct perf_event *event); > > int (*add) (struct perf_event *event, int flags); > void (*del) (struct perf_event *event, int flags); > > > > > > > void (*start) (struct perf_event *event, int flags); > void (*stop) (struct perf_event *event, int flags); > > > > > void (*read) (struct perf_event *event); > > void (*start_txn) (struct pmu *pmu); > > > > > > > int (*commit_txn) (struct pmu *pmu); > > > > > void (*cancel_txn) (struct pmu *pmu); > > > > > > int (*event_idx) (struct perf_event *event); > > > > > void (*flush_branch_stack) (void); >}; > > > > >enum perf_event_active_state { > PERF_EVENT_STATE_ERROR = -2, > PERF_EVENT_STATE_OFF = -1, > PERF_EVENT_STATE_INACTIVE = 0, > PERF_EVENT_STATE_ACTIVE = 1, >}; > >struct file; >struct perf_sample_data; > >typedef void (*perf_overflow_handler_t)(struct perf_event *, > struct perf_sample_data *, > struct pt_regs *regs); > >enum perf_group_flag { > PERF_GROUP_SOFTWARE = 0x1, >}; > > > > >struct swevent_hlist { > struct hlist_head heads[(1 << 8)]; > struct rcu_head rcu_head; >}; > >struct ring_buffer; > > > > >struct perf_event { > > struct list_head group_entry; > struct list_head event_entry; > struct list_head sibling_list; > struct hlist_node 
hlist_entry; > int nr_siblings; > int group_flags; > struct perf_event *group_leader; > struct pmu *pmu; > > enum perf_event_active_state state; > unsigned int attach_state; > local64_t count; > atomic64_t child_count; > > u64 total_time_enabled; > u64 total_time_running; > > u64 tstamp_enabled; > u64 tstamp_running; > u64 tstamp_stopped; > > u64 shadow_ctx_time; > > struct perf_event_attr attr; > u16 header_size; > u16 id_header_size; > u16 read_size; > struct hw_perf_event hw; > > struct perf_event_context *ctx; > atomic_long_t refcount; > > > > > > atomic64_t child_total_time_enabled; > atomic64_t child_total_time_running; > > > > > struct mutex child_mutex; > struct list_head child_list; > struct perf_event *parent; > > int oncpu; > int cpu; > > struct list_head owner_entry; > struct task_struct *owner; > > > struct mutex mmap_mutex; > atomic_t mmap_count; > int mmap_locked; > struct user_struct *mmap_user; > struct ring_buffer *rb; > struct list_head rb_entry; > > > wait_queue_head_t waitq; > struct fasync_struct *fasync; > > > int pending_wakeup; > int pending_kill; > int pending_disable; > struct irq_work pending; > > atomic_t event_limit; > > void (*destroy)(struct perf_event *); > struct rcu_head rcu_head; > > struct pid_namespace *ns; > u64 id; > > perf_overflow_handler_t overflow_handler; > void *overflow_handler_context; > > > struct ftrace_event_call *tp_event; > struct event_filter *filter; > >}; > >enum perf_event_context_type { > task_context, > cpu_context, >}; > > > > > > >struct perf_event_context { > struct pmu *pmu; > enum perf_event_context_type type; > > > > > raw_spinlock_t lock; > > > > > > struct mutex mutex; > > struct list_head pinned_groups; > struct list_head flexible_groups; > struct list_head event_list; > int nr_events; > int nr_active; > int is_active; > int nr_stat; > int nr_freq; > int rotate_disable; > atomic_t refcount; > struct task_struct *task; > > > > > u64 time; > u64 timestamp; > > > > > > struct perf_event_context *parent_ctx; > u64 parent_gen; > u64 generation; > int pin_count; > int nr_cgroups; > int nr_branch_stack; > struct rcu_head rcu_head; >}; > >struct perf_cpu_context { > struct perf_event_context ctx; > struct perf_event_context *task_ctx; > int active_oncpu; > int exclusive; > struct list_head rotation_list; > int jiffies_interval; > struct pmu *active_pmu; > struct perf_cgroup *cgrp; >}; > >struct perf_output_handle { > struct perf_event *event; > struct ring_buffer *rb; > unsigned long wakeup; > unsigned long size; > void *addr; > int page; >}; > > > >extern int perf_pmu_register(struct pmu *pmu, char *name, int type); >extern void perf_pmu_unregister(struct pmu *pmu); > >extern int perf_num_counters(void); >extern const char *perf_pmu_name(void); >extern void __perf_event_task_sched_in(struct task_struct *prev, > struct task_struct *task); >extern void __perf_event_task_sched_out(struct task_struct *prev, > struct task_struct *next); >extern int perf_event_init_task(struct task_struct *child); >extern void perf_event_exit_task(struct task_struct *child); >extern void perf_event_free_task(struct task_struct *task); >extern void perf_event_delayed_put(struct task_struct *task); >extern void perf_event_print_debug(void); >extern void perf_pmu_disable(struct pmu *pmu); >extern void perf_pmu_enable(struct pmu *pmu); >extern int perf_event_task_disable(void); >extern int perf_event_task_enable(void); >extern int perf_event_refresh(struct perf_event *event, int refresh); >extern void perf_event_update_userpage(struct perf_event *event); 
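>
>/*
> * Editor's note, not part of the original attachment: the struct pmu
> * declared above is the callback table a PMU driver hands to the perf
> * core via perf_pmu_register(). Purely as an illustrative sketch, a
> * do-nothing software PMU could be wired up as follows. "demo_pmu" and
> * the demo_* callbacks are hypothetical names, and perf_sw_context is
> * assumed to be an enumerator declared earlier in this dump.
> */
>static void demo_pmu_enable(struct pmu *pmu) { }
>static void demo_pmu_disable(struct pmu *pmu) { }
>static int demo_event_init(struct perf_event *event)
>{
>	/* Claim only events created with this PMU's own type. */
>	if (event->attr.type != event->pmu->type)
>		return -2; /* -ENOENT, written numerically as elsewhere in this preprocessed dump */
>	return 0;
>}
>static int demo_add(struct perf_event *event, int flags) { return 0; }
>static void demo_del(struct perf_event *event, int flags) { }
>static void demo_start(struct perf_event *event, int flags) { }
>static void demo_stop(struct perf_event *event, int flags) { }
>static void demo_read(struct perf_event *event) { }
>
>static struct pmu demo_pmu = {
>	.task_ctx_nr = perf_sw_context,
>	.pmu_enable  = demo_pmu_enable,
>	.pmu_disable = demo_pmu_disable,
>	.event_init  = demo_event_init,
>	.add         = demo_add,
>	.del         = demo_del,
>	.start       = demo_start,
>	.stop        = demo_stop,
>	.read        = demo_read,
>};
>
>/* Registration would then read perf_pmu_register(&demo_pmu, "demo", -1),
> * where a negative type asks the core to allocate one dynamically (an
> * assumption about the contract of the declaration above). */
>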
>extern int perf_event_release_kernel(struct perf_event *event); >extern struct perf_event * >perf_event_create_kernel_counter(struct perf_event_attr *attr, > int cpu, > struct task_struct *task, > perf_overflow_handler_t callback, > void *context); >extern u64 perf_event_read_value(struct perf_event *event, > u64 *enabled, u64 *running); > > >struct perf_sample_data { > u64 type; > > u64 ip; > struct { > u32 pid; > u32 tid; > } tid_entry; > u64 time; > u64 addr; > u64 id; > u64 stream_id; > struct { > u32 cpu; > u32 reserved; > } cpu_entry; > u64 period; > struct perf_callchain_entry *callchain; > struct perf_raw_record *raw; > struct perf_branch_stack *br_stack; >}; > >static inline __attribute__((always_inline)) void perf_sample_data_init(struct perf_sample_data *data, u64 addr) >{ > data->addr = addr; > data->raw = ((void *)0); > data->br_stack = ((void *)0); >} > >extern void perf_output_sample(struct perf_output_handle *handle, > struct perf_event_header *header, > struct perf_sample_data *data, > struct perf_event *event); >extern void perf_prepare_sample(struct perf_event_header *header, > struct perf_sample_data *data, > struct perf_event *event, > struct pt_regs *regs); > >extern int perf_event_overflow(struct perf_event *event, > struct perf_sample_data *data, > struct pt_regs *regs); > >static inline __attribute__((always_inline)) bool is_sampling_event(struct perf_event *event) >{ > return event->attr.sample_period != 0; >} > > > > >static inline __attribute__((always_inline)) int is_software_event(struct perf_event *event) >{ > return event->pmu->task_ctx_nr == perf_sw_context; >} > >extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX]; > >extern void __perf_sw_event(u32, u64, struct pt_regs *, u64); > > >static inline __attribute__((always_inline)) void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { } > >static inline __attribute__((always_inline)) void perf_fetch_caller_regs(struct pt_regs *regs) >{ > ({ void *__p = (regs); size_t __n = sizeof(*regs); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > > perf_arch_fetch_caller_regs(regs, ((unsigned long)__builtin_return_address(0))); >} > >static inline __attribute__((always_inline)) __attribute__((always_inline)) void >perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) >{ > struct pt_regs hot_regs; > > if (static_key_false(&perf_swevent_enabled[event_id])) { > if (!regs) { > perf_fetch_caller_regs(&hot_regs); > regs = &hot_regs; > } > __perf_sw_event(event_id, nr, regs, addr); > } >} > >extern struct static_key_deferred perf_sched_events; > >static inline __attribute__((always_inline)) void perf_event_task_sched_in(struct task_struct *prev, > struct task_struct *task) >{ > if (static_key_false(&perf_sched_events.key)) > __perf_event_task_sched_in(prev, task); >} > >static inline __attribute__((always_inline)) void perf_event_task_sched_out(struct task_struct *prev, > struct task_struct *next) >{ > perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, ((void *)0), 0); > > if (static_key_false(&perf_sched_events.key)) > __perf_event_task_sched_out(prev, next); >} > >extern void perf_event_mmap(struct vm_area_struct *vma); >extern struct perf_guest_info_callbacks *perf_guest_cbs; >extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); >extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); > >extern void perf_event_comm(struct 
task_struct *tsk); >extern void perf_event_fork(struct task_struct *tsk); > > >extern __attribute__((section(".data..percpu" ""))) __typeof__(struct perf_callchain_entry) perf_callchain_entry; > >extern void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs); >extern void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs); > >static inline __attribute__((always_inline)) void perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) >{ > if (entry->nr < 255) > entry->ip[entry->nr++] = ip; >} > >extern int sysctl_perf_event_paranoid; >extern int sysctl_perf_event_mlock; >extern int sysctl_perf_event_sample_rate; > >extern int perf_proc_update_handler(struct ctl_table *table, int write, > void *buffer, size_t *lenp, > loff_t *ppos); > >static inline __attribute__((always_inline)) bool perf_paranoid_tracepoint_raw(void) >{ > return sysctl_perf_event_paranoid > -1; >} > >static inline __attribute__((always_inline)) bool perf_paranoid_cpu(void) >{ > return sysctl_perf_event_paranoid > 0; >} > >static inline __attribute__((always_inline)) bool perf_paranoid_kernel(void) >{ > return sysctl_perf_event_paranoid > 1; >} > >extern void perf_event_init(void); >extern void perf_tp_event(u64 addr, u64 count, void *record, > int entry_size, struct pt_regs *regs, > struct hlist_head *head, int rctx); >extern void perf_bp_event(struct perf_event *event, void *data); > > > > > > > >static inline __attribute__((always_inline)) bool has_branch_stack(struct perf_event *event) >{ > return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; >} > >extern int perf_output_begin(struct perf_output_handle *handle, > struct perf_event *event, unsigned int size); >extern void perf_output_end(struct perf_output_handle *handle); >extern void perf_output_copy(struct perf_output_handle *handle, > const void *buf, unsigned int len); >extern int perf_swevent_get_recursion_context(void); >extern void perf_swevent_put_recursion_context(int rctx); >extern void perf_event_enable(struct perf_event *event); >extern void perf_event_disable(struct perf_event *event); >extern void perf_event_task_tick(void); > >static inline __attribute__((always_inline)) void perf_restore_debug_store(void) { } > > >struct trace_array; >struct tracer; >struct dentry; > >struct trace_print_flags { > unsigned long mask; > const char *name; >}; > >struct trace_print_flags_u64 { > unsigned long long mask; > const char *name; >}; > >const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim, > unsigned long flags, > const struct trace_print_flags *flag_array); > >const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, > const struct trace_print_flags *symbol_array); > > >const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, > unsigned long long val, > const struct trace_print_flags_u64 > *symbol_array); > > >const char *ftrace_print_hex_seq(struct trace_seq *p, > const unsigned char *buf, int len); > > > > > > > >struct trace_entry { > unsigned short type; > unsigned char flags; > unsigned char preempt_count; > int pid; > int padding; >}; > >struct trace_iterator { > struct trace_array *tr; > struct tracer *trace; > void *private; > int cpu_file; > struct mutex mutex; > struct ring_buffer_iter *buffer_iter[2]; > unsigned long iter_flags; > > > struct trace_seq tmp_seq; > > > struct trace_seq seq; > struct trace_entry *ent; > unsigned long lost_events; > int leftover; > int ent_size; > int cpu; > u64 ts; > > loff_t pos; > long idx; > > cpumask_var_t 
started; >}; > > >struct trace_event; > >typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, > int flags, struct trace_event *event); > >struct trace_event_functions { > trace_print_func trace; > trace_print_func raw; > trace_print_func hex; > trace_print_func binary; >}; > >struct trace_event { > struct hlist_node node; > struct list_head list; > int type; > struct trace_event_functions *funcs; >}; > >extern int register_ftrace_event(struct trace_event *event); >extern int unregister_ftrace_event(struct trace_event *event); > > >enum print_line_t { > TRACE_TYPE_PARTIAL_LINE = 0, > TRACE_TYPE_HANDLED = 1, > TRACE_TYPE_UNHANDLED = 2, > TRACE_TYPE_NO_CONSUME = 3 >}; > >void tracing_generic_entry_update(struct trace_entry *entry, > unsigned long flags, > int pc); >struct ring_buffer_event * >trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, > int type, unsigned long len, > unsigned long flags, int pc); >void trace_current_buffer_unlock_commit(struct ring_buffer *buffer, > struct ring_buffer_event *event, > unsigned long flags, int pc); >void trace_nowake_buffer_unlock_commit(struct ring_buffer *buffer, > struct ring_buffer_event *event, > unsigned long flags, int pc); >void trace_nowake_buffer_unlock_commit_regs(struct ring_buffer *buffer, > struct ring_buffer_event *event, > unsigned long flags, int pc, > struct pt_regs *regs); >void trace_current_buffer_discard_commit(struct ring_buffer *buffer, > struct ring_buffer_event *event); > >void tracing_record_cmdline(struct task_struct *tsk); > >struct event_filter; > >enum trace_reg { > TRACE_REG_REGISTER, > TRACE_REG_UNREGISTER, > > TRACE_REG_PERF_REGISTER, > TRACE_REG_PERF_UNREGISTER, > TRACE_REG_PERF_OPEN, > TRACE_REG_PERF_CLOSE, > TRACE_REG_PERF_ADD, > TRACE_REG_PERF_DEL, > >}; > >struct ftrace_event_call; > >struct ftrace_event_class { > char *system; > void *probe; > > void *perf_probe; > > int (*reg)(struct ftrace_event_call *event, > enum trace_reg type, void *data); > int (*define_fields)(struct ftrace_event_call *); > struct list_head *(*get_fields)(struct ftrace_event_call *); > struct list_head fields; > int (*raw_init)(struct ftrace_event_call *); >}; > >extern int ftrace_event_reg(struct ftrace_event_call *event, > enum trace_reg type, void *data); > >enum { > TRACE_EVENT_FL_ENABLED_BIT, > TRACE_EVENT_FL_FILTERED_BIT, > TRACE_EVENT_FL_RECORDED_CMD_BIT, > TRACE_EVENT_FL_CAP_ANY_BIT, > TRACE_EVENT_FL_NO_SET_FILTER_BIT, > TRACE_EVENT_FL_IGNORE_ENABLE_BIT, >}; > >enum { > TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT), > TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), > TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT), > TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), > TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), > TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), >}; > >struct ftrace_event_call { > struct list_head list; > struct ftrace_event_class *class; > char *name; > struct dentry *dir; > struct trace_event event; > const char *print_fmt; > struct event_filter *filter; > void *mod; > void *data; > > unsigned int flags; > > > int perf_refcount; > struct hlist_head *perf_events; > >}; > >extern void destroy_preds(struct ftrace_event_call *call); >extern int filter_match_preds(struct event_filter *filter, void *rec); >extern int filter_current_check_discard(struct ring_buffer *buffer, > struct ftrace_event_call *call, > void *rec, > struct ring_buffer_event *event); > >enum { > 
FILTER_OTHER = 0, > FILTER_STATIC_STRING, > FILTER_DYN_STRING, > FILTER_PTR_STRING, > FILTER_TRACE_FN, >}; > > >extern struct mutex event_storage_mutex; >extern char event_storage[128]; > >extern int trace_event_raw_init(struct ftrace_event_call *call); >extern int trace_define_field(struct ftrace_event_call *call, const char *type, > const char *name, int offset, int size, > int is_signed, int filter_type); >extern int trace_add_event_call(struct ftrace_event_call *call); >extern void trace_remove_event_call(struct ftrace_event_call *call); > > > >int trace_set_clr_event(const char *system, const char *event, int set); > >struct perf_event; > >extern __attribute__((section(".data..percpu" ""))) __typeof__(struct pt_regs) perf_trace_regs; > >extern int perf_trace_init(struct perf_event *event); >extern void perf_trace_destroy(struct perf_event *event); >extern int perf_trace_add(struct perf_event *event, int flags); >extern void perf_trace_del(struct perf_event *event, int flags); >extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, > char *filter_str); >extern void ftrace_profile_free_filter(struct perf_event *event); >extern void *perf_trace_buf_prepare(int size, unsigned short type, > struct pt_regs *regs, int *rctxp); > >static inline __attribute__((always_inline)) void >perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, > u64 count, struct pt_regs *regs, void *head) >{ > perf_tp_event(addr, count, raw_data, size, regs, head, rctx); >} > > >struct syscall_metadata { > const char *name; > int syscall_nr; > int nb_args; > const char **types; > const char **args; > struct list_head enter_fields; > > struct ftrace_event_call *enter_event; > struct ftrace_event_call *exit_event; >}; > >int perf_sysenter_enable(struct ftrace_event_call *call); >void perf_sysenter_disable(struct ftrace_event_call *call); >int perf_sysexit_enable(struct ftrace_event_call *call); >void perf_sysexit_disable(struct ftrace_event_call *call); > > > long sys_time(time_t *tloc); > long sys_stime(time_t *tptr); > long sys_gettimeofday(struct timeval *tv, > struct timezone *tz); > long sys_settimeofday(struct timeval *tv, > struct timezone *tz); > long sys_adjtimex(struct timex *txc_p); > > long sys_times(struct tms *tbuf); > > long sys_gettid(void); > long sys_nanosleep(struct timespec *rqtp, struct timespec *rmtp); > long sys_alarm(unsigned int seconds); > long sys_getpid(void); > long sys_getppid(void); > long sys_getuid(void); > long sys_geteuid(void); > long sys_getgid(void); > long sys_getegid(void); > long sys_getresuid(uid_t *ruid, uid_t *euid, uid_t *suid); > long sys_getresgid(gid_t *rgid, gid_t *egid, gid_t *sgid); > long sys_getpgid(pid_t pid); > long sys_getpgrp(void); > long sys_getsid(pid_t pid); > long sys_getgroups(int gidsetsize, gid_t *grouplist); > > long sys_setregid(gid_t rgid, gid_t egid); > long sys_setgid(gid_t gid); > long sys_setreuid(uid_t ruid, uid_t euid); > long sys_setuid(uid_t uid); > long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid); > long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid); > long sys_setfsuid(uid_t uid); > long sys_setfsgid(gid_t gid); > long sys_setpgid(pid_t pid, pid_t pgid); > long sys_setsid(void); > long sys_setgroups(int gidsetsize, gid_t *grouplist); > > long sys_acct(const char *name); > long sys_capget(cap_user_header_t header, > cap_user_data_t dataptr); > long sys_capset(cap_user_header_t header, > const cap_user_data_t data); > long sys_personality(unsigned int personality); > > long 
sys_sigpending(old_sigset_t *set); > long sys_sigprocmask(int how, old_sigset_t *set, > old_sigset_t *oset); > long sys_getitimer(int which, struct itimerval *value); > long sys_setitimer(int which, > struct itimerval *value, > struct itimerval *ovalue); > long sys_timer_create(clockid_t which_clock, > struct sigevent *timer_event_spec, > timer_t * created_timer_id); > long sys_timer_gettime(timer_t timer_id, > struct itimerspec *setting); > long sys_timer_getoverrun(timer_t timer_id); > long sys_timer_settime(timer_t timer_id, int flags, > const struct itimerspec *new_setting, > struct itimerspec *old_setting); > long sys_timer_delete(timer_t timer_id); > long sys_clock_settime(clockid_t which_clock, > const struct timespec *tp); > long sys_clock_gettime(clockid_t which_clock, > struct timespec *tp); > long sys_clock_adjtime(clockid_t which_clock, > struct timex *tx); > long sys_clock_getres(clockid_t which_clock, > struct timespec *tp); > long sys_clock_nanosleep(clockid_t which_clock, int flags, > const struct timespec *rqtp, > struct timespec *rmtp); > > long sys_nice(int increment); > long sys_sched_setscheduler(pid_t pid, int policy, > struct sched_param *param); > long sys_sched_setparam(pid_t pid, > struct sched_param *param); > long sys_sched_getscheduler(pid_t pid); > long sys_sched_getparam(pid_t pid, > struct sched_param *param); > long sys_sched_setaffinity(pid_t pid, unsigned int len, > unsigned long *user_mask_ptr); > long sys_sched_getaffinity(pid_t pid, unsigned int len, > unsigned long *user_mask_ptr); > long sys_sched_yield(void); > long sys_sched_get_priority_max(int policy); > long sys_sched_get_priority_min(int policy); > long sys_sched_rr_get_interval(pid_t pid, > struct timespec *interval); > long sys_setpriority(int which, int who, int niceval); > long sys_getpriority(int which, int who); > > long sys_shutdown(int, int); > long sys_reboot(int magic1, int magic2, unsigned int cmd, > void *arg); > long sys_restart_syscall(void); > long sys_kexec_load(unsigned long entry, unsigned long nr_segments, > struct kexec_segment *segments, > unsigned long flags); > > long sys_exit(int error_code); > long sys_exit_group(int error_code); > long sys_wait4(pid_t pid, int *stat_addr, > int options, struct rusage *ru); > long sys_waitid(int which, pid_t pid, > struct siginfo *infop, > int options, struct rusage *ru); > long sys_waitpid(pid_t pid, int *stat_addr, int options); > long sys_set_tid_address(int *tidptr); > long sys_futex(u32 *uaddr, int op, u32 val, > struct timespec *utime, u32 *uaddr2, > u32 val3); > > long sys_init_module(void *umod, unsigned long len, > const char *uargs); > long sys_delete_module(const char *name_user, > unsigned int flags); > > long sys_rt_sigprocmask(int how, sigset_t *set, > sigset_t *oset, size_t sigsetsize); > long sys_rt_sigpending(sigset_t *set, size_t sigsetsize); > long sys_rt_sigtimedwait(const sigset_t *uthese, > siginfo_t *uinfo, > const struct timespec *uts, > size_t sigsetsize); > long sys_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, > siginfo_t *uinfo); > long sys_kill(int pid, int sig); > long sys_tgkill(int tgid, int pid, int sig); > long sys_tkill(int pid, int sig); > long sys_rt_sigqueueinfo(int pid, int sig, siginfo_t *uinfo); > long sys_sgetmask(void); > long sys_ssetmask(int newmask); > long sys_signal(int sig, __sighandler_t handler); > long sys_pause(void); > > long sys_sync(void); > long sys_fsync(unsigned int fd); > long sys_fdatasync(unsigned int fd); > long sys_bdflush(int func, long data); > long sys_mount(char 
*dev_name, char *dir_name, > char *type, unsigned long flags, > void *data); > long sys_umount(char *name, int flags); > long sys_oldumount(char *name); > long sys_truncate(const char *path, long length); > long sys_ftruncate(unsigned int fd, unsigned long length); > long sys_stat(const char *filename, > struct __old_kernel_stat *statbuf); > long sys_statfs(const char * path, > struct statfs *buf); > long sys_statfs64(const char *path, size_t sz, > struct statfs64 *buf); > long sys_fstatfs(unsigned int fd, struct statfs *buf); > long sys_fstatfs64(unsigned int fd, size_t sz, > struct statfs64 *buf); > long sys_lstat(const char *filename, > struct __old_kernel_stat *statbuf); > long sys_fstat(unsigned int fd, > struct __old_kernel_stat *statbuf); > long sys_newstat(const char *filename, > struct stat *statbuf); > long sys_newlstat(const char *filename, > struct stat *statbuf); > long sys_newfstat(unsigned int fd, struct stat *statbuf); > long sys_ustat(unsigned dev, struct ustat *ubuf); > > long sys_stat64(const char *filename, > struct stat64 *statbuf); > long sys_fstat64(unsigned long fd, struct stat64 *statbuf); > long sys_lstat64(const char *filename, > struct stat64 *statbuf); > long sys_truncate64(const char *path, loff_t length); > long sys_ftruncate64(unsigned int fd, loff_t length); > > > long sys_setxattr(const char *path, const char *name, > const void *value, size_t size, int flags); > long sys_lsetxattr(const char *path, const char *name, > const void *value, size_t size, int flags); > long sys_fsetxattr(int fd, const char *name, > const void *value, size_t size, int flags); > long sys_getxattr(const char *path, const char *name, > void *value, size_t size); > long sys_lgetxattr(const char *path, const char *name, > void *value, size_t size); > long sys_fgetxattr(int fd, const char *name, > void *value, size_t size); > long sys_listxattr(const char *path, char *list, > size_t size); > long sys_llistxattr(const char *path, char *list, > size_t size); > long sys_flistxattr(int fd, char *list, size_t size); > long sys_removexattr(const char *path, > const char *name); > long sys_lremovexattr(const char *path, > const char *name); > long sys_fremovexattr(int fd, const char *name); > > long sys_brk(unsigned long brk); > long sys_mprotect(unsigned long start, size_t len, > unsigned long prot); > long sys_mremap(unsigned long addr, > unsigned long old_len, unsigned long new_len, > unsigned long flags, unsigned long new_addr); > long sys_remap_file_pages(unsigned long start, unsigned long size, > unsigned long prot, unsigned long pgoff, > unsigned long flags); > long sys_msync(unsigned long start, size_t len, int flags); > long sys_fadvise64(int fd, loff_t offset, size_t len, int advice); > long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice); > long sys_munmap(unsigned long addr, size_t len); > long sys_mlock(unsigned long start, size_t len); > long sys_munlock(unsigned long start, size_t len); > long sys_mlockall(int flags); > long sys_munlockall(void); > long sys_madvise(unsigned long start, size_t len, int behavior); > long sys_mincore(unsigned long start, size_t len, > unsigned char * vec); > > long sys_pivot_root(const char *new_root, > const char *put_old); > long sys_chroot(const char *filename); > long sys_mknod(const char *filename, umode_t mode, > unsigned dev); > long sys_link(const char *oldname, > const char *newname); > long sys_symlink(const char *old, const char *new); > long sys_unlink(const char *pathname); > long sys_rename(const char *oldname, > 
const char *newname); > long sys_chmod(const char *filename, umode_t mode); > long sys_fchmod(unsigned int fd, umode_t mode); > > long sys_fcntl(unsigned int fd, unsigned int cmd, unsigned long arg); > > long sys_fcntl64(unsigned int fd, > unsigned int cmd, unsigned long arg); > > long sys_pipe(int *fildes); > long sys_pipe2(int *fildes, int flags); > long sys_dup(unsigned int fildes); > long sys_dup2(unsigned int oldfd, unsigned int newfd); > long sys_dup3(unsigned int oldfd, unsigned int newfd, int flags); > long sys_ioperm(unsigned long from, unsigned long num, int on); > long sys_ioctl(unsigned int fd, unsigned int cmd, > unsigned long arg); > long sys_flock(unsigned int fd, unsigned int cmd); > long sys_io_setup(unsigned nr_reqs, aio_context_t *ctx); > long sys_io_destroy(aio_context_t ctx); > long sys_io_getevents(aio_context_t ctx_id, > long min_nr, > long nr, > struct io_event *events, > struct timespec *timeout); > long sys_io_submit(aio_context_t, long, > struct iocb * *); > long sys_io_cancel(aio_context_t ctx_id, struct iocb *iocb, > struct io_event *result); > long sys_sendfile(int out_fd, int in_fd, > off_t *offset, size_t count); > long sys_sendfile64(int out_fd, int in_fd, > loff_t *offset, size_t count); > long sys_readlink(const char *path, > char *buf, int bufsiz); > long sys_creat(const char *pathname, umode_t mode); > long sys_open(const char *filename, > int flags, umode_t mode); > long sys_close(unsigned int fd); > long sys_access(const char *filename, int mode); > long sys_vhangup(void); > long sys_chown(const char *filename, > uid_t user, gid_t group); > long sys_lchown(const char *filename, > uid_t user, gid_t group); > long sys_fchown(unsigned int fd, uid_t user, gid_t group); > > long sys_chown16(const char *filename, > old_uid_t user, old_gid_t group); > long sys_lchown16(const char *filename, > old_uid_t user, old_gid_t group); > long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group); > long sys_setregid16(old_gid_t rgid, old_gid_t egid); > long sys_setgid16(old_gid_t gid); > long sys_setreuid16(old_uid_t ruid, old_uid_t euid); > long sys_setuid16(old_uid_t uid); > long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid); > long sys_getresuid16(old_uid_t *ruid, > old_uid_t *euid, old_uid_t *suid); > long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid); > long sys_getresgid16(old_gid_t *rgid, > old_gid_t *egid, old_gid_t *sgid); > long sys_setfsuid16(old_uid_t uid); > long sys_setfsgid16(old_gid_t gid); > long sys_getgroups16(int gidsetsize, old_gid_t *grouplist); > long sys_setgroups16(int gidsetsize, old_gid_t *grouplist); > long sys_getuid16(void); > long sys_geteuid16(void); > long sys_getgid16(void); > long sys_getegid16(void); > > > long sys_utime(char *filename, > struct utimbuf *times); > long sys_utimes(char *filename, > struct timeval *utimes); > long sys_lseek(unsigned int fd, off_t offset, > unsigned int origin); > long sys_llseek(unsigned int fd, unsigned long offset_high, > unsigned long offset_low, loff_t *result, > unsigned int origin); > long sys_read(unsigned int fd, char *buf, size_t count); > long sys_readahead(int fd, loff_t offset, size_t count); > long sys_readv(unsigned long fd, > const struct iovec *vec, > unsigned long vlen); > long sys_write(unsigned int fd, const char *buf, > size_t count); > long sys_writev(unsigned long fd, > const struct iovec *vec, > unsigned long vlen); > long sys_pread64(unsigned int fd, char *buf, > size_t count, loff_t pos); > long sys_pwrite64(unsigned int fd, const 
char *buf, > size_t count, loff_t pos); > long sys_preadv(unsigned long fd, const struct iovec *vec, > unsigned long vlen, unsigned long pos_l, unsigned long pos_h); > long sys_pwritev(unsigned long fd, const struct iovec *vec, > unsigned long vlen, unsigned long pos_l, unsigned long pos_h); > long sys_getcwd(char *buf, unsigned long size); > long sys_mkdir(const char *pathname, umode_t mode); > long sys_chdir(const char *filename); > long sys_fchdir(unsigned int fd); > long sys_rmdir(const char *pathname); > long sys_lookup_dcookie(u64 cookie64, char *buf, size_t len); > long sys_quotactl(unsigned int cmd, const char *special, > qid_t id, void *addr); > long sys_getdents(unsigned int fd, > struct linux_dirent *dirent, > unsigned int count); > long sys_getdents64(unsigned int fd, > struct linux_dirent64 *dirent, > unsigned int count); > > long sys_setsockopt(int fd, int level, int optname, > char *optval, int optlen); > long sys_getsockopt(int fd, int level, int optname, > char *optval, int *optlen); > long sys_bind(int, struct sockaddr *, int); > long sys_connect(int, struct sockaddr *, int); > long sys_accept(int, struct sockaddr *, int *); > long sys_accept4(int, struct sockaddr *, int *, int); > long sys_getsockname(int, struct sockaddr *, int *); > long sys_getpeername(int, struct sockaddr *, int *); > long sys_send(int, void *, size_t, unsigned); > long sys_sendto(int, void *, size_t, unsigned, > struct sockaddr *, int); > long sys_sendmsg(int fd, struct msghdr *msg, unsigned flags); > long sys_sendmmsg(int fd, struct mmsghdr *msg, > unsigned int vlen, unsigned flags); > long sys_recv(int, void *, size_t, unsigned); > long sys_recvfrom(int, void *, size_t, unsigned, > struct sockaddr *, int *); > long sys_recvmsg(int fd, struct msghdr *msg, unsigned flags); > long sys_recvmmsg(int fd, struct mmsghdr *msg, > unsigned int vlen, unsigned flags, > struct timespec *timeout); > long sys_socket(int, int, int); > long sys_socketpair(int, int, int, int *); > long sys_socketcall(int call, unsigned long *args); > long sys_listen(int, int); > long sys_poll(struct pollfd *ufds, unsigned int nfds, > int timeout); > long sys_select(int n, fd_set *inp, fd_set *outp, > fd_set *exp, struct timeval *tvp); > long sys_old_select(struct sel_arg_struct *arg); > long sys_epoll_create(int size); > long sys_epoll_create1(int flags); > long sys_epoll_ctl(int epfd, int op, int fd, > struct epoll_event *event); > long sys_epoll_wait(int epfd, struct epoll_event *events, > int maxevents, int timeout); > long sys_epoll_pwait(int epfd, struct epoll_event *events, > int maxevents, int timeout, > const sigset_t *sigmask, > size_t sigsetsize); > long sys_gethostname(char *name, int len); > long sys_sethostname(char *name, int len); > long sys_setdomainname(char *name, int len); > long sys_newuname(struct new_utsname *name); > long sys_uname(struct old_utsname *); > long sys_olduname(struct oldold_utsname *); > > long sys_getrlimit(unsigned int resource, > struct rlimit *rlim); > > long sys_old_getrlimit(unsigned int resource, struct rlimit *rlim); > > long sys_setrlimit(unsigned int resource, > struct rlimit *rlim); > long sys_prlimit64(pid_t pid, unsigned int resource, > const struct rlimit64 *new_rlim, > struct rlimit64 *old_rlim); > long sys_getrusage(int who, struct rusage *ru); > long sys_umask(int mask); > > long sys_msgget(key_t key, int msgflg); > long sys_msgsnd(int msqid, struct msgbuf *msgp, > size_t msgsz, int msgflg); > long sys_msgrcv(int msqid, struct msgbuf *msgp, > size_t msgsz, long msgtyp, int 
msgflg); > long sys_msgctl(int msqid, int cmd, struct msqid_ds *buf); > > long sys_semget(key_t key, int nsems, int semflg); > long sys_semop(int semid, struct sembuf *sops, > unsigned nsops); > long sys_semctl(int semid, int semnum, int cmd, union semun arg); > long sys_semtimedop(int semid, struct sembuf *sops, > unsigned nsops, > const struct timespec *timeout); > long sys_shmat(int shmid, char *shmaddr, int shmflg); > long sys_shmget(key_t key, size_t size, int flag); > long sys_shmdt(char *shmaddr); > long sys_shmctl(int shmid, int cmd, struct shmid_ds *buf); > long sys_ipc(unsigned int call, int first, unsigned long second, > unsigned long third, void *ptr, long fifth); > > long sys_mq_open(const char *name, int oflag, umode_t mode, struct mq_attr *attr); > long sys_mq_unlink(const char *name); > long sys_mq_timedsend(mqd_t mqdes, const char *msg_ptr, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout); > long sys_mq_timedreceive(mqd_t mqdes, char *msg_ptr, size_t msg_len, unsigned int *msg_prio, const struct timespec *abs_timeout); > long sys_mq_notify(mqd_t mqdes, const struct sigevent *notification); > long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr *mqstat, struct mq_attr *omqstat); > > long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn); > long sys_pciconfig_read(unsigned long bus, unsigned long dfn, > unsigned long off, unsigned long len, > void *buf); > long sys_pciconfig_write(unsigned long bus, unsigned long dfn, > unsigned long off, unsigned long len, > void *buf); > > long sys_prctl(int option, unsigned long arg2, unsigned long arg3, > unsigned long arg4, unsigned long arg5); > long sys_swapon(const char *specialfile, int swap_flags); > long sys_swapoff(const char *specialfile); > long sys_sysctl(struct __sysctl_args *args); > long sys_sysinfo(struct sysinfo *info); > long sys_sysfs(int option, > unsigned long arg1, unsigned long arg2); > long sys_syslog(int type, char *buf, int len); > long sys_uselib(const char *library); > long sys_ni_syscall(void); > long sys_ptrace(long request, long pid, unsigned long addr, > unsigned long data); > > long sys_add_key(const char *_type, > const char *_description, > const void *_payload, > size_t plen, > key_serial_t destringid); > > long sys_request_key(const char *_type, > const char *_description, > const char *_callout_info, > key_serial_t destringid); > > long sys_keyctl(int cmd, unsigned long arg2, unsigned long arg3, > unsigned long arg4, unsigned long arg5); > > long sys_ioprio_set(int which, int who, int ioprio); > long sys_ioprio_get(int which, int who); > long sys_set_mempolicy(int mode, unsigned long *nmask, > unsigned long maxnode); > long sys_migrate_pages(pid_t pid, unsigned long maxnode, > const unsigned long *from, > const unsigned long *to); > long sys_move_pages(pid_t pid, unsigned long nr_pages, > const void * *pages, > const int *nodes, > int *status, > int flags); > long sys_mbind(unsigned long start, unsigned long len, > unsigned long mode, > unsigned long *nmask, > unsigned long maxnode, > unsigned flags); > long sys_get_mempolicy(int *policy, > unsigned long *nmask, > unsigned long maxnode, > unsigned long addr, unsigned long flags); > > long sys_inotify_init(void); > long sys_inotify_init1(int flags); > long sys_inotify_add_watch(int fd, const char *path, > u32 mask); > long sys_inotify_rm_watch(int fd, __s32 wd); > > long sys_spu_run(int fd, __u32 *unpc, > __u32 *ustatus); > long sys_spu_create(const char *name, > unsigned int flags, umode_t mode, int 
fd); > > long sys_mknodat(int dfd, const char * filename, umode_t mode, > unsigned dev); > long sys_mkdirat(int dfd, const char * pathname, umode_t mode); > long sys_unlinkat(int dfd, const char * pathname, int flag); > long sys_symlinkat(const char * oldname, > int newdfd, const char * newname); > long sys_linkat(int olddfd, const char *oldname, > int newdfd, const char *newname, int flags); > long sys_renameat(int olddfd, const char * oldname, > int newdfd, const char * newname); > long sys_futimesat(int dfd, const char *filename, > struct timeval *utimes); > long sys_faccessat(int dfd, const char *filename, int mode); > long sys_fchmodat(int dfd, const char * filename, > umode_t mode); > long sys_fchownat(int dfd, const char *filename, uid_t user, > gid_t group, int flag); > long sys_openat(int dfd, const char *filename, int flags, > umode_t mode); > long sys_newfstatat(int dfd, const char *filename, > struct stat *statbuf, int flag); > long sys_fstatat64(int dfd, const char *filename, > struct stat64 *statbuf, int flag); > long sys_readlinkat(int dfd, const char *path, char *buf, > int bufsiz); > long sys_utimensat(int dfd, const char *filename, > struct timespec *utimes, int flags); > long sys_unshare(unsigned long unshare_flags); > > long sys_splice(int fd_in, loff_t *off_in, > int fd_out, loff_t *off_out, > size_t len, unsigned int flags); > > long sys_vmsplice(int fd, const struct iovec *iov, > unsigned long nr_segs, unsigned int flags); > > long sys_tee(int fdin, int fdout, size_t len, unsigned int flags); > > long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes, > unsigned int flags); > long sys_sync_file_range2(int fd, unsigned int flags, > loff_t offset, loff_t nbytes); > long sys_get_robust_list(int pid, > struct robust_list_head * *head_ptr, > size_t *len_ptr); > long sys_set_robust_list(struct robust_list_head *head, > size_t len); > long sys_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *cache); > long sys_signalfd(int ufd, sigset_t *user_mask, size_t sizemask); > long sys_signalfd4(int ufd, sigset_t *user_mask, size_t sizemask, int flags); > long sys_timerfd_create(int clockid, int flags); > long sys_timerfd_settime(int ufd, int flags, > const struct itimerspec *utmr, > struct itimerspec *otmr); > long sys_timerfd_gettime(int ufd, struct itimerspec *otmr); > long sys_eventfd(unsigned int count); > long sys_eventfd2(unsigned int count, int flags); > long sys_fallocate(int fd, int mode, loff_t offset, loff_t len); > long sys_old_readdir(unsigned int, struct old_linux_dirent *, unsigned int); > long sys_pselect6(int, fd_set *, fd_set *, > fd_set *, struct timespec *, > void *); > long sys_ppoll(struct pollfd *, unsigned int, > struct timespec *, const sigset_t *, > size_t); > long sys_fanotify_init(unsigned int flags, unsigned int event_f_flags); > long sys_fanotify_mark(int fanotify_fd, unsigned int flags, > u64 mask, int fd, > const char *pathname); > long sys_syncfs(int fd); > >int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]); > > > long sys_perf_event_open( > struct perf_event_attr *attr_uptr, > pid_t pid, int cpu, int group_fd, unsigned long flags); > > long sys_mmap_pgoff(unsigned long addr, unsigned long len, > unsigned long prot, unsigned long flags, > unsigned long fd, unsigned long pgoff); > long sys_old_mmap(struct mmap_arg_struct *arg); > long sys_name_to_handle_at(int dfd, const char *name, > struct file_handle *handle, > int *mnt_id, int flag); > long sys_open_by_handle_at(int mountdirfd, > struct 
file_handle *handle, > int flags); > long sys_setns(int fd, int nstype); > long sys_process_vm_readv(pid_t pid, > const struct iovec *lvec, > unsigned long liovcnt, > const struct iovec *rvec, > unsigned long riovcnt, > unsigned long flags); > long sys_process_vm_writev(pid_t pid, > const struct iovec *lvec, > unsigned long liovcnt, > const struct iovec *rvec, > unsigned long riovcnt, > unsigned long flags); > > > > > > > > >struct fsnotify_group; >struct fsnotify_event; >struct fsnotify_mark; >struct fsnotify_event_private_data; > >struct fsnotify_ops { > bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, > struct fsnotify_mark *inode_mark, > struct fsnotify_mark *vfsmount_mark, > __u32 mask, void *data, int data_type); > int (*handle_event)(struct fsnotify_group *group, > struct fsnotify_mark *inode_mark, > struct fsnotify_mark *vfsmount_mark, > struct fsnotify_event *event); > void (*free_group_priv)(struct fsnotify_group *group); > void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); > void (*free_event_priv)(struct fsnotify_event_private_data *priv); >}; > > > > > > > >struct fsnotify_group { > > atomic_t refcnt; > > const struct fsnotify_ops *ops; > > > struct mutex notification_mutex; > struct list_head notification_list; > wait_queue_head_t notification_waitq; > unsigned int q_len; > unsigned int max_events; > > > > > > > > unsigned int priority; > > > spinlock_t mark_lock; > atomic_t num_marks; > > > struct list_head marks_list; > > > union { > void *private; > > struct inotify_group_private_data { > spinlock_t idr_lock; > struct idr idr; > u32 last_wd; > struct fasync_struct *fa; > struct user_struct *user; > } inotify_data; > > }; >}; > >struct fsnotify_event_holder { > struct fsnotify_event *event; > struct list_head event_list; >}; > > > > > >struct fsnotify_event_private_data { > struct fsnotify_group *group; > struct list_head event_list; >}; > > > > > > >struct fsnotify_event { > > > > > > struct fsnotify_event_holder holder; > spinlock_t lock; > > struct inode *to_tell; > > union { > struct path path; > struct inode *inode; > }; > > > > > int data_type; > atomic_t refcnt; > __u32 mask; > > u32 sync_cookie; > const unsigned char *file_name; > size_t name_len; > struct pid *tgid; > > > > > > struct list_head private_data_list; >}; > > > > >struct fsnotify_inode_mark { > struct inode *inode; > struct hlist_node i_list; > struct list_head free_i_list; >}; > > > > >struct fsnotify_vfsmount_mark { > struct vfsmount *mnt; > struct hlist_node m_list; > struct list_head free_m_list; >}; > >struct fsnotify_mark { > __u32 mask; > > > atomic_t refcnt; > struct fsnotify_group *group; > struct list_head g_list; > spinlock_t lock; > union { > struct fsnotify_inode_mark i; > struct fsnotify_vfsmount_mark m; > }; > struct list_head free_g_list; > __u32 ignored_mask; > > > > > > unsigned int flags; > struct list_head destroy_list; > void (*free_mark)(struct fsnotify_mark *mark); >}; > > > > > > >extern int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is, > const unsigned char *name, u32 cookie); >extern int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask); >extern void __fsnotify_inode_delete(struct inode *inode); >extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); >extern u32 fsnotify_get_cookie(void); > >static inline __attribute__((always_inline)) int fsnotify_inode_watches_children(struct inode *inode) >{ > > if (!(inode->i_fsnotify_mask & 0x08000000)) > return 0; > > > return 
inode->i_fsnotify_mask & (0x00000001 | 0x00000002 | 0x00000004 | 0x00000008 | 0x00000010 | 0x00000020 | 0x00000040 | 0x00000080 | 0x00000100 | 0x00000200); >} > > > > > >static inline __attribute__((always_inline)) void __fsnotify_update_dcache_flags(struct dentry *dentry) >{ > struct dentry *parent; > > do { if (__builtin_expect(!!(!((&(&(&dentry->d_lock)->rlock)->raw_lock)->lock != 0)), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/fsnotify_backend.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "332" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); > > parent = dentry->d_parent; > if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode)) > dentry->d_flags |= 0x4000; > else > dentry->d_flags &= ~0x4000; >} > > > > >static inline __attribute__((always_inline)) void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode) >{ > if (!inode) > return; > > spin_lock(&dentry->d_lock); > __fsnotify_update_dcache_flags(dentry); > spin_unlock(&dentry->d_lock); >} > > > > >extern struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops); > >extern void fsnotify_put_group(struct fsnotify_group *group); > > >extern void fsnotify_get_event(struct fsnotify_event *event); >extern void fsnotify_put_event(struct fsnotify_event *event); > >extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, > struct fsnotify_event *event); > > >extern struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, > struct fsnotify_event *event, > struct fsnotify_event_private_data *priv, > struct fsnotify_event *(*merge)(struct list_head *, > struct fsnotify_event *)); > >extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); > >extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group); > >extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group); > > > > >extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt); > >extern void fsnotify_recalc_inode_mask(struct inode *inode); >extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark)); > >extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode); > >extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt); > >extern void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old); > >extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask); > >extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask); > >extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, > struct inode *inode, struct vfsmount *mnt, int allow_dups); > >extern void fsnotify_destroy_mark(struct fsnotify_mark *mark); > >extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group); > >extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group); > >extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags); > >extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group); >extern void fsnotify_get_mark(struct fsnotify_mark *mark); >extern void fsnotify_put_mark(struct fsnotify_mark *mark); >extern void 
fsnotify_unmount_inodes(struct list_head *list); > > >extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, > void *data, int data_is, > const unsigned char *name, > u32 cookie, gfp_t gfp); > > >extern struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event); >extern int fsnotify_replace_event(struct fsnotify_event_holder *old_holder, > struct fsnotify_event *new_event); > > > > > > > >extern long arch_ptrace(struct task_struct *child, long request, > unsigned long addr, unsigned long data); >extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char *dst, int len); >extern int ptrace_writedata(struct task_struct *tsk, char *src, unsigned long dst, int len); >extern void ptrace_disable(struct task_struct *); >extern int ptrace_check_attach(struct task_struct *task, bool ignore_state); >extern int ptrace_request(struct task_struct *child, long request, > unsigned long addr, unsigned long data); >extern void ptrace_notify(int exit_code); >extern void __ptrace_link(struct task_struct *child, > struct task_struct *new_parent); >extern void __ptrace_unlink(struct task_struct *child); >extern void exit_ptrace(struct task_struct *tracer); > > > > >extern int __ptrace_may_access(struct task_struct *task, unsigned int mode); > >extern bool ptrace_may_access(struct task_struct *task, unsigned int mode); > >static inline __attribute__((always_inline)) int ptrace_reparented(struct task_struct *child) >{ > return !same_thread_group(child->real_parent, child->parent); >} > >static inline __attribute__((always_inline)) void ptrace_unlink(struct task_struct *child) >{ > if (__builtin_expect(!!(child->ptrace), 0)) > __ptrace_unlink(child); >} > >int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr, > unsigned long data); >int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr, > unsigned long data); > >static inline __attribute__((always_inline)) struct task_struct *ptrace_parent(struct task_struct *task) >{ > if (__builtin_expect(!!(task->ptrace), 0)) > return ({ typeof(*(task->parent)) *_________p1 = (typeof(*(task->parent))* )(*(volatile typeof((task->parent)) *)&((task->parent))); do { } while (0); ; do { } while(0); ((typeof(*(task->parent)) *)(_________p1)); }); > return ((void *)0); >} > >static inline __attribute__((always_inline)) bool ptrace_event_enabled(struct task_struct *task, int event) >{ > return task->ptrace & (1 << (3 + (event))); >} > >static inline __attribute__((always_inline)) void ptrace_event(int event, unsigned long message) >{ > if (__builtin_expect(!!(ptrace_event_enabled((get_current()), event)), 0)) { > (get_current())->ptrace_message = message; > ptrace_notify((event << 8) | 5); > } else if (event == 4) { > > if (((get_current())->ptrace & (0x00000001|0x00010000)) == 0x00000001) > send_sig(5, (get_current()), 0); > } >} > >static inline __attribute__((always_inline)) void ptrace_init_task(struct task_struct *child, bool ptrace) >{ > INIT_LIST_HEAD(&child->ptrace_entry); > INIT_LIST_HEAD(&child->ptraced); > > (((&child->ptrace_bp_refcnt)->counter) = (1)); > > child->jobctl = 0; > child->ptrace = 0; > child->parent = child->real_parent; > > if (__builtin_expect(!!(ptrace), 0) && (get_current())->ptrace) { > child->ptrace = (get_current())->ptrace; > __ptrace_link(child, (get_current())->parent); > > if (child->ptrace & 0x00010000) > task_set_jobctl_pending(child, (1 << 19)); > else > sigaddset(&child->pending.signal, 19); > > set_tsk_thread_flag(child, 0); > } >} > > > > > 
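>
>/*
> * Editor's note, not part of the original attachment: ptrace_event()
> * above reports an event only when the tracer opted in via the
> * corresponding PTRACE_O_TRACE* bit (tested by ptrace_event_enabled()).
> * After preprocessing, 4 is PTRACE_EVENT_EXEC and 5 is SIGTRAP, so the
> * else-branch preserves the legacy exec SIGTRAP for plainly ptraced,
> * non-seized tasks. A purely illustrative caller, with a hypothetical
> * name, would be:
> */
>static inline __attribute__((always_inline)) void demo_report_exec(unsigned long exec_id)
>{
>	/* Fires (PTRACE_EVENT_EXEC << 8) | SIGTRAP through ptrace_notify()
>	 * when exec tracing was requested; otherwise ptrace_event() itself
>	 * falls back to the plain SIGTRAP path shown above. */
>	ptrace_event(4, exec_id);
>}
>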
> > >static inline __attribute__((always_inline)) void ptrace_release_task(struct task_struct *task) >{ > do { if (__builtin_expect(!!(!list_empty(&task->ptraced)), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/ptrace.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "250" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); > ptrace_unlink(task); > do { if (__builtin_expect(!!(!list_empty(&task->ptrace_entry)), 0)) do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/ptrace.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "252" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); } while(0); >} > >static inline __attribute__((always_inline)) void user_enable_single_step(struct task_struct *task) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/ptrace.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "310" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); >} > >static inline __attribute__((always_inline)) void user_disable_single_step(struct task_struct *task) >{ >} > >static inline __attribute__((always_inline)) void user_enable_block_step(struct task_struct *task) >{ > do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/ptrace.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "353" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0); >} > >static inline __attribute__((always_inline)) void user_single_step_siginfo(struct task_struct *tsk, > struct pt_regs *regs, siginfo_t *info) >{ > ({ void *__p = (info); size_t __n = sizeof(*info); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > info->si_signo = 5; >} > >extern int task_current_syscall(struct task_struct *target, long *callno, > unsigned long args[6], unsigned int maxargs, > unsigned long *sp, unsigned long *pc); > > >extern int ptrace_get_breakpoints(struct task_struct *tsk); >extern void ptrace_put_breakpoints(struct task_struct *tsk); > > >enum { > Audit_equal, > Audit_not_equal, > Audit_bitmask, > Audit_bittest, > Audit_lt, > Audit_gt, > Audit_le, > Audit_ge, > Audit_bad >}; > >struct audit_status { > __u32 mask; > __u32 enabled; > __u32 failure; > __u32 pid; > __u32 rate_limit; > __u32 backlog_limit; > __u32 lost; > __u32 backlog; >}; > >struct audit_tty_status { > __u32 enabled; >}; > > > > > >struct audit_rule_data { > __u32 flags; > __u32 action; > __u32 field_count; > __u32 mask[64]; > __u32 fields[64]; > __u32 values[64]; > __u32 fieldflags[64]; > __u32 buflen; > char buf[0]; >}; > > > > > >struct audit_rule { > __u32 flags; > __u32 action; > __u32 field_count; > __u32 mask[64]; > __u32 fields[64]; > __u32 values[64]; >}; > > > > >struct audit_sig_info { > uid_t uid; > pid_t pid; > char ctx[0]; >}; > >struct audit_buffer; >struct audit_context; >struct inode; >struct netlink_skb_parms; >struct path; >struct linux_binprm; >struct mq_attr; >struct mqstat; >struct audit_watch; >struct audit_tree; > >struct audit_krule { > int vers_ops; > u32 flags; > u32 listnr; > u32 action; > u32 
mask[64]; > u32 buflen; > u32 field_count; > char *filterkey; > struct audit_field *fields; > struct audit_field *arch_f; > struct audit_field *inode_f; > struct audit_watch *watch; > struct audit_tree *tree; > struct list_head rlist; > struct list_head list; > u64 prio; >}; > >struct audit_field { > u32 type; > u32 val; > u32 op; > char *lsm_str; > void *lsm_rule; >}; > >extern int __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) audit_register_class(int class, unsigned *list); >extern int audit_classify_syscall(int abi, unsigned syscall); >extern int audit_classify_arch(int arch); > > > >extern int audit_alloc(struct task_struct *task); >extern void __audit_free(struct task_struct *task); >extern void __audit_syscall_entry(int arch, > int major, unsigned long a0, unsigned long a1, > unsigned long a2, unsigned long a3); >extern void __audit_syscall_exit(int ret_success, long ret_value); >extern void __audit_getname(const char *name); >extern void audit_putname(const char *name); >extern void __audit_inode(const char *name, const struct dentry *dentry); >extern void __audit_inode_child(const struct dentry *dentry, > const struct inode *parent); >extern void __audit_seccomp(unsigned long syscall, long signr, int code); >extern void __audit_ptrace(struct task_struct *t); > >static inline __attribute__((always_inline)) int audit_dummy_context(void) >{ > void *p = (get_current())->audit_context; > return !p || *(int *)p; >} >static inline __attribute__((always_inline)) void audit_free(struct task_struct *task) >{ > if (__builtin_expect(!!(task->audit_context), 0)) > __audit_free(task); >} >static inline __attribute__((always_inline)) void audit_syscall_entry(int arch, int major, unsigned long a0, > unsigned long a1, unsigned long a2, > unsigned long a3) >{ > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_syscall_entry(arch, major, a0, a1, a2, a3); >} >static inline __attribute__((always_inline)) void audit_syscall_exit(void *pt_regs) >{ > if (__builtin_expect(!!((get_current())->audit_context), 0)) { > int success = (!__builtin_expect(!!(((unsigned long)(regs_return_value(pt_regs))) >= (unsigned long)-4095), 0)); > int return_code = regs_return_value(pt_regs); > > __audit_syscall_exit(success, return_code); > } >} >static inline __attribute__((always_inline)) void audit_getname(const char *name) >{ > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_getname(name); >} >static inline __attribute__((always_inline)) void audit_inode(const char *name, const struct dentry *dentry) { > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_inode(name, dentry); >} >static inline __attribute__((always_inline)) void audit_inode_child(const struct dentry *dentry, > const struct inode *parent) { > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_inode_child(dentry, parent); >} >void audit_core_dumps(long signr); > >static inline __attribute__((always_inline)) void audit_seccomp(unsigned long syscall, long signr, int code) >{ > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_seccomp(syscall, signr, code); >} > >static inline __attribute__((always_inline)) void audit_ptrace(struct task_struct *t) >{ > if (__builtin_expect(!!(!audit_dummy_context()), 0)) > __audit_ptrace(t); >} > > >extern unsigned int audit_serial(void); >extern int auditsc_get_stamp(struct audit_context *ctx, > struct timespec *t, unsigned int *serial); >extern int audit_set_loginuid(uid_t loginuid); > > >extern 
>extern void audit_log_task_context(struct audit_buffer *ab);
>extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
>extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
>extern int __audit_bprm(struct linux_binprm *bprm);
>extern void __audit_socketcall(int nargs, unsigned long *args);
>extern int __audit_sockaddr(int len, void *addr);
>extern void __audit_fd_pair(int fd1, int fd2);
>extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
>extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
>extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
>extern void __audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat);
>extern int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
> const struct cred *new,
> const struct cred *old);
>extern void __audit_log_capset(pid_t pid, const struct cred *new, const struct cred *old);
>extern void __audit_mmap_fd(int fd, int flags);
>
>static inline __attribute__((always_inline)) void audit_ipc_obj(struct kern_ipc_perm *ipcp)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_ipc_obj(ipcp);
>}
>static inline __attribute__((always_inline)) void audit_fd_pair(int fd1, int fd2)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_fd_pair(fd1, fd2);
>}
>static inline __attribute__((always_inline)) void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_ipc_set_perm(qbytes, uid, gid, mode);
>}
>static inline __attribute__((always_inline)) int audit_bprm(struct linux_binprm *bprm)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> return __audit_bprm(bprm);
> return 0;
>}
>static inline __attribute__((always_inline)) void audit_socketcall(int nargs, unsigned long *args)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_socketcall(nargs, args);
>}
>static inline __attribute__((always_inline)) int audit_sockaddr(int len, void *addr)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> return __audit_sockaddr(len, addr);
> return 0;
>}
>static inline __attribute__((always_inline)) void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_mq_open(oflag, mode, attr);
>}
>static inline __attribute__((always_inline)) void audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_mq_sendrecv(mqdes, msg_len, msg_prio, abs_timeout);
>}
>static inline __attribute__((always_inline)) void audit_mq_notify(mqd_t mqdes, const struct sigevent *notification)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_mq_notify(mqdes, notification);
>}
>static inline __attribute__((always_inline)) void audit_mq_getsetattr(mqd_t mqdes, struct mq_attr *mqstat)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_mq_getsetattr(mqdes, mqstat);
>}
>
>static inline __attribute__((always_inline)) int audit_log_bprm_fcaps(struct linux_binprm *bprm,
> const struct cred *new,
> const struct cred *old)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> return __audit_log_bprm_fcaps(bprm, new, old);
> return 0;
>}
>
>static inline __attribute__((always_inline)) void audit_log_capset(pid_t pid, const struct cred *new,
> const struct cred *old)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_log_capset(pid, new, old);
>}
>
>static inline __attribute__((always_inline)) void audit_mmap_fd(int fd, int flags)
>{
> if (__builtin_expect(!!(!audit_dummy_context()), 0))
> __audit_mmap_fd(fd, flags);
>}
>
>extern int audit_n_rules;
>extern int audit_signals;
>
>extern __attribute__((format(printf, 4, 5)))
>void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
> const char *fmt, ...);
>
>extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
>extern __attribute__((format(printf, 2, 3)))
>void audit_log_format(struct audit_buffer *ab, const char *fmt, ...);
>extern void audit_log_end(struct audit_buffer *ab);
>extern int audit_string_contains_control(const char *string,
> size_t len);
>extern void audit_log_n_hex(struct audit_buffer *ab,
> const unsigned char *buf,
> size_t len);
>extern void audit_log_n_string(struct audit_buffer *ab,
> const char *buf,
> size_t n);
>
>extern void audit_log_n_untrustedstring(struct audit_buffer *ab,
> const char *string,
> size_t n);
>extern void audit_log_untrustedstring(struct audit_buffer *ab,
> const char *string);
>extern void audit_log_d_path(struct audit_buffer *ab,
> const char *prefix,
> const struct path *path);
>extern void audit_log_key(struct audit_buffer *ab,
> char *key);
>extern void audit_log_lost(const char *message);
>
>extern void audit_log_secctx(struct audit_buffer *ab, u32 secid);
>
>
>
>
>extern int audit_update_lsm_rules(void);
>
>
>extern int audit_filter_user(struct netlink_skb_parms *cb);
>extern int audit_filter_type(int type);
>extern int audit_receive_filter(int type, int pid, int uid, int seq,
> void *data, size_t datasz, uid_t loginuid,
> u32 sessionid, u32 sid);
>extern int audit_enabled;
>
>
>
>void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init(void);
>int slab_is_available(void);
>
>struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
> unsigned long,
> void (*)(void *));
>void kmem_cache_destroy(struct kmem_cache *);
>int kmem_cache_shrink(struct kmem_cache *);
>void kmem_cache_free(struct kmem_cache *, void *);
>unsigned int kmem_cache_size(struct kmem_cache *);
>
>void * __attribute__((warn_unused_result)) __krealloc(const void *, size_t, gfp_t);
>void * __attribute__((warn_unused_result)) krealloc(const void *, size_t, gfp_t);
>void kfree(const void *);
>void kzfree(const void *);
>size_t ksize(const void *);
>
>
>
>
>
>static inline __attribute__((always_inline)) void kmemleak_init(void)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_alloc(const void *ptr, size_t size, int min_count,
> gfp_t gfp)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_alloc_recursive(const void *ptr, size_t size,
> int min_count, unsigned long flags,
> gfp_t gfp)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_alloc_percpu(const void *ptr, size_t size)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_free(const void *ptr)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_free_part(const void *ptr, size_t size)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_free_recursive(const void *ptr, unsigned long flags)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_free_percpu(const void *ptr)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_not_leak(const void *ptr)
>{
>}
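>/*
> * The empty kmemleak_*() bodies above (and just below) are the stubs this
> * configuration gets with kmemleak compiled out; the allocator code later
> * in the file calls them unconditionally and the compiler simply deletes
> * the calls, e.g.:
> *
> *   static inline void kmemleak_alloc(const void *ptr, size_t size,
> *                                     int min_count, gfp_t gfp) { }
> */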
>static inline __attribute__((always_inline)) void kmemleak_ignore(const void *ptr)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_erase(void **ptr)
>{
>}
>static inline __attribute__((always_inline)) void kmemleak_no_scan(const void *ptr)
>{
>}
>
>
>enum stat_item {
> ALLOC_FASTPATH,
> ALLOC_SLOWPATH,
> FREE_FASTPATH,
> FREE_SLOWPATH,
> FREE_FROZEN,
> FREE_ADD_PARTIAL,
> FREE_REMOVE_PARTIAL,
> ALLOC_FROM_PARTIAL,
> ALLOC_SLAB,
> ALLOC_REFILL,
> ALLOC_NODE_MISMATCH,
> FREE_SLAB,
> CPUSLAB_FLUSH,
> DEACTIVATE_FULL,
> DEACTIVATE_EMPTY,
> DEACTIVATE_TO_HEAD,
> DEACTIVATE_TO_TAIL,
> DEACTIVATE_REMOTE_FREES,
> DEACTIVATE_BYPASS,
> ORDER_FALLBACK,
> CMPXCHG_DOUBLE_CPU_FAIL,
> CMPXCHG_DOUBLE_FAIL,
> CPU_PARTIAL_ALLOC,
> CPU_PARTIAL_FREE,
> CPU_PARTIAL_NODE,
> CPU_PARTIAL_DRAIN,
> NR_SLUB_STAT_ITEMS };
>
>struct kmem_cache_cpu {
> void **freelist;
> unsigned long tid;
> struct page *page;
> struct page *partial;
> int node;
>
>
>
>};
>
>struct kmem_cache_node {
> spinlock_t list_lock;
> unsigned long nr_partial;
> struct list_head partial;
>
> atomic_long_t nr_slabs;
> atomic_long_t total_objects;
> struct list_head full;
>
>};
>
>
>
>
>
>
>
>struct kmem_cache_order_objects {
> unsigned long x;
>};
>
>
>
>
>struct kmem_cache {
> struct kmem_cache_cpu *cpu_slab;
>
> unsigned long flags;
> unsigned long min_partial;
> int size;
> int objsize;
> int offset;
> int cpu_partial;
> struct kmem_cache_order_objects oo;
>
>
> struct kmem_cache_order_objects max;
> struct kmem_cache_order_objects min;
> gfp_t allocflags;
> int refcount;
> void (*ctor)(void *);
> int inuse;
> int align;
> int reserved;
> const char *name;
> struct list_head list;
>
> struct kobject kobj;
>
> struct kmem_cache_node *node[(1 << 0)];
>};
>
>extern struct kmem_cache *kmalloc_caches[(12 + 2)];
>
>
>
>
>
>static inline __attribute__((always_inline)) __attribute__((always_inline)) int kmalloc_index(size_t size)
>{
> if (!size)
> return 0;
>
> if (size <= (1 << 6))
> return ( __builtin_constant_p((1 << 6)) ? ( ((1 << 6)) < 1 ? ____ilog2_NaN() : ((1 << 6)) & (1ULL << 63) ? 63 : ((1 << 6)) & (1ULL << 62) ? 62 : ((1 << 6)) & (1ULL << 61) ? 61 : ((1 << 6)) & (1ULL << 60) ? 60 : ((1 << 6)) & (1ULL << 59) ? 59 : ((1 << 6)) & (1ULL << 58) ? 58 : ((1 << 6)) & (1ULL << 57) ? 57 : ((1 << 6)) & (1ULL << 56) ? 56 : ((1 << 6)) & (1ULL << 55) ? 55 : ((1 << 6)) & (1ULL << 54) ? 54 : ((1 << 6)) & (1ULL << 53) ? 53 : ((1 << 6)) & (1ULL << 52) ? 52 : ((1 << 6)) & (1ULL << 51) ? 51 : ((1 << 6)) & (1ULL << 50) ? 50 : ((1 << 6)) & (1ULL << 49) ? 49 : ((1 << 6)) & (1ULL << 48) ? 48 : ((1 << 6)) & (1ULL << 47) ? 47 : ((1 << 6)) & (1ULL << 46) ? 46 : ((1 << 6)) & (1ULL << 45) ? 45 : ((1 << 6)) & (1ULL << 44) ? 44 : ((1 << 6)) & (1ULL << 43) ? 43 : ((1 << 6)) & (1ULL << 42) ? 42 : ((1 << 6)) & (1ULL << 41) ? 41 : ((1 << 6)) & (1ULL << 40) ? 40 : ((1 << 6)) & (1ULL << 39) ? 39 : ((1 << 6)) & (1ULL << 38) ? 38 : ((1 << 6)) & (1ULL << 37) ? 37 : ((1 << 6)) & (1ULL << 36) ? 36 : ((1 << 6)) & (1ULL << 35) ? 35 : ((1 << 6)) & (1ULL << 34) ? 34 : ((1 << 6)) & (1ULL << 33) ? 33 : ((1 << 6)) & (1ULL << 32) ? 32 : ((1 << 6)) & (1ULL << 31) ? 31 : ((1 << 6)) & (1ULL << 30) ? 30 : ((1 << 6)) & (1ULL << 29) ? 29 : ((1 << 6)) & (1ULL << 28) ? 28 : ((1 << 6)) & (1ULL << 27) ? 27 : ((1 << 6)) & (1ULL << 26) ? 26 : ((1 << 6)) & (1ULL << 25) ? 25 : ((1 << 6)) & (1ULL << 24) ? 24 : ((1 << 6)) & (1ULL << 23) ? 23 : ((1 << 6)) & (1ULL << 22) ? 
22 : ((1 << 6)) & (1ULL << 21) ? 21 : ((1 << 6)) & (1ULL << 20) ? 20 : ((1 << 6)) & (1ULL << 19) ? 19 : ((1 << 6)) & (1ULL << 18) ? 18 : ((1 << 6)) & (1ULL << 17) ? 17 : ((1 << 6)) & (1ULL << 16) ? 16 : ((1 << 6)) & (1ULL << 15) ? 15 : ((1 << 6)) & (1ULL << 14) ? 14 : ((1 << 6)) & (1ULL << 13) ? 13 : ((1 << 6)) & (1ULL << 12) ? 12 : ((1 << 6)) & (1ULL << 11) ? 11 : ((1 << 6)) & (1ULL << 10) ? 10 : ((1 << 6)) & (1ULL << 9) ? 9 : ((1 << 6)) & (1ULL << 8) ? 8 : ((1 << 6)) & (1ULL << 7) ? 7 : ((1 << 6)) & (1ULL << 6) ? 6 : ((1 << 6)) & (1ULL << 5) ? 5 : ((1 << 6)) & (1ULL << 4) ? 4 : ((1 << 6)) & (1ULL << 3) ? 3 : ((1 << 6)) & (1ULL << 2) ? 2 : ((1 << 6)) & (1ULL << 1) ? 1 : ((1 << 6)) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof((1 << 6)) <= 4) ? __ilog2_u32((1 << 6)) : __ilog2_u64((1 << 6)) );
>
> if ((1 << 6) <= 32 && size > 64 && size <= 96)
> return 1;
> if ((1 << 6) <= 64 && size > 128 && size <= 192)
> return 2;
> if (size <= 8) return 3;
> if (size <= 16) return 4;
> if (size <= 32) return 5;
> if (size <= 64) return 6;
> if (size <= 128) return 7;
> if (size <= 256) return 8;
> if (size <= 512) return 9;
> if (size <= 1024) return 10;
> if (size <= 2 * 1024) return 11;
> if (size <= 4 * 1024) return 12;
>
>
>
>
>
> if (size <= 8 * 1024) return 13;
> if (size <= 16 * 1024) return 14;
> if (size <= 32 * 1024) return 15;
> if (size <= 64 * 1024) return 16;
> if (size <= 128 * 1024) return 17;
> if (size <= 256 * 1024) return 18;
> if (size <= 512 * 1024) return 19;
> if (size <= 1024 * 1024) return 20;
> if (size <= 2 * 1024 * 1024) return 21;
> do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/slub_def.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "192" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0);
> return -1;
>
>}
>
>
>
>
>
>
>
>
>static inline __attribute__((always_inline)) __attribute__((always_inline)) struct kmem_cache *kmalloc_slab(size_t size)
>{
> int index = kmalloc_index(size);
>
> if (index == 0)
> return ((void *)0);
>
> return kmalloc_caches[index];
>}
>
>void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
>void *__kmalloc(size_t size, gfp_t flags);
>
>static inline __attribute__((always_inline)) __attribute__((always_inline)) void *
>kmalloc_order(size_t size, gfp_t flags, unsigned int order)
>{
> void *ret = (void *) __get_free_pages(flags | (( gfp_t)0x4000u), order);
> kmemleak_alloc(ret, size, 1, flags);
> return ret;
>}
>
>
>
>
>
>
>extern bool verify_mem_not_deleted(const void *x);
>
>extern void *
>kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
>extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
>
>static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc_large(size_t size, gfp_t flags)
>{
> unsigned int order = ( __builtin_constant_p(size) ? ( ((size) == 0UL) ? 32 - 12 : (((size) < (1UL << 12)) ? 0 : ( __builtin_constant_p((size) - 1) ? ( ((size) - 1) < 1 ? ____ilog2_NaN() : ((size) - 1) & (1ULL << 63) ? 63 : ((size) - 1) & (1ULL << 62) ? 62 : ((size) - 1) & (1ULL << 61) ? 61 : ((size) - 1) & (1ULL << 60) ? 60 : ((size) - 1) & (1ULL << 59) ? 59 : ((size) - 1) & (1ULL << 58) ? 58 : ((size) - 1) & (1ULL << 57) ? 57 : ((size) - 1) & (1ULL << 56) ? 56 : ((size) - 1) & (1ULL << 55) ? 55 : ((size) - 1) & (1ULL << 54) ? 54 : ((size) - 1) & (1ULL << 53) ? 53 : ((size) - 1) & (1ULL << 52) ? 52 : ((size) - 1) & (1ULL << 51) ? 
51 : ((size) - 1) & (1ULL << 50) ? 50 : ((size) - 1) & (1ULL << 49) ? 49 : ((size) - 1) & (1ULL << 48) ? 48 : ((size) - 1) & (1ULL << 47) ? 47 : ((size) - 1) & (1ULL << 46) ? 46 : ((size) - 1) & (1ULL << 45) ? 45 : ((size) - 1) & (1ULL << 44) ? 44 : ((size) - 1) & (1ULL << 43) ? 43 : ((size) - 1) & (1ULL << 42) ? 42 : ((size) - 1) & (1ULL << 41) ? 41 : ((size) - 1) & (1ULL << 40) ? 40 : ((size) - 1) & (1ULL << 39) ? 39 : ((size) - 1) & (1ULL << 38) ? 38 : ((size) - 1) & (1ULL << 37) ? 37 : ((size) - 1) & (1ULL << 36) ? 36 : ((size) - 1) & (1ULL << 35) ? 35 : ((size) - 1) & (1ULL << 34) ? 34 : ((size) - 1) & (1ULL << 33) ? 33 : ((size) - 1) & (1ULL << 32) ? 32 : ((size) - 1) & (1ULL << 31) ? 31 : ((size) - 1) & (1ULL << 30) ? 30 : ((size) - 1) & (1ULL << 29) ? 29 : ((size) - 1) & (1ULL << 28) ? 28 : ((size) - 1) & (1ULL << 27) ? 27 : ((size) - 1) & (1ULL << 26) ? 26 : ((size) - 1) & (1ULL << 25) ? 25 : ((size) - 1) & (1ULL << 24) ? 24 : ((size) - 1) & (1ULL << 23) ? 23 : ((size) - 1) & (1ULL << 22) ? 22 : ((size) - 1) & (1ULL << 21) ? 21 : ((size) - 1) & (1ULL << 20) ? 20 : ((size) - 1) & (1ULL << 19) ? 19 : ((size) - 1) & (1ULL << 18) ? 18 : ((size) - 1) & (1ULL << 17) ? 17 : ((size) - 1) & (1ULL << 16) ? 16 : ((size) - 1) & (1ULL << 15) ? 15 : ((size) - 1) & (1ULL << 14) ? 14 : ((size) - 1) & (1ULL << 13) ? 13 : ((size) - 1) & (1ULL << 12) ? 12 : ((size) - 1) & (1ULL << 11) ? 11 : ((size) - 1) & (1ULL << 10) ? 10 : ((size) - 1) & (1ULL << 9) ? 9 : ((size) - 1) & (1ULL << 8) ? 8 : ((size) - 1) & (1ULL << 7) ? 7 : ((size) - 1) & (1ULL << 6) ? 6 : ((size) - 1) & (1ULL << 5) ? 5 : ((size) - 1) & (1ULL << 4) ? 4 : ((size) - 1) & (1ULL << 3) ? 3 : ((size) - 1) & (1ULL << 2) ? 2 : ((size) - 1) & (1ULL << 1) ? 1 : ((size) - 1) & (1ULL << 0) ? 0 : ____ilog2_NaN() ) : (sizeof((size) - 1) <= 4) ? 
__ilog2_u32((size) - 1) : __ilog2_u64((size) - 1) ) - 12 + 1) ) : __get_order(size) );
> return kmalloc_order_trace(size, flags, order);
>}
>
>static inline __attribute__((always_inline)) __attribute__((always_inline)) void *kmalloc(size_t size, gfp_t flags)
>{
> if (__builtin_constant_p(size)) {
> if (size > (2 * ((1UL) << 12)))
> return kmalloc_large(size, flags);
>
> if (!(flags & ( gfp_t)0)) {
> struct kmem_cache *s = kmalloc_slab(size);
>
> if (!s)
> return ((void *)16);
>
> return kmem_cache_alloc_trace(s, flags, size);
> }
> }
> return __kmalloc(size, flags);
>}
>
>
>static inline __attribute__((always_inline)) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
>{
> if (size != 0 && n > (~0UL) / size)
> return ((void *)0);
> return __kmalloc(n * size, flags);
>}
>
>
>
>
>
>
>
>static inline __attribute__((always_inline)) void *kcalloc(size_t n, size_t size, gfp_t flags)
>{
> return kmalloc_array(n, size, flags | (( gfp_t)0x8000u));
>}
>
>static inline __attribute__((always_inline)) void *kmalloc_node(size_t size, gfp_t flags, int node)
>{
> return kmalloc(size, flags);
>}
>
>static inline __attribute__((always_inline)) void *__kmalloc_node(size_t size, gfp_t flags, int node)
>{
> return __kmalloc(size, flags);
>}
>
>void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
>
>static inline __attribute__((always_inline)) void *kmem_cache_alloc_node(struct kmem_cache *cachep,
> gfp_t flags, int node)
>{
> return kmem_cache_alloc(cachep, flags);
>}
>
>extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
>
>static inline __attribute__((always_inline)) void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
>{
> return kmem_cache_alloc(k, flags | (( gfp_t)0x8000u));
>}
>
>
>
>
>
>
>static inline __attribute__((always_inline)) void *kzalloc(size_t size, gfp_t flags)
>{
> return kmalloc(size, flags | (( gfp_t)0x8000u));
>}
>
>
>
>
>
>
>
>static inline __attribute__((always_inline)) void *kzalloc_node(size_t size, gfp_t flags, int node)
>{
> return kmalloc_node(size, flags | (( gfp_t)0x8000u), node);
>}
>
>void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) kmem_cache_init_late(void);
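>/*
> * The inlines above are SLUB's compile-time fast path. For a constant
> * size, kmalloc_index() folds its whole if-ladder (and the expanded
> * ilog2() conditional chain) into a single slot of kmalloc_caches[];
> * with the 64-byte minimum object size here ((1 << 6)), e.g.:
> *
> *   kmalloc_index(8)   == 6   -- rounded up to the 64-byte cache
> *   kmalloc_index(200) == 8   -- the 256-byte cache
> *
> * kmalloc() then calls kmem_cache_alloc_trace() on that cache directly,
> * or kmalloc_large() past two pages. kmalloc_array() guards n * size
> * against overflow first: on this 32-bit target
> * kmalloc_array(0x20000000, 16, ...) would wrap n * size to 0, so the
> * n > (~0UL) / size test returns NULL instead; kcalloc() is the same
> * call plus the zeroing flag (( gfp_t)0x8000u, i.e. __GFP_ZERO).
> */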
>
>
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_d_instantiate(struct dentry *dentry,
> struct inode *inode)
>{
> __fsnotify_d_instantiate(dentry, inode);
>}
>
>
>static inline __attribute__((always_inline)) int fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
>{
> if (!dentry)
> dentry = path->dentry;
>
> return __fsnotify_parent(path, dentry, mask);
>}
>
>
>static inline __attribute__((always_inline)) int fsnotify_perm(struct file *file, int mask)
>{
> struct path *path = &file->f_path;
> struct inode *inode = path->dentry->d_inode;
> __u32 fsnotify_mask = 0;
> int ret;
>
> if (file->f_mode & (( fmode_t)0x1000000))
> return 0;
> if (!(mask & (0x00000004 | 0x00000020)))
> return 0;
> if (mask & 0x00000020)
> fsnotify_mask = 0x00010000;
> else if (mask & 0x00000004)
> fsnotify_mask = 0x00020000;
> else
> do { asm volatile("1:\t" ".word " "0xe7f001f2" "\n" ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" "2:\t.asciz " "\"include/linux/fsnotify.h\"" "\n" ".popsection\n" ".pushsection __bug_table,\"a\"\n" "3:\t.word 1b, 2b\n" "\t.hword " "54" ", 0\n" ".popsection"); __builtin_unreachable(); } while (0);
>
> ret = fsnotify_parent(path, ((void *)0), fsnotify_mask);
> if (ret)
> return ret;
>
> return fsnotify(inode, fsnotify_mask, path, 1, ((void *)0), 0);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_d_move(struct dentry *dentry)
>{
>
>
>
>
> __fsnotify_update_dcache_flags(dentry);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_link_count(struct inode *inode)
>{
> fsnotify(inode, 0x00000004, inode, 2, ((void *)0), 0);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
> const unsigned char *old_name,
> int isdir, struct inode *target, struct dentry *moved)
>{
> struct inode *source = moved->d_inode;
> u32 fs_cookie = fsnotify_get_cookie();
> __u32 old_dir_mask = (0x08000000 | 0x00000040);
> __u32 new_dir_mask = (0x08000000 | 0x00000080);
> const unsigned char *new_name = moved->d_name.name;
>
> if (old_dir == new_dir)
> old_dir_mask |= 0x10000000;
>
> if (isdir) {
> old_dir_mask |= 0x40000000;
> new_dir_mask |= 0x40000000;
> }
>
> fsnotify(old_dir, old_dir_mask, old_dir, 2, old_name, fs_cookie);
> fsnotify(new_dir, new_dir_mask, new_dir, 2, new_name, fs_cookie);
>
> if (target)
> fsnotify_link_count(target);
>
> if (source)
> fsnotify(source, 0x00000800, moved->d_inode, 2, ((void *)0), 0);
> audit_inode_child(moved, new_dir);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_inode_delete(struct inode *inode)
>{
> __fsnotify_inode_delete(inode);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_vfsmount_delete(struct vfsmount *mnt)
>{
> __fsnotify_vfsmount_delete(mnt);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_nameremove(struct dentry *dentry, int isdir)
>{
> __u32 mask = 0x00000200;
>
> if (isdir)
> mask |= 0x40000000;
>
> fsnotify_parent(((void *)0), dentry, mask);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_inoderemove(struct inode *inode)
>{
> fsnotify(inode, 0x00000400, inode, 2, ((void *)0), 0);
> __fsnotify_inode_delete(inode);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_create(struct inode *inode, struct dentry *dentry)
>{
> audit_inode_child(dentry, inode);
>
> fsnotify(inode, 0x00000100, dentry->d_inode, 2, dentry->d_name.name, 0);
>}
>
>
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
>{
> fsnotify_link_count(inode);
> audit_inode_child(new_dentry, dir);
>
> fsnotify(dir, 0x00000100, inode, 2, new_dentry->d_name.name, 0);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
>{
> __u32 mask = (0x00000100 | 0x40000000);
> struct inode *d_inode = dentry->d_inode;
>
> audit_inode_child(dentry, inode);
>
> fsnotify(inode, mask, d_inode, 2, dentry->d_name.name, 0);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_access(struct file *file)
>{
> struct path *path = &file->f_path;
> struct inode *inode = path->dentry->d_inode;
> __u32 mask = 0x00000001;
>
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> if (!(file->f_mode & (( fmode_t)0x1000000))) {
> fsnotify_parent(path, ((void *)0), mask);
> fsnotify(inode, mask, path, 1, ((void *)0), 0);
> }
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_modify(struct file *file)
>{
> struct path *path = &file->f_path;
> struct inode *inode = path->dentry->d_inode;
> __u32 mask = 0x00000002;
>
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> if (!(file->f_mode & (( fmode_t)0x1000000))) {
> fsnotify_parent(path, ((void *)0), mask);
> fsnotify(inode, mask, path, 1, ((void *)0), 0);
> }
>}
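>/*
> * The (( fmode_t)0x1000000) bit tested throughout the fsnotify_*()
> * helpers above and below appears to be FMODE_NONOTIFY: files opened by
> * fanotify itself are excluded so that reading an event cannot generate a
> * further event. The guard pattern, pre-preprocessing, is simply:
> *
> *   if (!(file->f_mode & FMODE_NONOTIFY)) {
> *       fsnotify_parent(path, NULL, mask);
> *       fsnotify(inode, mask, path, 1 /+ path-typed data +/, NULL, 0);
> *   }
> */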
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_open(struct file *file)
>{
> struct path *path = &file->f_path;
> struct inode *inode = path->dentry->d_inode;
> __u32 mask = 0x00000020;
>
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> fsnotify_parent(path, ((void *)0), mask);
> fsnotify(inode, mask, path, 1, ((void *)0), 0);
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_close(struct file *file)
>{
> struct path *path = &file->f_path;
> struct inode *inode = file->f_path.dentry->d_inode;
> fmode_t mode = file->f_mode;
> __u32 mask = (mode & (( fmode_t)0x2)) ? 0x00000008 : 0x00000010;
>
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> if (!(file->f_mode & (( fmode_t)0x1000000))) {
> fsnotify_parent(path, ((void *)0), mask);
> fsnotify(inode, mask, path, 1, ((void *)0), 0);
> }
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_xattr(struct dentry *dentry)
>{
> struct inode *inode = dentry->d_inode;
> __u32 mask = 0x00000004;
>
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> fsnotify_parent(((void *)0), dentry, mask);
> fsnotify(inode, mask, inode, 2, ((void *)0), 0);
>}
>
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
>{
> struct inode *inode = dentry->d_inode;
> __u32 mask = 0;
>
> if (ia_valid & (1 << 1))
> mask |= 0x00000004;
> if (ia_valid & (1 << 2))
> mask |= 0x00000004;
> if (ia_valid & (1 << 3))
> mask |= 0x00000002;
>
>
> if ((ia_valid & ((1 << 4) | (1 << 5))) == ((1 << 4) | (1 << 5)))
> mask |= 0x00000004;
> else if (ia_valid & (1 << 4))
> mask |= 0x00000001;
> else if (ia_valid & (1 << 5))
> mask |= 0x00000002;
>
> if (ia_valid & (1 << 0))
> mask |= 0x00000004;
>
> if (mask) {
> if ((((inode->i_mode) & 00170000) == 0040000))
> mask |= 0x40000000;
>
> fsnotify_parent(((void *)0), dentry, mask);
> fsnotify(inode, mask, inode, 2, ((void *)0), 0);
> }
>}
>
>
>
>
>
>
>static inline __attribute__((always_inline)) const unsigned char *fsnotify_oldname_init(const unsigned char *name)
>{
> return kstrdup(name, ((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u)));
>}
>
>
>
>
>static inline __attribute__((always_inline)) void fsnotify_oldname_free(const unsigned char *old_name)
>{
> kfree(old_name);
>}
>
>
>
>
>
>static inline __attribute__((always_inline)) u64 hash_64(u64 val, unsigned int bits)
>{
> u64 hash = val;
>
>
> u64 n = hash;
> n <<= 18;
> hash -= n;
> n <<= 33;
> hash -= n;
> n <<= 3;
> hash += n;
> n <<= 3;
> hash -= n;
> n <<= 4;
> hash += n;
> n <<= 2;
> hash += n;
>
>
> return hash >> (64 - bits);
>}
>
>static inline __attribute__((always_inline)) u32 hash_32(u32 val, unsigned int bits)
>{
>
> u32 hash = val * 0x9e370001UL;
>
>
> return hash >> (32 - bits);
>}
>
>static inline __attribute__((always_inline)) unsigned long hash_ptr(const void *ptr, unsigned int bits)
>{
> return hash_32((unsigned long)ptr, bits);
>}
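>/*
> * hash_32() above is the kernel's multiplicative hash: multiply by the
> * 32-bit golden-ratio prime 0x9e370001 and keep the top `bits` bits, e.g.
> * hash_32(v, 8) == (u32)(v * 0x9e370001UL) >> 24. The shift/add chain in
> * hash_64() is the same multiply by the 64-bit golden-ratio prime from
> * include/linux/hash.h, strength-reduced for a target without a cheap
> * 64x64 multiply; hash_ptr() just feeds the 32-bit pointer value through
> * hash_32().
> */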
>
>
>
>
>struct kernel_symbol
>{
> unsigned long value;
> const char *name;
>};
>
>
>
>struct super_block;
>struct vfsmount;
>struct dentry;
>struct mnt_namespace;
>
>struct vfsmount {
> struct dentry *mnt_root;
> struct super_block *mnt_sb;
> int mnt_flags;
>};
>
>struct file;
>
>extern int mnt_want_write(struct vfsmount *mnt);
>extern int mnt_want_write_file(struct file *file);
>extern int mnt_clone_write(struct vfsmount *mnt);
>extern void mnt_drop_write(struct vfsmount *mnt);
>extern void mnt_drop_write_file(struct file *file);
>extern void mntput(struct vfsmount *mnt);
>extern struct vfsmount *mntget(struct vfsmount *mnt);
>extern void mnt_pin(struct vfsmount *mnt);
>extern void mnt_unpin(struct vfsmount *mnt);
>extern int __mnt_is_readonly(struct vfsmount *mnt);
>
>struct file_system_type;
>extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
> int flags, const char *name,
> void *data);
>
>extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
>extern void mark_mounts_for_expiry(struct list_head *mounts);
>
>extern dev_t name_to_dev_t(char *name);
>
>
>
>struct file;
>
>extern void fput(struct file *);
>
>struct file_operations;
>struct vfsmount;
>struct dentry;
>struct path;
>extern struct file *alloc_file(struct path *, fmode_t mode,
> const struct file_operations *fop);
>
>static inline __attribute__((always_inline)) void fput_light(struct file *file, int fput_needed)
>{
> if (fput_needed)
> fput(file);
>}
>
>extern struct file *fget(unsigned int fd);
>extern struct file *fget_light(unsigned int fd, int *fput_needed);
>extern struct file *fget_raw(unsigned int fd);
>extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
>extern void set_close_on_exec(unsigned int fd, int flag);
>extern void put_filp(struct file *);
>extern int alloc_fd(unsigned start, unsigned flags);
>extern int get_unused_fd(void);
>
>extern void put_unused_fd(unsigned int fd);
>
>extern void fd_install(unsigned int fd, struct file *file);
>
>
>
>
>
>
>
>
>
>__asm__(
>" .macro it, cond\n"
>" .endm\n"
>" .macro itt, cond\n"
>" .endm\n"
>" .macro ite, cond\n"
>" .endm\n"
>" .macro ittt, cond\n"
>" .endm\n"
>" .macro itte, cond\n"
>" .endm\n"
>" .macro itet, cond\n"
>" .endm\n"
>" .macro itee, cond\n"
>" .endm\n"
>" .macro itttt, cond\n"
>" .endm\n"
>" .macro ittte, cond\n"
>" .endm\n"
>" .macro ittet, cond\n"
>" .endm\n"
>" .macro ittee, cond\n"
>" .endm\n"
>" .macro itett, cond\n"
>" .endm\n"
>" .macro itete, cond\n"
>" .endm\n"
>" .macro iteet, cond\n"
>" .endm\n"
>" .macro iteee, cond\n"
>" .endm\n");
>
>
>struct exception_table_entry
>{
> unsigned long insn, fixup;
>};
>
>extern int fixup_exception(struct pt_regs *regs);
>
>
>
>
>
>extern int __get_user_bad(void);
>extern int __put_user_bad(void);
>
>static inline __attribute__((always_inline)) void set_fs(mm_segment_t fs)
>{
> current_thread_info()->addr_limit = fs;
> do { } while (0);
>}
>
>extern int __get_user_1(void *);
>extern int __get_user_2(void *);
>extern int __get_user_4(void *);
>
>extern int __put_user_1(void *, unsigned int);
>extern int __put_user_2(void *, unsigned int);
>extern int __put_user_4(void *, unsigned int);
>extern int __put_user_8(void *, unsigned long long);
>
>extern unsigned long __attribute__((warn_unused_result)) __copy_from_user(void *to, const void *from, unsigned long n);
>extern unsigned long __attribute__((warn_unused_result)) __copy_to_user(void *to, const void *from, unsigned long n);
>extern unsigned long __attribute__((warn_unused_result)) __copy_to_user_std(void *to, const void *from, unsigned long n);
>extern unsigned long __attribute__((warn_unused_result)) __clear_user(void *addr, unsigned long n);
>extern unsigned long __attribute__((warn_unused_result)) __clear_user_std(void *addr, unsigned long n);
>
>
>
>
>
>
>extern unsigned long __attribute__((warn_unused_result)) __strncpy_from_user(char *to, const char *from, unsigned long count);
>extern unsigned long __attribute__((warn_unused_result)) __strnlen_user(const char *s, long n);
>
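>/*
> * In the uaccess helpers below, the inline asm
> * ("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0") is the ARM
> * expansion of the access_ok() range check: it computes ptr + size with
> * carry and compares the result against
> * current_thread_info()->addr_limit in one flag-setting sequence,
> * yielding 0 only when the whole user range lies below the limit. Note
> * the fallback in copy_from_user(): if the check fails it zero-fills the
> * kernel buffer and returns n, the number of bytes not copied, which is
> * the usual copy_from_user() contract.
> */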
>static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_from_user(void *to, const void *from, unsigned long n)
>{
> if ((({ unsigned long flag, roksum; (void)0; __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" : "=&r" (flag), "=&r" (roksum) : "r" (from), "Ir" (n), "0" (current_thread_info()->addr_limit) : "cc"); flag; }) == 0))
> n = __copy_from_user(to, from, n);
> else
> ({ void *__p = (to); size_t __n = n; if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); });
> return n;
>}
>
>static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) copy_to_user(void *to, const void *from, unsigned long n)
>{
> if ((({ unsigned long flag, roksum; (void)0; __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" : "=&r" (flag), "=&r" (roksum) : "r" (to), "Ir" (n), "0" (current_thread_info()->addr_limit) : "cc"); flag; }) == 0))
> n = __copy_to_user(to, from, n);
> return n;
>}
>
>
>
>
>static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) clear_user(void *to, unsigned long n)
>{
> if ((({ unsigned long flag, roksum; (void)0; __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" : "=&r" (flag), "=&r" (roksum) : "r" (to), "Ir" (n), "0" (current_thread_info()->addr_limit) : "cc"); flag; }) == 0))
> n = __clear_user(to, n);
> return n;
>}
>
>static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) strncpy_from_user(char *dst, const char *src, long count)
>{
> long res = -14;
> if ((({ unsigned long flag, roksum; (void)0; __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" : "=&r" (flag), "=&r" (roksum) : "r" (src), "Ir" (1), "0" (current_thread_info()->addr_limit) : "cc"); flag; }) == 0))
> res = __strncpy_from_user(dst, src, count);
> return res;
>}
>
>
>
>static inline __attribute__((always_inline)) long __attribute__((warn_unused_result)) strnlen_user(const char *s, long n)
>{
> unsigned long res = 0;
>
> if (({ unsigned long flag; __asm__("cmp %2, %0; movlo %0, #0" : "=&r" (flag) : "0" (current_thread_info()->addr_limit), "r" (s) : "cc"); (flag == 0); }))
> res = __strnlen_user(s, n);
>
> return res;
>}
>
>
>
>struct linux_binprm;
>struct cred;
>struct rlimit;
>struct siginfo;
>struct sem_array;
>struct sembuf;
>struct kern_ipc_perm;
>struct audit_context;
>struct super_block;
>struct inode;
>struct dentry;
>struct file;
>struct vfsmount;
>struct path;
>struct qstr;
>struct nameidata;
>struct iattr;
>struct fown_struct;
>struct file_operations;
>struct shmid_kernel;
>struct msg_msg;
>struct msg_queue;
>struct xattr;
>struct xfrm_sec_ctx;
>struct mm_struct;
>
>struct ctl_table;
>struct audit_krule;
>struct user_namespace;
>struct timezone;
>
>
>
>
>
>extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
> int cap, int audit);
>extern int cap_settime(const struct timespec *ts, const struct timezone *tz);
>extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
>extern int cap_ptrace_traceme(struct task_struct *parent);
>extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
>extern int cap_capset(struct cred *new, const struct cred *old,
> const kernel_cap_t *effective,
> const kernel_cap_t *inheritable,
> const kernel_cap_t *permitted);
>extern int cap_bprm_set_creds(struct linux_binprm *bprm);
>extern int cap_bprm_secureexec(struct linux_binprm *bprm);
>extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
> const void *value, size_t size, int flags);
>extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
>extern int cap_inode_need_killpriv(struct dentry *dentry);
>extern int cap_inode_killpriv(struct dentry *dentry);
>extern int cap_file_mmap(struct file *file, unsigned long reqprot,
> unsigned long prot, unsigned long flags,
> unsigned long addr, unsigned long addr_only);
>extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
>extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
> unsigned long arg4, unsigned long arg5);
>extern int cap_task_setscheduler(struct task_struct *p);
>extern int cap_task_setioprio(struct task_struct *p, int ioprio);
>extern int cap_task_setnice(struct task_struct *p, int nice);
>extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
>
>struct msghdr;
>struct sk_buff;
>struct sock;
>struct sockaddr;
>struct socket;
>struct flowi;
>struct dst_entry;
>struct xfrm_selector;
>struct xfrm_policy;
>struct xfrm_state;
>struct xfrm_user_sec_ctx;
>struct seq_file;
>
>extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
>
>void reset_security_ops(void);
>
>
>extern unsigned long mmap_min_addr;
>extern unsigned long dac_mmap_min_addr;
>
>struct sched_param;
>struct request_sock;
>
>extern int mmap_min_addr_handler(struct ctl_table *table, int write,
> void *buffer, size_t *lenp, loff_t *ppos);
>
>
>
>typedef int (*initxattrs) (struct inode *inode,
> const struct xattr *xattr_array, void *fs_data);
>
>
>
>struct security_mnt_opts {
> char **mnt_opts;
> int *mnt_opts_flags;
> int num_mnt_opts;
>};
>
>static inline __attribute__((always_inline)) void security_init_mnt_opts(struct security_mnt_opts *opts)
>{
> opts->mnt_opts = ((void *)0);
> opts->mnt_opts_flags = ((void *)0);
> opts->num_mnt_opts = 0;
>}
>
>static inline __attribute__((always_inline)) void security_free_mnt_opts(struct security_mnt_opts *opts)
>{
> int i;
> if (opts->mnt_opts)
> for (i = 0; i < opts->num_mnt_opts; i++)
> kfree(opts->mnt_opts[i]);
> kfree(opts->mnt_opts);
> opts->mnt_opts = ((void *)0);
> kfree(opts->mnt_opts_flags);
> opts->mnt_opts_flags = ((void *)0);
> opts->num_mnt_opts = 0;
>}
>
>struct security_operations {
> char name[10 + 1];
>
> int (*binder_set_context_mgr) (struct task_struct *mgr);
> int (*binder_transaction) (struct task_struct *from, struct task_struct *to);
> int (*binder_transfer_binder) (struct task_struct *from, struct task_struct *to);
> int (*binder_transfer_file) (struct task_struct *from, struct task_struct *to, struct file *file);
>
> int (*ptrace_access_check) (struct task_struct *child, unsigned int mode);
> int (*ptrace_traceme) (struct task_struct *parent);
> int (*capget) (struct task_struct *target,
> kernel_cap_t *effective,
> kernel_cap_t *inheritable, kernel_cap_t *permitted);
> int (*capset) (struct cred *new,
> const struct cred *old,
> const kernel_cap_t *effective,
> const kernel_cap_t *inheritable,
> const kernel_cap_t *permitted);
> int (*capable) (const struct cred *cred, struct user_namespace *ns,
> int cap, int audit);
> int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
> int (*quota_on) (struct dentry *dentry);
> int (*syslog) (int type);
> int (*settime) (const struct timespec *ts, const struct timezone *tz);
> int (*vm_enough_memory) (struct mm_struct *mm, long pages);
>
> int (*bprm_set_creds) (struct linux_binprm *bprm);
> int (*bprm_check_security) (struct linux_binprm *bprm);
> int (*bprm_secureexec) (struct linux_binprm *bprm);
> void (*bprm_committing_creds) (struct linux_binprm *bprm);
> void (*bprm_committed_creds) (struct linux_binprm *bprm);
>
> int (*sb_alloc_security) (struct super_block *sb);
> void (*sb_free_security) (struct super_block *sb);
> int (*sb_copy_data) (char *orig, char *copy);
> int (*sb_remount) (struct super_block *sb, void *data);
> int (*sb_kern_mount) (struct super_block *sb, int flags, void *data);
> int (*sb_show_options) (struct seq_file *m, struct super_block *sb);
> int (*sb_statfs) (struct dentry *dentry);
> int (*sb_mount) (char *dev_name, struct path *path,
> char *type, unsigned long flags, void *data);
> int (*sb_umount) (struct vfsmount *mnt, int flags);
> int (*sb_pivotroot) (struct path *old_path,
> struct path *new_path);
> int (*sb_set_mnt_opts) (struct super_block *sb,
> struct security_mnt_opts *opts);
> void (*sb_clone_mnt_opts) (const struct super_block *oldsb,
> struct super_block *newsb);
> int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
>
> int (*inode_alloc_security) (struct inode *inode);
> void (*inode_free_security) (struct inode *inode);
> int (*inode_init_security) (struct inode *inode, struct inode *dir,
> const struct qstr *qstr, char **name,
> void **value, size_t *len);
> int (*inode_create) (struct inode *dir,
> struct dentry *dentry, umode_t mode);
> int (*inode_link) (struct dentry *old_dentry,
> struct inode *dir, struct dentry *new_dentry);
> int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
> int (*inode_symlink) (struct inode *dir,
> struct dentry *dentry, const char *old_name);
> int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, umode_t mode);
> int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
> int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
> umode_t mode, dev_t dev);
> int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
> struct inode *new_dir, struct dentry *new_dentry);
> int (*inode_readlink) (struct dentry *dentry);
> int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
> int (*inode_permission) (struct inode *inode, int mask);
> int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
> int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
> int (*inode_setxattr) (struct dentry *dentry, const char *name,
> const void *value, size_t size, int flags);
> void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
> const void *value, size_t size, int flags);
> int (*inode_getxattr) (struct dentry *dentry, const char *name);
> int (*inode_listxattr) (struct dentry *dentry);
> int (*inode_removexattr) (struct dentry *dentry, const char *name);
> int (*inode_need_killpriv) (struct dentry *dentry);
> int (*inode_killpriv) (struct dentry *dentry);
> int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
> int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
> int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
> void (*inode_getsecid) (const struct inode *inode, u32 *secid);
>
> int (*file_permission) (struct file *file, int mask);
> int (*file_alloc_security) (struct file *file);
> void (*file_free_security) (struct file *file);
> int (*file_ioctl) (struct file *file, unsigned int cmd,
> unsigned long arg);
> int (*file_mmap) (struct file *file,
> unsigned long reqprot, unsigned long prot,
> unsigned long flags, unsigned long addr,
> unsigned long addr_only);
> int (*file_mprotect) (struct vm_area_struct *vma,
> unsigned long reqprot,
> unsigned long prot);
> int (*file_lock) (struct file *file, unsigned int cmd);
> int (*file_fcntl) (struct file *file, unsigned int cmd,
> unsigned long arg);
> int (*file_set_fowner) (struct file *file);
> int (*file_send_sigiotask) (struct task_struct *tsk,
> struct fown_struct *fown, int sig);
> int (*file_receive) (struct file *file);
> int (*dentry_open) (struct file *file, const struct cred *cred);
>
> int (*task_create) (unsigned long clone_flags);
> void (*task_free) (struct task_struct *task);
> int (*cred_alloc_blank) (struct cred *cred, gfp_t gfp);
> void (*cred_free) (struct cred *cred);
> int (*cred_prepare)(struct cred *new, const struct cred *old,
> gfp_t gfp);
> void (*cred_transfer)(struct cred *new, const struct cred *old);
> int (*kernel_act_as)(struct cred *new, u32 secid);
> int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
> int (*kernel_module_request)(char *kmod_name);
> int (*task_fix_setuid) (struct cred *new, const struct cred *old,
> int flags);
> int (*task_setpgid) (struct task_struct *p, pid_t pgid);
> int (*task_getpgid) (struct task_struct *p);
> int (*task_getsid) (struct task_struct *p);
> void (*task_getsecid) (struct task_struct *p, u32 *secid);
> int (*task_setnice) (struct task_struct *p, int nice);
> int (*task_setioprio) (struct task_struct *p, int ioprio);
> int (*task_getioprio) (struct task_struct *p);
> int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
> struct rlimit *new_rlim);
> int (*task_setscheduler) (struct task_struct *p);
> int (*task_getscheduler) (struct task_struct *p);
> int (*task_movememory) (struct task_struct *p);
> int (*task_kill) (struct task_struct *p,
> struct siginfo *info, int sig, u32 secid);
> int (*task_wait) (struct task_struct *p);
> int (*task_prctl) (int option, unsigned long arg2,
> unsigned long arg3, unsigned long arg4,
> unsigned long arg5);
> void (*task_to_inode) (struct task_struct *p, struct inode *inode);
>
> int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
> void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
>
> int (*msg_msg_alloc_security) (struct msg_msg *msg);
> void (*msg_msg_free_security) (struct msg_msg *msg);
>
> int (*msg_queue_alloc_security) (struct msg_queue *msq);
> void (*msg_queue_free_security) (struct msg_queue *msq);
> int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
> int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
> int (*msg_queue_msgsnd) (struct msg_queue *msq,
> struct msg_msg *msg, int msqflg);
> int (*msg_queue_msgrcv) (struct msg_queue *msq,
> struct msg_msg *msg,
> struct task_struct *target,
> long type, int mode);
>
> int (*shm_alloc_security) (struct shmid_kernel *shp);
> void (*shm_free_security) (struct shmid_kernel *shp);
> int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
> int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
> int (*shm_shmat) (struct shmid_kernel *shp,
> char *shmaddr, int shmflg);
>
> int (*sem_alloc_security) (struct sem_array *sma);
> void (*sem_free_security) (struct sem_array *sma);
> int (*sem_associate) (struct sem_array *sma, int semflg);
> int (*sem_semctl) (struct sem_array *sma, int cmd);
> int (*sem_semop) (struct sem_array *sma,
> struct sembuf *sops, unsigned nsops, int alter);
>
> int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
>
> void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
>
> int (*getprocattr) (struct task_struct *p, char *name, char **value);
> int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
> int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
> int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
> void (*release_secctx) (char *secdata, u32 seclen);
>
> int (*inode_notifysecctx)(struct inode *inode, void *ctx, u32 ctxlen);
> int (*inode_setsecctx)(struct dentry *dentry, void *ctx, u32 ctxlen);
> int (*inode_getsecctx)(struct inode *inode, void **ctx, u32 *ctxlen);
>
>
> int (*unix_stream_connect) (struct sock *sock, struct sock *other, struct sock *newsk);
> int (*unix_may_send) (struct socket *sock, struct socket *other);
>
> int (*socket_create) (int family, int type, int protocol, int kern);
> int (*socket_post_create) (struct socket *sock, int family,
> int type, int protocol, int kern);
> int (*socket_bind) (struct socket *sock,
> struct sockaddr *address, int addrlen);
> int (*socket_connect) (struct socket *sock,
> struct sockaddr *address, int addrlen);
> int (*socket_listen) (struct socket *sock, int backlog);
> int (*socket_accept) (struct socket *sock, struct socket *newsock);
> int (*socket_sendmsg) (struct socket *sock,
> struct msghdr *msg, int size);
> int (*socket_recvmsg) (struct socket *sock,
> struct msghdr *msg, int size, int flags);
> int (*socket_getsockname) (struct socket *sock);
> int (*socket_getpeername) (struct socket *sock);
> int (*socket_getsockopt) (struct socket *sock, int level, int optname);
> int (*socket_setsockopt) (struct socket *sock, int level, int optname);
> int (*socket_shutdown) (struct socket *sock, int how);
> int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
> int (*socket_getpeersec_stream) (struct socket *sock, char *optval, int *optlen, unsigned len);
> int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
> int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
> void (*sk_free_security) (struct sock *sk);
> void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
> void (*sk_getsecid) (struct sock *sk, u32 *secid);
> void (*sock_graft) (struct sock *sk, struct socket *parent);
> int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
> struct request_sock *req);
> void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
> void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
> int (*secmark_relabel_packet) (u32 secid);
> void (*secmark_refcount_inc) (void);
> void (*secmark_refcount_dec) (void);
> void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
> int (*tun_dev_create)(void);
> void (*tun_dev_post_create)(struct sock *sk);
> int (*tun_dev_attach)(struct sock *sk);
>
> int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
> int (*audit_rule_known) (struct audit_krule *krule);
> int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
> struct audit_context *actx);
> void (*audit_rule_free) (void *lsmrule);
>
>};
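>/*
> * struct security_operations above is the LSM hook table: a single
> * security module fills in the hooks it implements and registers the
> * table through the entry points declared just below, after which
> * security_fixup_ops() points every hook left NULL at a default stub.
> * A minimal (hypothetical) module would look like:
> *
> *   static struct security_operations demo_ops = {
> *       .name             = "demo",
> *       .inode_permission = demo_inode_permission,
> *   };
> *
> *   if (security_module_enable(&demo_ops))
> *       register_security(&demo_ops);
> *
> * The security_*() functions declared after that are the kernel-side
> * entry points that dispatch into whichever table is registered.
> */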
>
>
>extern int security_init(void);
>extern int security_module_enable(struct security_operations *ops);
>extern int register_security(struct security_operations *ops);
>extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) security_fixup_ops(struct security_operations *ops);
>
>
>
>int security_binder_set_context_mgr(struct task_struct *mgr);
>int security_binder_transaction(struct task_struct *from, struct task_struct *to);
>int security_binder_transfer_binder(struct task_struct *from, struct task_struct *to);
>int security_binder_transfer_file(struct task_struct *from, struct task_struct *to, struct file *file);
>int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
>int security_ptrace_traceme(struct task_struct *parent);
>int security_capget(struct task_struct *target,
> kernel_cap_t *effective,
> kernel_cap_t *inheritable,
> kernel_cap_t *permitted);
>int security_capset(struct cred *new, const struct cred *old,
> const kernel_cap_t *effective,
> const kernel_cap_t *inheritable,
> const kernel_cap_t *permitted);
>int security_capable(const struct cred *cred, struct user_namespace *ns,
> int cap);
>int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
> int cap);
>int security_quotactl(int cmds, int type, int id, struct super_block *sb);
>int security_quota_on(struct dentry *dentry);
>int security_syslog(int type);
>int security_settime(const struct timespec *ts, const struct timezone *tz);
>int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
>int security_bprm_set_creds(struct linux_binprm *bprm);
>int security_bprm_check(struct linux_binprm *bprm);
>void security_bprm_committing_creds(struct linux_binprm *bprm);
>void security_bprm_committed_creds(struct linux_binprm *bprm);
>int security_bprm_secureexec(struct linux_binprm *bprm);
>int security_sb_alloc(struct super_block *sb);
>void security_sb_free(struct super_block *sb);
>int security_sb_copy_data(char *orig, char *copy);
>int security_sb_remount(struct super_block *sb, void *data);
>int security_sb_kern_mount(struct super_block *sb, int flags, void *data);
>int security_sb_show_options(struct seq_file *m, struct super_block *sb);
>int security_sb_statfs(struct dentry *dentry);
>int security_sb_mount(char *dev_name, struct path *path,
> char *type, unsigned long flags, void *data);
>int security_sb_umount(struct vfsmount *mnt, int flags);
>int security_sb_pivotroot(struct path *old_path, struct path *new_path);
>int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts);
>void security_sb_clone_mnt_opts(const struct super_block *oldsb,
> struct super_block *newsb);
>int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts);
>
>int security_inode_alloc(struct inode *inode);
>void security_inode_free(struct inode *inode);
>int security_inode_init_security(struct inode *inode, struct inode *dir,
> const struct qstr *qstr,
> initxattrs initxattrs, void *fs_data);
>int security_old_inode_init_security(struct inode *inode, struct inode *dir,
> const struct qstr *qstr, char **name,
> void **value, size_t *len);
>int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
>int security_inode_link(struct dentry *old_dentry, struct inode *dir,
> struct dentry *new_dentry);
>int security_inode_unlink(struct inode *dir, struct dentry *dentry);
>int security_inode_symlink(struct inode *dir, struct dentry *dentry,
> const char *old_name);
>int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
>int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
>int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev);
>int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
> struct inode *new_dir, struct dentry *new_dentry);
>int security_inode_readlink(struct dentry *dentry);
>int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
>int security_inode_permission(struct inode *inode, int mask);
>int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
>int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
>int security_inode_setxattr(struct dentry *dentry, const char *name,
> const void *value, size_t size, int flags);
>void security_inode_post_setxattr(struct dentry *dentry, const char *name,
> const void *value, size_t size, int flags);
>int security_inode_getxattr(struct dentry *dentry, const char *name);
>int security_inode_listxattr(struct dentry *dentry);
>int security_inode_removexattr(struct dentry *dentry, const char *name);
>int security_inode_need_killpriv(struct dentry *dentry);
>int security_inode_killpriv(struct dentry *dentry);
>int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
>int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags);
>int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size);
>void security_inode_getsecid(const struct inode *inode, u32 *secid);
>int security_file_permission(struct file *file, int mask);
>int security_file_alloc(struct file *file);
>void security_file_free(struct file *file);
>int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
>int security_file_mmap(struct file *file, unsigned long reqprot,
> unsigned long prot, unsigned long flags,
> unsigned long addr, unsigned long addr_only);
>int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
> unsigned long prot);
>int security_file_lock(struct file *file, unsigned int cmd);
>int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
>int security_file_set_fowner(struct file *file);
>int security_file_send_sigiotask(struct task_struct *tsk,
> struct fown_struct *fown, int sig);
>int security_file_receive(struct file *file);
>int security_dentry_open(struct file *file, const struct cred *cred);
>int security_task_create(unsigned long clone_flags);
>void security_task_free(struct task_struct *task);
>int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
>void security_cred_free(struct cred *cred);
>int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp);
>void security_transfer_creds(struct cred *new, const struct cred *old);
>int security_kernel_act_as(struct cred *new, u32 secid);
>int security_kernel_create_files_as(struct cred *new, struct inode *inode);
>int security_kernel_module_request(char *kmod_name);
>int security_task_fix_setuid(struct cred *new, const struct cred *old,
> int flags);
>int security_task_setpgid(struct task_struct *p, pid_t pgid);
>int security_task_getpgid(struct task_struct *p);
>int security_task_getsid(struct task_struct *p);
>void security_task_getsecid(struct task_struct *p, u32 *secid);
>int security_task_setnice(struct task_struct *p, int nice);
>int security_task_setioprio(struct task_struct *p, int ioprio);
>int security_task_getioprio(struct task_struct *p);
>int security_task_setrlimit(struct task_struct *p, unsigned int resource,
> struct rlimit *new_rlim);
>int security_task_setscheduler(struct task_struct *p);
>int security_task_getscheduler(struct task_struct *p);
>int security_task_movememory(struct task_struct *p);
>int security_task_kill(struct task_struct *p, struct siginfo *info,
> int sig, u32 secid);
>int security_task_wait(struct task_struct *p);
>int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
> unsigned long arg4, unsigned long arg5);
>void security_task_to_inode(struct task_struct *p, struct inode *inode);
>int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
>void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid);
>int security_msg_msg_alloc(struct msg_msg *msg);
>void security_msg_msg_free(struct msg_msg *msg);
>int security_msg_queue_alloc(struct msg_queue *msq);
>void security_msg_queue_free(struct msg_queue *msq);
>int security_msg_queue_associate(struct msg_queue *msq, int msqflg);
>int security_msg_queue_msgctl(struct msg_queue *msq, int cmd);
>int security_msg_queue_msgsnd(struct msg_queue *msq,
> struct msg_msg *msg, int msqflg);
>int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
> struct task_struct *target, long type, int mode);
>int security_shm_alloc(struct shmid_kernel *shp);
>void security_shm_free(struct shmid_kernel *shp);
>int security_shm_associate(struct shmid_kernel *shp, int shmflg);
>int security_shm_shmctl(struct shmid_kernel *shp, int cmd);
>int security_shm_shmat(struct shmid_kernel *shp, char *shmaddr, int shmflg);
>int security_sem_alloc(struct sem_array *sma);
>void security_sem_free(struct sem_array *sma);
>int security_sem_associate(struct sem_array *sma, int semflg);
>int security_sem_semctl(struct sem_array *sma, int cmd);
>int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
> unsigned nsops, int alter);
>void security_d_instantiate(struct dentry *dentry, struct inode *inode);
>int security_getprocattr(struct task_struct *p, char *name, char **value);
>int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size);
>int security_netlink_send(struct sock *sk, struct sk_buff *skb);
>int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
>int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
>void security_release_secctx(char *secdata, u32 seclen);
>
>int security_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen);
>int security_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen);
>int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen);
>
>int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk);
>int security_unix_may_send(struct socket *sock, struct socket *other);
>int security_socket_create(int family, int type, int protocol, int kern);
>int security_socket_post_create(struct socket *sock, int family,
> int type, int protocol, int kern);
>int security_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen);
>int security_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen);
>int security_socket_listen(struct socket *sock, int backlog);
>int security_socket_accept(struct socket *sock, struct socket *newsock);
>int security_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size);
>int security_socket_recvmsg(struct socket *sock, struct msghdr *msg,
> int size, int flags);
>int security_socket_getsockname(struct socket *sock);
>int security_socket_getpeername(struct socket *sock);
>int security_socket_getsockopt(struct socket *sock, int level, int optname);
>int security_socket_setsockopt(struct socket *sock, int level, int optname);
>int security_socket_shutdown(struct socket *sock, int how);
>int security_sock_rcv_skb(struct sock *sk, struct sk_buff *skb);
security_socket_getpeersec_stream(struct socket *sock, char *optval, > int *optlen, unsigned len); >int security_socket_getpeersec_dgram(struct socket *sock, struct sk_buff *skb, u32 *secid); >int security_sk_alloc(struct sock *sk, int family, gfp_t priority); >void security_sk_free(struct sock *sk); >void security_sk_clone(const struct sock *sk, struct sock *newsk); >void security_sk_classify_flow(struct sock *sk, struct flowi *fl); >void security_req_classify_flow(const struct request_sock *req, struct flowi *fl); >void security_sock_graft(struct sock*sk, struct socket *parent); >int security_inet_conn_request(struct sock *sk, > struct sk_buff *skb, struct request_sock *req); >void security_inet_csk_clone(struct sock *newsk, > const struct request_sock *req); >void security_inet_conn_established(struct sock *sk, > struct sk_buff *skb); >int security_secmark_relabel_packet(u32 secid); >void security_secmark_refcount_inc(void); >void security_secmark_refcount_dec(void); >int security_tun_dev_create(void); >void security_tun_dev_post_create(struct sock *sk); >int security_tun_dev_attach(struct sock *sk); > >static inline __attribute__((always_inline)) int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_xfrm_policy_clone(struct xfrm_sec_ctx *old, struct xfrm_sec_ctx **new_ctxp) >{ > return 0; >} > >static inline __attribute__((always_inline)) void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx) >{ >} > >static inline __attribute__((always_inline)) int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_xfrm_state_alloc(struct xfrm_state *x, > struct xfrm_user_sec_ctx *sec_ctx) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_xfrm_state_alloc_acquire(struct xfrm_state *x, > struct xfrm_sec_ctx *polsec, u32 secid) >{ > return 0; >} > >static inline __attribute__((always_inline)) void security_xfrm_state_free(struct xfrm_state *x) >{ >} > >static inline __attribute__((always_inline)) int security_xfrm_state_delete(struct xfrm_state *x) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_xfrm_state_pol_flow_match(struct xfrm_state *x, > struct xfrm_policy *xp, const struct flowi *fl) >{ > return 1; >} > >static inline __attribute__((always_inline)) int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) >{ > return 0; >} > >static inline __attribute__((always_inline)) void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) >{ >} > >static inline __attribute__((always_inline)) int security_path_unlink(struct path *dir, struct dentry *dentry) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_mkdir(struct path *dir, struct dentry *dentry, > umode_t mode) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_rmdir(struct path *dir, struct dentry *dentry) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_mknod(struct path *dir, struct dentry *dentry, > umode_t mode, unsigned int dev) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_truncate(struct path *path) >{ > return 0; >} > >static inline __attribute__((always_inline)) int 
security_path_symlink(struct path *dir, struct dentry *dentry, > const char *old_name) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_link(struct dentry *old_dentry, > struct path *new_dir, > struct dentry *new_dentry) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_rename(struct path *old_dir, > struct dentry *old_dentry, > struct path *new_dir, > struct dentry *new_dentry) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_chmod(struct path *path, umode_t mode) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_chown(struct path *path, uid_t uid, gid_t gid) >{ > return 0; >} > >static inline __attribute__((always_inline)) int security_path_chroot(struct path *path) >{ > return 0; >} > >int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); >int security_audit_rule_known(struct audit_krule *krule); >int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, > struct audit_context *actx); >void security_audit_rule_free(void *lsmrule); > >static inline __attribute__((always_inline)) struct dentry *securityfs_create_dir(const char *name, > struct dentry *parent) >{ > return ERR_PTR(-19); >} > >static inline __attribute__((always_inline)) struct dentry *securityfs_create_file(const char *name, > umode_t mode, > struct dentry *parent, > void *data, > const struct file_operations *fops) >{ > return ERR_PTR(-19); >} > >static inline __attribute__((always_inline)) void securityfs_remove(struct dentry *dentry) >{} > > > > > >static inline __attribute__((always_inline)) char *alloc_secdata(void) >{ > return (char *)get_zeroed_page(((( gfp_t)0x10u) | (( gfp_t)0x40u) | (( gfp_t)0x80u))); >} > >static inline __attribute__((always_inline)) void free_secdata(void *secdata) >{ > free_pages(((unsigned long)secdata), 0); >} > > > > > > > > > > > > >struct mem_cgroup; >struct page_cgroup; >struct page; >struct mm_struct; > > >enum mem_cgroup_page_stat_item { > MEMCG_NR_FILE_MAPPED, >}; > >struct mem_cgroup_reclaim_cookie { > struct zone *zone; > int priority; > unsigned int generation; >}; > >extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm, > gfp_t gfp_mask); > >extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm, > struct page *page, gfp_t mask, struct mem_cgroup **memcgp); >extern void mem_cgroup_commit_charge_swapin(struct page *page, > struct mem_cgroup *memcg); >extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg); > >extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm, > gfp_t gfp_mask); > >struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); >struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *, > enum lru_list); >void mem_cgroup_lru_del_list(struct page *, enum lru_list); >void mem_cgroup_lru_del(struct page *); >struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *, > enum lru_list, enum lru_list); > > >extern void mem_cgroup_uncharge_start(void); >extern void mem_cgroup_uncharge_end(void); > >extern void mem_cgroup_uncharge_page(struct page *page); >extern void mem_cgroup_uncharge_cache_page(struct page *page); > >extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, > int order); >int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); > >extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); >extern struct mem_cgroup 
*mem_cgroup_from_task(struct task_struct *p); >extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm); > >extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); >extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont); > >static inline __attribute__((always_inline)) >int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) >{ > struct mem_cgroup *memcg; > rcu_read_lock(); > memcg = mem_cgroup_from_task(({ typeof(*((mm)->owner)) *_________p1 = (typeof(*((mm)->owner))* )(*(volatile typeof(((mm)->owner)) *)&(((mm)->owner))); do { } while (0); ; do { } while(0); ((typeof(*((mm)->owner)) *)(_________p1)); })); > rcu_read_unlock(); > return cgroup == memcg; >} > >extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); > >extern int >mem_cgroup_prepare_migration(struct page *page, > struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask); >extern void mem_cgroup_end_migration(struct mem_cgroup *memcg, > struct page *oldpage, struct page *newpage, bool migration_ok); > >struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, > struct mem_cgroup *, > struct mem_cgroup_reclaim_cookie *); >void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); > > > > >int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, > struct zone *zone); >int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, > struct zone *zone); >int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); >unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, > int nid, int zid, unsigned int lrumask); >struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, > struct zone *zone); >struct zone_reclaim_stat* >mem_cgroup_get_reclaim_stat_from_page(struct page *page); >extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, > struct task_struct *p); >extern void mem_cgroup_replace_page_cache(struct page *oldpage, > struct page *newpage); > > > > > >static inline __attribute__((always_inline)) bool mem_cgroup_disabled(void) >{ > if (mem_cgroup_subsys.disabled) > return true; > return false; >} > >void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, > unsigned long *flags); > >extern atomic_t memcg_moving; > >static inline __attribute__((always_inline)) void mem_cgroup_begin_update_page_stat(struct page *page, > bool *locked, unsigned long *flags) >{ > if (mem_cgroup_disabled()) > return; > rcu_read_lock(); > *locked = false; > if ((*(volatile int *)&(&memcg_moving)->counter)) > __mem_cgroup_begin_update_page_stat(page, locked, flags); >} > >void __mem_cgroup_end_update_page_stat(struct page *page, > unsigned long *flags); >static inline __attribute__((always_inline)) void mem_cgroup_end_update_page_stat(struct page *page, > bool *locked, unsigned long *flags) >{ > if (mem_cgroup_disabled()) > return; > if (*locked) > __mem_cgroup_end_update_page_stat(page, flags); > rcu_read_unlock(); >} > >void mem_cgroup_update_page_stat(struct page *page, > enum mem_cgroup_page_stat_item idx, > int val); > >static inline __attribute__((always_inline)) void mem_cgroup_inc_page_stat(struct page *page, > enum mem_cgroup_page_stat_item idx) >{ > mem_cgroup_update_page_stat(page, idx, 1); >} > >static inline __attribute__((always_inline)) void mem_cgroup_dec_page_stat(struct page *page, > enum mem_cgroup_page_stat_item idx) >{ > mem_cgroup_update_page_stat(page, idx, -1); >} > >unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, > gfp_t gfp_mask, 
> unsigned long *total_scanned); >u64 mem_cgroup_get_limit(struct mem_cgroup *memcg); > >void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); > >static inline __attribute__((always_inline)) bool >mem_cgroup_bad_page_check(struct page *page) >{ > return false; >} > >static inline __attribute__((always_inline)) void >mem_cgroup_print_bad_page(struct page *page) >{ >} > > >enum { > UNDER_LIMIT, > SOFT_LIMIT, > OVER_LIMIT, >}; > >struct sock; > > > > >static inline __attribute__((always_inline)) void sock_update_memcg(struct sock *sk) >{ >} >static inline __attribute__((always_inline)) void sock_release_memcg(struct sock *sk) >{ >} > > > > > > > >struct notifier_block; > >struct bio; > >static inline __attribute__((always_inline)) int current_is_kswapd(void) >{ > return (get_current())->flags & 0x00040000; >} > >union swap_header { > struct { > char reserved[((1UL) << 12) - 10]; > char magic[10]; > } magic; > struct { > char bootbits[1024]; > __u32 version; > __u32 last_page; > __u32 nr_badpages; > unsigned char sws_uuid[16]; > unsigned char sws_volume[16]; > __u32 padding[117]; > __u32 badpages[1]; > } info; >}; > > > > > >typedef struct { > unsigned long val; >} swp_entry_t; > > > > > >struct reclaim_state { > unsigned long reclaimed_slab; >}; > > > >struct address_space; >struct sysinfo; >struct writeback_control; >struct zone; > >struct swap_extent { > struct list_head list; > unsigned long start_page; > unsigned long nr_pages; > sector_t start_block; >}; > >enum { > SWP_USED = (1 << 0), > SWP_WRITEOK = (1 << 1), > SWP_DISCARDABLE = (1 << 2), > SWP_DISCARDING = (1 << 3), > SWP_SOLIDSTATE = (1 << 4), > SWP_CONTINUED = (1 << 5), > SWP_BLKDEV = (1 << 6), > > SWP_SCANNING = (1 << 8), >}; > >struct swap_info_struct { > unsigned long flags; > signed short prio; > signed char type; > signed char next; > unsigned int max; > unsigned char *swap_map; > unsigned int lowest_bit; > unsigned int highest_bit; > unsigned int pages; > unsigned int inuse_pages; > unsigned int cluster_next; > unsigned int cluster_nr; > unsigned int lowest_alloc; > unsigned int highest_alloc; > struct swap_extent *curr_swap_extent; > struct swap_extent first_swap_extent; > struct block_device *bdev; > struct file *swap_file; > unsigned int old_block_size; >}; > >struct swap_list_t { > int head; > int next; >}; > > > > > >extern unsigned long totalram_pages; >extern unsigned long totalreserve_pages; >extern unsigned long dirty_balance_reserve; >extern unsigned int nr_free_buffer_pages(void); >extern unsigned int nr_free_pagecache_pages(void); > > > > > > >extern void __lru_cache_add(struct page *, enum lru_list lru); >extern void lru_cache_add_lru(struct page *, enum lru_list lru); >extern void lru_add_page_tail(struct zone* zone, > struct page *page, struct page *page_tail); >extern void activate_page(struct page *); >extern void mark_page_accessed(struct page *); >extern void lru_add_drain(void); >extern void lru_add_drain_cpu(int cpu); >extern int lru_add_drain_all(void); >extern void rotate_reclaimable_page(struct page *page); >extern void deactivate_page(struct page *page); >extern void swap_setup(void); > >extern void add_page_to_unevictable_list(struct page *page); > > > > > >static inline __attribute__((always_inline)) void lru_cache_add_anon(struct page *page) >{ > __lru_cache_add(page, LRU_INACTIVE_ANON); >} > >static inline __attribute__((always_inline)) void lru_cache_add_file(struct page *page) >{ > __lru_cache_add(page, LRU_INACTIVE_FILE); >} > > >extern unsigned long 
try_to_free_pages(struct zonelist *zonelist, int order, > gfp_t gfp_mask, nodemask_t *mask); >extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file); >extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, > gfp_t gfp_mask, bool noswap); >extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, > gfp_t gfp_mask, bool noswap, > struct zone *zone, > unsigned long *nr_scanned); >extern unsigned long shrink_all_memory(unsigned long nr_pages); >extern int vm_swappiness; >extern int remove_mapping(struct address_space *mapping, struct page *page); >extern long vm_total_pages; > >static inline __attribute__((always_inline)) int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) >{ > return 0; >} > > >extern int page_evictable(struct page *page, struct vm_area_struct *vma); >extern void check_move_unevictable_pages(struct page **, int nr_pages); > >extern unsigned long scan_unevictable_pages; >extern int scan_unevictable_handler(struct ctl_table *, int, > void *, size_t *, loff_t *); > > > > >static inline __attribute__((always_inline)) int scan_unevictable_register_node(struct node *node) >{ > return 0; >} >static inline __attribute__((always_inline)) void scan_unevictable_unregister_node(struct node *node) >{ >} > > >extern int kswapd_run(int nid); >extern void kswapd_stop(int nid); > >extern int mem_cgroup_swappiness(struct mem_cgroup *mem); > >static inline __attribute__((always_inline)) void mem_cgroup_uncharge_swap(swp_entry_t ent) >{ >} > >static inline __attribute__((always_inline)) void show_swap_cache_info(void) >{ >} > > > > >static inline __attribute__((always_inline)) int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) >{ > return 0; >} > >static inline __attribute__((always_inline)) void swap_shmem_alloc(swp_entry_t swp) >{ >} > >static inline __attribute__((always_inline)) int swap_duplicate(swp_entry_t swp) >{ > return 0; >} > >static inline __attribute__((always_inline)) void swap_free(swp_entry_t swp) >{ >} > >static inline __attribute__((always_inline)) void swapcache_free(swp_entry_t swp, struct page *page) >{ >} > >static inline __attribute__((always_inline)) struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, > struct vm_area_struct *vma, unsigned long addr) >{ > return ((void *)0); >} > >static inline __attribute__((always_inline)) int swap_writepage(struct page *p, struct writeback_control *wbc) >{ > return 0; >} > >static inline __attribute__((always_inline)) struct page *lookup_swap_cache(swp_entry_t swp) >{ > return ((void *)0); >} > >static inline __attribute__((always_inline)) int add_to_swap(struct page *page) >{ > return 0; >} > >static inline __attribute__((always_inline)) int add_to_swap_cache(struct page *page, swp_entry_t entry, > gfp_t gfp_mask) >{ > return -1; >} > >static inline __attribute__((always_inline)) void __delete_from_swap_cache(struct page *page) >{ >} > >static inline __attribute__((always_inline)) void delete_from_swap_cache(struct page *page) >{ >} > > > >static inline __attribute__((always_inline)) int try_to_free_swap(struct page *page) >{ > return 0; >} > >static inline __attribute__((always_inline)) swp_entry_t get_swap_page(void) >{ > swp_entry_t entry; > entry.val = 0; > return entry; >} > > >static inline __attribute__((always_inline)) void put_swap_token(struct mm_struct *mm) >{ >} > >static inline __attribute__((always_inline)) void grab_swap_token(struct mm_struct *mm) >{ >} > >static inline __attribute__((always_inline)) int 
has_swap_token(struct mm_struct *mm) >{ > return 0; >} > >static inline __attribute__((always_inline)) void disable_swap_token(struct mem_cgroup *memcg) >{ >} > >static inline __attribute__((always_inline)) void >mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) >{ >} > > >static inline __attribute__((always_inline)) int >mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) >{ > return 0; >} > > > > > > > > > > > > > > > > >extern unsigned long max_low_pfn; >extern unsigned long min_low_pfn; > > > > >extern unsigned long max_pfn; > > > > > > >typedef struct bootmem_data { > unsigned long node_min_pfn; > unsigned long node_low_pfn; > void *node_bootmem_map; > unsigned long last_end_off; > unsigned long hint_idx; > struct list_head list; >} bootmem_data_t; > >extern bootmem_data_t bootmem_node_data[]; > > >extern unsigned long bootmem_bootmap_pages(unsigned long); > >extern unsigned long init_bootmem_node(pg_data_t *pgdat, > unsigned long freepfn, > unsigned long startpfn, > unsigned long endpfn); >extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); > >extern unsigned long free_low_memory_core_early(int nodeid); >extern unsigned long free_all_bootmem_node(pg_data_t *pgdat); >extern unsigned long free_all_bootmem(void); > >extern void free_bootmem_node(pg_data_t *pgdat, > unsigned long addr, > unsigned long size); >extern void free_bootmem(unsigned long addr, unsigned long size); >extern void free_bootmem_late(unsigned long addr, unsigned long size); > >extern int reserve_bootmem(unsigned long addr, > unsigned long size, > int flags); >extern int reserve_bootmem_node(pg_data_t *pgdat, > unsigned long physaddr, > unsigned long size, > int flags); > >extern void *__alloc_bootmem(unsigned long size, > unsigned long align, > unsigned long goal); >extern void *__alloc_bootmem_nopanic(unsigned long size, > unsigned long align, > unsigned long goal); >extern void *__alloc_bootmem_node(pg_data_t *pgdat, > unsigned long size, > unsigned long align, > unsigned long goal); >void *__alloc_bootmem_node_high(pg_data_t *pgdat, > unsigned long size, > unsigned long align, > unsigned long goal); >extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat, > unsigned long size, > unsigned long align, > unsigned long goal); >extern void *__alloc_bootmem_low(unsigned long size, > unsigned long align, > unsigned long goal); >extern void *__alloc_bootmem_low_node(pg_data_t *pgdat, > unsigned long size, > unsigned long align, > unsigned long goal); > >extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, > int flags); > >extern void *alloc_bootmem_section(unsigned long size, > unsigned long section_nr); > > > > >static inline __attribute__((always_inline)) void *alloc_remap(int nid, unsigned long size) >{ > return ((void *)0); >} > > >extern void *alloc_large_system_hash(const char *tablename, > unsigned long bucketsize, > unsigned long numentries, > int scale, > int flags, > unsigned int *_hash_shift, > unsigned int *_hash_mask, > unsigned long limit); > >extern int hashdist; > > > > > > > > > >struct fs_struct { > int users; > spinlock_t lock; > seqcount_t seq; > int umask; > int in_exec; > struct path root, pwd; >}; > >extern struct kmem_cache *fs_cachep; > >extern void exit_fs(struct task_struct *); >extern void set_fs_root(struct fs_struct *, struct path *); >extern void set_fs_pwd(struct fs_struct *, struct path *); >extern struct fs_struct *copy_fs_struct(struct fs_struct *); >extern void free_fs_struct(struct fs_struct *); >extern 
void daemonize_fs_struct(void); >extern int unshare_fs_struct(void); > >static inline __attribute__((always_inline)) void get_fs_root(struct fs_struct *fs, struct path *root) >{ > spin_lock(&fs->lock); > *root = fs->root; > path_get(root); > spin_unlock(&fs->lock); >} > >static inline __attribute__((always_inline)) void get_fs_pwd(struct fs_struct *fs, struct path *pwd) >{ > spin_lock(&fs->lock); > *pwd = fs->pwd; > path_get(pwd); > spin_unlock(&fs->lock); >} > >static inline __attribute__((always_inline)) void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, > struct path *pwd) >{ > spin_lock(&fs->lock); > *root = fs->root; > path_get(root); > *pwd = fs->pwd; > path_get(pwd); > spin_unlock(&fs->lock); >} > > > > > > >static inline __attribute__((always_inline)) void prefetch_range(void *addr, size_t len) >{ > > char *cp; > char *end = addr + len; > > for (cp = addr; cp < end; cp += (4*(1 << 6))) > prefetch(cp); > >} > > > >struct ratelimit_state { > raw_spinlock_t lock; > > int interval; > int burst; > int printed; > int missed; > unsigned long begin; >}; > >static inline __attribute__((always_inline)) void ratelimit_state_init(struct ratelimit_state *rs, > int interval, int burst) >{ > do { *(&rs->lock) = (raw_spinlock_t) { .raw_lock = { 0 }, }; } while (0); > rs->interval = interval; > rs->burst = burst; > rs->printed = 0; > rs->missed = 0; > rs->begin = 0; >} > >extern struct ratelimit_state printk_ratelimit_state; > >extern int ___ratelimit(struct ratelimit_state *rs, const char *func); > > > > > > >struct super_block; >struct file_system_type; >struct linux_binprm; >struct path; >struct mount; > > > > > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) bdev_cache_init(void); > >extern int __sync_blockdev(struct block_device *bdev, int wait); > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) chrdev_init(void); > > > > >extern int copy_mount_options(const void *, unsigned long *); >extern int copy_mount_string(const void *, char **); > >extern struct vfsmount *lookup_mnt(struct path *); >extern int finish_automount(struct vfsmount *, struct path *); > >extern void mnt_make_longterm(struct vfsmount *); >extern void mnt_make_shortterm(struct vfsmount *); >extern int sb_prepare_remount_readonly(struct super_block *); > >extern void __attribute__ ((__section__(".init.text"))) __attribute__((__cold__)) __attribute__((no_instrument_function)) mnt_init(void); > >extern void vfsmount_lock_lock_init(void); extern void vfsmount_lock_local_lock(void); extern void vfsmount_lock_local_unlock(void); extern void vfsmount_lock_local_lock_cpu(int cpu); extern void vfsmount_lock_local_unlock_cpu(int cpu); extern void vfsmount_lock_global_lock(void); extern void vfsmount_lock_global_unlock(void); extern void vfsmount_lock_global_lock_online(void); extern void vfsmount_lock_global_unlock_online(void);; > > > > > >extern void chroot_fs_refs(struct path *, struct path *); > > > > >extern void file_sb_list_add(struct file *f, struct super_block *sb); >extern void file_sb_list_del(struct file *f); >extern void mark_files_ro(struct super_block *); >extern struct file *get_empty_filp(void); > > > > >extern int do_remount_sb(struct super_block *, int, void *, int); >extern bool grab_super_passive(struct super_block *sb); >extern struct dentry *mount_fs(struct file_system_type *, > int, const char *, void *); >extern struct super_block 
*user_get_super(dev_t); > > > > >struct nameidata; >extern struct file *nameidata_to_filp(struct nameidata *); >extern void release_open_intent(struct nameidata *); >struct open_flags { > int open_flag; > umode_t mode; > int acc_mode; > int intent; >}; >extern struct file *do_filp_open(int dfd, const char *pathname, > const struct open_flags *op, int lookup_flags); >extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, > const char *, const struct open_flags *, int lookup_flags); > >extern long do_handle_open(int mountdirfd, > struct file_handle *ufh, int open_flag); > > > > >extern spinlock_t inode_sb_list_lock; > > > > >extern void inode_wb_list_del(struct inode *inode); > >extern int get_nr_dirty_inodes(void); >extern void evict_inodes(struct super_block *); >extern int invalidate_inodes(struct super_block *, bool); > > > > >extern struct dentry *__d_alloc(struct super_block *, const struct qstr *); > > > > > > > > > > > >struct pollfd { > int fd; > short events; > short revents; >}; > > > >extern struct ctl_table epoll_table[]; > >struct poll_table_struct; > > > > >typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *); > > > > > >typedef struct poll_table_struct { > poll_queue_proc _qproc; > unsigned long _key; >} poll_table; > >static inline __attribute__((always_inline)) void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) >{ > if (p && p->_qproc && wait_address) > p->_qproc(filp, wait_address, p); >} > > > > > > >static inline __attribute__((always_inline)) bool poll_does_not_wait(const poll_table *p) >{ > return p == ((void *)0) || p->_qproc == ((void *)0); >} > > > > > > > >static inline __attribute__((always_inline)) unsigned long poll_requested_events(const poll_table *p) >{ > return p ? p->_key : ~0UL; >} > >static inline __attribute__((always_inline)) void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) >{ > pt->_qproc = qproc; > pt->_key = ~0UL; >} > >struct poll_table_entry { > struct file *filp; > unsigned long key; > wait_queue_t wait; > wait_queue_head_t *wait_address; >}; > > > > >struct poll_wqueues { > poll_table pt; > struct poll_table_page *table; > struct task_struct *polling_task; > int triggered; > int error; > int inline_index; > struct poll_table_entry inline_entries[((832 - 256) / sizeof(struct poll_table_entry))]; >}; > >extern void poll_initwait(struct poll_wqueues *pwq); >extern void poll_freewait(struct poll_wqueues *pwq); >extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state, > ktime_t *expires, unsigned long slack); >extern long select_estimate_accuracy(struct timespec *tv); > > >static inline __attribute__((always_inline)) int poll_schedule(struct poll_wqueues *pwq, int state) >{ > return poll_schedule_timeout(pwq, state, ((void *)0), 0); >} > > > > > >typedef struct { > unsigned long *in, *out, *ex; > unsigned long *res_in, *res_out, *res_ex; >} fd_set_bits; > >static inline __attribute__((always_inline)) >int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset) >{ > nr = ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)); > if (ufdset) > return copy_from_user(fdset, ufdset, nr) ? 
-14 : 0; > > ({ void *__p = (fdset); size_t __n = nr; if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); > return 0; >} > >static inline __attribute__((always_inline)) unsigned long __attribute__((warn_unused_result)) >set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset) >{ > if (ufdset) > return __copy_to_user(ufdset, fdset, ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long))); > return 0; >} > >static inline __attribute__((always_inline)) >void zero_fd_set(unsigned long nr, unsigned long *fdset) >{ > ({ void *__p = (fdset); size_t __n = ((((nr)+(8*sizeof(long))-1)/(8*sizeof(long)))*sizeof(long)); if ((__n) != 0) { if (__builtin_constant_p((0)) && (0) == 0) __memzero((__p),(__n)); else memset((__p),(0),(__n)); } (__p); }); >} > > > >extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time); >extern int do_sys_poll(struct pollfd * ufds, unsigned int nfds, > struct timespec *end_time); >extern int core_sys_select(int n, fd_set *inp, fd_set *outp, > fd_set *exp, struct timespec *end_time); > >extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec); > > >struct mnt_namespace { > atomic_t count; > struct mount * root; > struct list_head list; > wait_queue_head_t poll; > int event; >}; > >struct mnt_pcp { > int mnt_count; > int mnt_writers; >}; > >struct mount { > struct list_head mnt_hash; > struct mount *mnt_parent; > struct dentry *mnt_mountpoint; > struct vfsmount mnt; > > struct mnt_pcp *mnt_pcp; > atomic_t mnt_longterm; > > > > > struct list_head mnt_mounts; > struct list_head mnt_child; > struct list_head mnt_instance; > const char *mnt_devname; > struct list_head mnt_list; > struct list_head mnt_expire; > struct list_head mnt_share; > struct list_head mnt_slave_list; > struct list_head mnt_slave; > struct mount *mnt_master; > struct mnt_namespace *mnt_ns; > > struct hlist_head mnt_fsnotify_marks; > __u32 mnt_fsnotify_mask; > > int mnt_id; > int mnt_group_id; > int mnt_expiry_mark; > int mnt_pinned; > int mnt_ghosts; >}; > >static inline __attribute__((always_inline)) struct mount *real_mount(struct vfsmount *mnt) >{ > return ({ const typeof( ((struct mount *)0)->mnt ) *__mptr = (mnt); (struct mount *)( (char *)__mptr - __builtin_offsetof(struct mount,mnt) );}); >} > >static inline __attribute__((always_inline)) int mnt_has_parent(struct mount *mnt) >{ > return mnt != mnt->mnt_parent; >} > >extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int); > >static inline __attribute__((always_inline)) void get_mnt_ns(struct mnt_namespace *ns) >{ > atomic_add(1, &ns->count); >} > >struct proc_mounts { > struct seq_file m; > struct mnt_namespace *ns; > struct path root; > int (*show)(struct seq_file *, struct vfsmount *); >}; > >extern const struct seq_operations mounts_op; > > > >extern int sysctl_vfs_cache_pressure __attribute__((__section__(".data..read_mostly"))); >extern spinlock_t dcache_lru_lock; >extern seqlock_t rename_lock; >extern struct kmem_cache *dentry_cache __attribute__((__section__(".data..read_mostly"))); >extern struct hlist_bl_head *dentry_hashtable __attribute__((__section__(".data..read_mostly"))); >extern int dentry_cmp(const unsigned char *cs, size_t scount, > const unsigned char *ct, size_t tcount); >extern struct hlist_bl_head *d_hash(const struct dentry *parent, > unsigned int hash); >extern __attribute__((section(".data..percpu" ""))) __typeof__(unsigned int) nr_dentry; >extern void __dget_dlock(struct dentry *dentry); 
>extern void dentry_unlink_inode(struct dentry * dentry); >extern void __d_instantiate(struct dentry *dentry, struct inode *inode); >extern struct dentry *__d_find_alias(struct inode *inode, int want_discon); >extern struct dentry *__d_instantiate_unique(struct dentry *entry, > struct inode *inode); >extern struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq); >extern void shrink_dcache_for_umount_subtree(struct dentry *dentry); >extern void dentry_lru_del(struct dentry *dentry); >extern void dentry_lru_move_list(struct dentry *dentry, struct list_head *list); >extern void shrink_dentry_list(struct list_head *list); >extern void dentry_rcuwalk_barrier(struct dentry *dentry); >extern void __dget(struct dentry *dentry); > > > > >extern unsigned int d_hash_mask __attribute__((__section__(".data..read_mostly"))); >extern unsigned int d_hash_shift __attribute__((__section__(".data..read_mostly"))); > >struct dentry *__d_lookup_rcu(const struct dentry *parent, > const struct qstr *name, > unsigned *seqp, struct inode **inode) >{ > unsigned int len = name->len; > unsigned int hash = name->hash; > const unsigned char *str = name->name; > struct hlist_bl_head *b = d_hash(parent, hash); > struct hlist_bl_node *node; > struct dentry *dentry; > > for (node = hlist_bl_first_rcu(b); node && ({ dentry = ({ const typeof( ((typeof(*dentry) *)0)->d_hash ) *__mptr = (node); (typeof(*dentry) *)( (char *)__mptr - __builtin_offsetof(typeof(*dentry),d_hash) );}); 1; }); node = ({ typeof(*(node->next)) *_________p1 = (typeof(*(node->next))* )(*(volatile typeof((node->next)) *)&((node->next))); do { } while (0); ; do { } while(0); ((typeof(*(node->next)) *)(_________p1)); })) { > unsigned seq; > struct inode *i; > const char *tname; > int tlen; > > if (dentry->d_name.hash != hash) > continue; > >seqretry: > seq = read_seqcount_begin(&dentry->d_seq); > if (dentry->d_parent != parent) > continue; > if (d_unhashed(dentry)) > continue; > tlen = dentry->d_name.len; > tname = dentry->d_name.name; > i = dentry->d_inode; > prefetch(tname); > > > > > > > if (read_seqcount_retry(&dentry->d_seq, seq)) > goto seqretry; > > if (__builtin_expect(!!(parent->d_flags & 0x0002), 0)) { > if (parent->d_op->d_compare(parent, *inode, > dentry, i, > tlen, tname, name)) > continue; > } else { > if (dentry_cmp(tname, tlen, str, len)) > continue; > } > > > > > > > *seqp = seq; > *inode = i; > return dentry; > } > return ((void *)0); >}