extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef signed char __s8; typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef long long __s64; typedef unsigned long long __u64; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef unsigned short umode_t; typedef u64 dma_addr_t; typedef unsigned int __kernel_mode_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; typedef unsigned long __kernel_size_t; typedef long __kernel_ssize_t; typedef long __kernel_time_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef long long __kernel_loff_t; typedef __kernel_uid_t __kernel_uid32_t; typedef __kernel_gid_t __kernel_gid32_t; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef __kernel_mode_t mode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u32 uint32_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct task_struct; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct dentry; struct module; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct completion; struct pt_regs; struct pid; struct timespec; struct compat_timespec; struct __anonstruct_ldv_1747_10 { unsigned long arg0 ; unsigned long arg1 ; unsigned long arg2 ; unsigned long arg3 ; }; struct __anonstruct_futex_11 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_12 { clockid_t index ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_13 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion_ldv_1770_9 { struct __anonstruct_ldv_1747_10 ldv_1747 ; struct __anonstruct_futex_11 futex ; struct __anonstruct_nanosleep_12 nanosleep ; struct __anonstruct_poll_13 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_1770_9 ldv_1770 ; }; struct page; struct exec_domain; struct mm_struct; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; typedef void (*ctor_fn_t)(void); struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; 
unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion_ldv_2019_14 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion_ldv_2019_14 ldv_2019 ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_17 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_17 pgd_t; struct file; struct seq_file; struct __anonstruct_ldv_2237_21 { unsigned int a ; unsigned int b ; }; struct __anonstruct_ldv_2252_22 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_2253_20 { struct __anonstruct_ldv_2237_21 ldv_2237 ; struct __anonstruct_ldv_2252_22 ldv_2252 ; }; struct desc_struct { union __anonunion_ldv_2253_20 ldv_2253 ; }; struct thread_struct; struct cpumask; struct arch_spinlock; struct cpumask { unsigned long bits[64U] ; }; typedef struct cpumask cpumask_t; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_4840_26 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_4846_27 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_4847_25 { struct __anonstruct_ldv_4840_26 ldv_4840 ; struct __anonstruct_ldv_4846_27 ldv_4846 ; }; union __anonunion_ldv_4856_28 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion_ldv_4847_25 ldv_4847 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion_ldv_4856_28 ldv_4856 ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct ymmh_struct { u32 ymmh_space[64U] ; }; struct xsave_hdr_struct { u64 xstate_bv ; u64 reserved1[2U] ; u64 reserved2[5U] ; }; struct xsave_struct { struct i387_fxsave_struct i387 ; struct xsave_hdr_struct xsave_hdr ; struct ymmh_struct ymmh ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; struct xsave_struct xsave ; }; struct fpu { union thread_xstate *state ; }; struct kmem_cache; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_no ; unsigned long error_code ; struct fpu fpu ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; 
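/* The atomic_t defined above is flattened by CIL into a plain struct wrapping an
   int, so in this model the kernel's atomic helpers reduce to ordinary field
   updates. A minimal sketch of such non-atomic stubs follows, using hypothetical
   helper names (ldv_example_atomic_inc, ldv_example_atomic_dec_and_test) that
   mirror the semantics of the kernel's atomic_inc()/atomic_dec_and_test(); these
   helpers are illustrative only and are not referenced elsewhere in this file. */
static inline void ldv_example_atomic_inc(atomic_t *v)
{
  /* plain, non-atomic increment of the wrapped counter */
  v->counter = v->counter + 1;
}
static inline int ldv_example_atomic_dec_and_test(atomic_t *v)
{
  /* plain, non-atomic decrement; returns nonzero when the counter reaches zero */
  v->counter = v->counter - 1;
  return v->counter == 0;
}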
struct __anonstruct_mm_segment_t_30 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_30 mm_segment_t; typedef atomic64_t atomic_long_t; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; __u32 flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; struct restart_block restart_block ; void *sysenter_return ; int uaccess_err ; }; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct arch_spinlock { unsigned int slock ; }; typedef struct arch_spinlock arch_spinlock_t; struct __anonstruct_arch_rwlock_t_31 { unsigned int lock ; }; typedef struct __anonstruct_arch_rwlock_t_31 arch_rwlock_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 2 ; unsigned char hardirqs_off : 1 ; unsigned short references : 11 ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct_ldv_5846_33 { u8 __padding[1U] ; struct lockdep_map dep_map ; }; union __anonunion_ldv_5847_32 { struct raw_spinlock rlock ; struct __anonstruct_ldv_5846_33 ldv_5846 ; }; struct spinlock { union __anonunion_ldv_5847_32 ldv_5847 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_34 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_34 rwlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; struct rb_node { unsigned long rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct __anonstruct_nodemask_t_36 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_36 nodemask_t; struct prio_tree_node; struct raw_prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; }; struct prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; unsigned long start ; unsigned long last ; }; struct prio_tree_root { struct prio_tree_node *prio_tree_node ; unsigned short index_bits ; unsigned short raw ; }; struct rw_semaphore; typedef long rwsem_count_t; struct 
rw_semaphore { rwsem_count_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct thread_info *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct __anonstruct_mm_context_t_37 { void *ldt ; int size ; struct mutex lock ; void *vdso ; }; typedef struct __anonstruct_mm_context_t_37 mm_context_t; struct address_space; struct __anonstruct_ldv_7125_39 { u16 inuse ; u16 objects ; }; union __anonunion_ldv_7126_38 { atomic_t _mapcount ; struct __anonstruct_ldv_7125_39 ldv_7125 ; }; struct __anonstruct_ldv_7131_41 { unsigned long private ; struct address_space *mapping ; }; union __anonunion_ldv_7134_40 { struct __anonstruct_ldv_7131_41 ldv_7131 ; struct kmem_cache *slab ; struct page *first_page ; }; union __anonunion_ldv_7138_42 { unsigned long index ; void *freelist ; }; struct page { unsigned long flags ; atomic_t _count ; union __anonunion_ldv_7126_38 ldv_7126 ; union __anonunion_ldv_7134_40 ldv_7134 ; union __anonunion_ldv_7138_42 ldv_7138 ; struct list_head lru ; }; struct __anonstruct_vm_set_44 { struct list_head list ; void *parent ; struct vm_area_struct *head ; }; union __anonunion_shared_43 { struct __anonstruct_vm_set_44 vm_set ; struct raw_prio_tree_node prio_tree_node ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { struct mm_struct *vm_mm ; unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct rb_node vm_rb ; union __anonunion_shared_43 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; unsigned long vm_truncate_count ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct mm_rss_stat { unsigned long count[3U] ; }; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; void (*unmap_area)(struct mm_struct * , unsigned long ) ; unsigned long mmap_base ; unsigned long task_size ; unsigned long cached_hole_size ; unsigned long free_area_cache ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; struct rw_semaphore mmap_sem ; spinlock_t page_table_lock ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long reserved_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long 
env_start ; unsigned long env_end ; unsigned long saved_auxv[44U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_t cpu_vm_mask ; mm_context_t context ; unsigned int faultstamp ; unsigned int token_priority ; unsigned int last_interval ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct hlist_head ioctx_list ; struct task_struct *owner ; struct file *exe_file ; unsigned long num_exe_file_vmas ; struct mmu_notifier_mm *mmu_notifier_mm ; }; typedef unsigned long cputime_t; struct call_single_data { struct list_head list ; void (*func)(void * ) ; void *info ; u16 flags ; u16 priv ; }; struct device; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; struct tvec_base *base ; void (*function)(unsigned long ) ; unsigned long data ; int slack ; void *start_site ; char start_comm[16U] ; int start_pid ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; }; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum dpm_state { DPM_INVALID = 0, DPM_ON = 1, DPM_PREPARING = 2, DPM_RESUMING = 3, DPM_SUSPENDING = 4, DPM_OFF = 5, DPM_OFF_IRQ = 6 } ; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_RESUME = 3 } ; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char should_wakeup : 1 ; unsigned char async_suspend : 1 ; enum dpm_state status ; struct list_head entry ; struct completion completion ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; spinlock_t lock ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char ignore_children : 1 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; }; struct rcu_head { struct rcu_head *next ; void (*func)(struct rcu_head * ) ; }; struct sem_undo_list; struct sem_undo_list { atomic_t refcnt ; spinlock_t lock ; struct list_head list_proc ; }; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct siginfo; struct __anonstruct_sigset_t_114 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_114 sigset_t; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void 
__restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_116 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_117 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_118 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_119 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__sigfault_120 { void *_addr ; short _addr_lsb ; }; struct __anonstruct__sigpoll_121 { long _band ; int _fd ; }; union __anonunion__sifields_115 { int _pad[28U] ; struct __anonstruct__kill_116 _kill ; struct __anonstruct__timer_117 _timer ; struct __anonstruct__rt_118 _rt ; struct __anonstruct__sigchld_119 _sigchld ; struct __anonstruct__sigfault_120 _sigfault ; struct __anonstruct__sigpoll_121 _sigpoll ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_115 _sifields ; }; typedef struct siginfo siginfo_t; struct user_struct; struct sigpending { struct list_head list ; sigset_t signal ; }; struct vfsmount; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct rcu_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct prop_local_percpu { struct percpu_counter events ; int shift ; unsigned long period ; spinlock_t lock ; }; struct prop_local_single { unsigned long events ; unsigned long period ; int shift ; spinlock_t lock ; }; struct __anonstruct_seccomp_t_124 { int mode ; }; typedef struct __anonstruct_seccomp_t_124 seccomp_t; struct plist_head { struct list_head prio_list ; struct list_head node_list ; raw_spinlock_t *rawlock ; spinlock_t *spinlock ; }; struct plist_node { int prio ; struct plist_head plist ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct rb_node node ; ktime_t _expires ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; clockid_t index ; struct rb_root active ; struct rb_node *first ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t softirq_time ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; struct hrtimer_clock_base clock_base[2U] ; ktime_t expires_next ; int hres_active ; int hang_detected ; unsigned long nr_events ; unsigned long nr_retries ; unsigned long nr_hangs ; ktime_t max_hang_time ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 
write_bytes ; u64 cancelled_write_bytes ; }; struct kobject; enum kobj_ns_type; enum kobj_ns_type; struct attribute { char const *name ; struct module *owner ; mode_t mode ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; mode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct sysfs_dirent; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; struct kref kref ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_ns_type_operations; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct sock; struct kobj_ns_type_operations { enum kobj_ns_type type ; void const *(*current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct nsproxy; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct cred; struct key_type; struct keyring_list; struct key_user; union __anonunion_ldv_12757_125 { time_t expiry ; time_t revoked_at ; }; union __anonunion_type_data_126 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; }; union __anonunion_payload_127 { unsigned long value ; void *data ; struct keyring_list *subscriptions ; }; struct key { atomic_t usage ; key_serial_t serial ; struct rb_node serial_node ; struct key_type *type ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion_ldv_12757_125 ldv_12757 ; uid_t uid ; gid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; char *description ; union __anonunion_type_data_126 type_data ; union __anonunion_payload_127 payload ; }; struct audit_context; struct inode; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; gid_t small_block[32U] ; gid_t *blocks[0U] ; }; struct thread_group_cred { atomic_t usage ; pid_t tgid ; spinlock_t lock ; struct key *session_keyring ; struct key *process_keyring ; struct rcu_head rcu ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; uid_t uid ; gid_t gid ; uid_t suid ; gid_t sgid ; uid_t euid ; gid_t egid ; uid_t fsuid ; gid_t fsgid ; unsigned 
int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; struct thread_group_cred *tgcred ; void *security ; struct user_struct *user ; struct group_info *group_info ; struct rcu_head rcu ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct cfs_rq; struct user_namespace; struct io_event { __u64 data ; __u64 obj ; __s64 res ; __s64 res2 ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kioctx; union __anonunion_ki_obj_128 { void *user ; struct task_struct *tsk ; }; struct eventfd_ctx; struct kiocb { struct list_head ki_run_list ; unsigned long ki_flags ; int ki_users ; unsigned int ki_key ; struct file *ki_filp ; struct kioctx *ki_ctx ; int (*ki_cancel)(struct kiocb * , struct io_event * ) ; ssize_t (*ki_retry)(struct kiocb * ) ; void (*ki_dtor)(struct kiocb * ) ; union __anonunion_ki_obj_128 ki_obj ; __u64 ki_user_data ; loff_t ki_pos ; void *private ; unsigned short ki_opcode ; size_t ki_nbytes ; char *ki_buf ; size_t ki_left ; struct iovec ki_inline_vec ; struct iovec *ki_iovec ; unsigned long ki_nr_segs ; unsigned long ki_cur_seg ; struct list_head ki_list ; struct eventfd_ctx *ki_eventfd ; }; struct aio_ring_info { unsigned long mmap_base ; unsigned long mmap_size ; struct page **ring_pages ; spinlock_t ring_lock ; long nr_pages ; unsigned int nr ; unsigned int tail ; struct page *internal_pages[8U] ; }; struct kioctx { atomic_t users ; int dead ; struct mm_struct *mm ; unsigned long user_id ; struct hlist_node list ; wait_queue_head_t wait ; spinlock_t ctx_lock ; int reqs_active ; struct list_head active_reqs ; struct list_head run_list ; unsigned int max_reqs ; struct aio_ring_info ring_info ; struct delayed_work wq ; struct rcu_head rcu_head ; }; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime cputime ; int running ; spinlock_t lock ; }; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned 
long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; struct tty_audit_buf *tty_audit_buf ; int oom_adj ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; uid_t uid ; struct user_namespace *user_ns ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; unsigned int bkl_count ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; struct timespec freepages_start ; struct timespec freepages_end ; u64 freepages_delay ; u32 freepages_count ; }; struct io_context; struct pipe_inode_info; struct rq; struct sched_class { struct sched_class const *next ; void (*enqueue_task)(struct rq * , struct task_struct * , int ) ; void (*dequeue_task)(struct rq * , struct task_struct * , int ) ; void (*yield_task)(struct rq * ) ; void (*check_preempt_curr)(struct rq * , struct task_struct * , int ) ; struct task_struct *(*pick_next_task)(struct rq * ) ; void (*put_prev_task)(struct rq * , struct task_struct * ) ; int (*select_task_rq)(struct rq * , struct task_struct * , int , int ) ; void (*pre_schedule)(struct rq * , struct task_struct * ) ; void (*post_schedule)(struct rq * ) ; void (*task_waking)(struct rq * , struct task_struct * ) ; void (*task_woken)(struct rq * , struct task_struct * ) ; void (*set_cpus_allowed)(struct task_struct * , struct cpumask const * ) ; void (*rq_online)(struct rq * ) ; void (*rq_offline)(struct rq * ) ; void (*set_curr_task)(struct rq * ) ; void (*task_tick)(struct rq * , struct task_struct * , int ) ; void (*task_fork)(struct task_struct * ) ; void (*switched_from)(struct rq * , struct task_struct * , int ) ; void (*switched_to)(struct rq * , struct task_struct * , int ) ; void (*prio_changed)(struct rq * , struct task_struct * , int , int ) ; unsigned int (*get_rr_interval)(struct rq * , struct task_struct * ) ; void (*moved_group)(struct task_struct * , int ) ; }; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; 
u64 nr_migrations ; struct sched_statistics statistics ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned int time_slice ; int nr_cpus_allowed ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct mem_cgroup; struct memcg_batch_info { int do_batch ; struct mem_cgroup *memcg ; unsigned long bytes ; unsigned long memsw_bytes ; }; struct files_struct; struct irqaction; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; int lock_depth ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; unsigned int policy ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int personality ; unsigned char did_exec : 1 ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; pid_t pid ; pid_t tgid ; unsigned long stack_canary ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; struct mutex cred_guard_mutex ; struct cred *replacement_session_keyring ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct audit_context *audit_context ; uid_t loginuid ; unsigned int sessionid ; seccomp_t seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; struct irqaction *irqaction ; raw_spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 
curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; int mems_allowed_change_disable ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; atomic_t fs_excl ; struct rcu_head rcu ; struct pipe_inode_info *splice_pipe ; struct task_delay_info *delays ; int make_it_fail ; struct prop_local_single dirties ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; struct list_head *scm_work_list ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_batch_info memcg_batch ; }; struct device_type; struct class; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; uid_t uid ; gid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kernel_param; struct kparam_string; struct kparam_array; union __anonunion_ldv_15069_133 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; u16 perm ; u16 flags ; int (*set)(char const * , struct kernel_param * ) ; int (*get)(char * , struct kernel_param * ) ; union __anonunion_ldv_15069_133 ldv_15069 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int *num ; int (*set)(char const * , struct kernel_param * ) ; int (*get)(char * , struct kernel_param * ) ; unsigned int elemsize ; void *elem ; }; struct tracepoint; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; int state ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct mod_arch_specific { }; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; }; struct exception_table_entry; enum 
module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2 } ; struct module_ref { unsigned int incs ; unsigned int decs ; }; struct module_sect_attrs; struct module_notes_attrs; struct ftrace_event_call; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; void *percpu ; unsigned int percpu_size ; char *args ; struct tracepoint *tracepoints ; unsigned int num_tracepoints ; char const **trace_bprintk_fmt_start ; unsigned int num_trace_bprintk_fmt ; struct ftrace_event_call *trace_events ; unsigned int num_trace_events ; struct list_head source_list ; struct list_head target_list ; struct task_struct *waiter ; void (*exit)(void) ; struct module_ref *refptr ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct dma_map_ops; struct dev_archdata { void *acpi_handle ; struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class_private; struct bus_type; struct bus_type_private; struct bus_attribute { struct attribute attr ; ssize_t (*show)(struct bus_type * , char * ) ; ssize_t (*store)(struct bus_type * , char const * , size_t ) ; }; struct device_attribute; struct driver_attribute; struct bus_type { char const *name ; struct bus_attribute *bus_attrs ; struct device_attribute *dev_attrs ; struct driver_attribute *drv_attrs ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct bus_type_private *p ; }; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct driver_attribute { struct attribute attr ; ssize_t 
(*show)(struct device_driver * , char * ) ; ssize_t (*store)(struct device_driver * , char const * , size_t ) ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct device_attribute *dev_attrs ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , mode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct class_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , mode_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; struct dev_pm_info power ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct dev_archdata archdata ; dev_t devt ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; }; struct nameidata; struct qstr { unsigned int hash ; unsigned int len ; unsigned char const *name ; }; union __anonunion_d_u_134 { struct list_head d_child ; struct rcu_head d_rcu ; }; struct dentry_operations; struct super_block; struct dentry { atomic_t d_count ; unsigned int d_flags ; spinlock_t d_lock ; int d_mounted ; struct inode *d_inode ; struct hlist_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct list_head d_lru ; union __anonunion_d_u_134 d_u ; struct list_head d_subdirs ; struct list_head d_alias ; unsigned long d_time ; struct dentry_operations const *d_op ; struct super_block *d_sb ; void *d_fsdata ; unsigned char d_iname[32U] ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , struct nameidata * ) ; int (*d_hash)(struct dentry * , struct qstr * ) ; int (*d_compare)(struct dentry * , struct qstr * , struct qstr * ) ; int (*d_delete)(struct dentry * ) ; void (*d_release)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; }; struct radix_tree_node; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; struct 
export_operations; struct hd_geometry; struct poll_table_struct; struct kstatfs; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; uid_t ia_uid ; gid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct if_dqinfo { __u64 dqi_bgrace ; __u64 dqi_igrace ; __u32 dqi_flags ; __u32 dqi_valid ; }; struct fs_disk_quota { __s8 d_version ; __s8 d_flags ; __u16 d_fieldmask ; __u32 d_id ; __u64 d_blk_hardlimit ; __u64 d_blk_softlimit ; __u64 d_ino_hardlimit ; __u64 d_ino_softlimit ; __u64 d_bcount ; __u64 d_icount ; __s32 d_itimer ; __s32 d_btimer ; __u16 d_iwarns ; __u16 d_bwarns ; __s32 d_padding2 ; __u64 d_rtb_hardlimit ; __u64 d_rtb_softlimit ; __u64 d_rtbcount ; __s32 d_rtbtimer ; __u16 d_rtbwarns ; __s16 d_padding3 ; char d_padding4[8U] ; }; struct fs_qfilestat { __u64 qfs_ino ; __u64 qfs_nblks ; __u32 qfs_nextents ; }; typedef struct fs_qfilestat fs_qfilestat_t; struct fs_quota_stat { __s8 qs_version ; __u16 qs_flags ; __s8 qs_pad ; fs_qfilestat_t qs_uquota ; fs_qfilestat_t qs_gquota ; __u32 qs_incoredqs ; __s32 qs_btimelimit ; __s32 qs_itimelimit ; __s32 qs_rtbtimelimit ; __u16 qs_bwarnlimit ; __u16 qs_iwarnlimit ; }; struct dquot; typedef __kernel_uid32_t qid_t; typedef long long qsize_t; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_maxblimit ; qsize_t dqi_maxilimit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; unsigned int dq_id ; loff_t dq_off ; unsigned long dq_flags ; short dq_type ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , char * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_sync)(struct super_block * , int , int ) ; int (*get_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*set_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*get_dqblk)(struct super_block * , int , qid_t , struct fs_disk_quota * ) ; int (*set_dqblk)(struct super_block * , int , qid_t , struct fs_disk_quota * ) ; int (*get_xstate)(struct super_block * , struct fs_quota_stat * ) ; int (*set_xstate)(struct super_block * , unsigned int , int ) ; }; struct quota_format_type { int 
qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct rw_semaphore dqptr_sem ; struct inode *files[2U] ; struct mem_dqinfo info[2U] ; struct quota_format_ops const *ops[2U] ; }; struct writeback_control; union __anonunion_arg_136 { char *buf ; void *data ; }; struct __anonstruct_read_descriptor_t_135 { size_t written ; size_t count ; union __anonunion_arg_136 arg ; int error ; }; typedef struct __anonstruct_read_descriptor_t_135 read_descriptor_t; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; void (*sync_page)(struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned long ) ; int (*releasepage)(struct page * , gfp_t ) ; ssize_t (*direct_IO)(int , struct kiocb * , struct iovec const * , loff_t , unsigned long ) ; int (*get_xip_mem)(struct address_space * , unsigned long , int , void ** , unsigned long * ) ; int (*migratepage)(struct address_space * , struct page * , struct page * ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , read_descriptor_t * , unsigned long ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; unsigned int i_mmap_writable ; struct prio_tree_root i_mmap ; struct list_head i_mmap_nonlinear ; spinlock_t i_mmap_lock ; unsigned int truncate_count ; unsigned long nrpages ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; struct backing_dev_info *backing_dev_info ; spinlock_t private_lock ; struct list_head private_list ; struct address_space *assoc_mapping ; }; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; struct inode *bd_inode ; struct super_block *bd_super ; int bd_openers ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; struct list_head bd_holder_list ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; struct file_operations; struct file_lock; struct cdev; union __anonunion_ldv_17212_137 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; }; struct inode { struct hlist_node i_hash ; struct list_head i_list ; struct list_head i_sb_list ; struct list_head i_dentry ; unsigned long i_ino ; atomic_t i_count ; unsigned int i_nlink ; uid_t i_uid ; gid_t i_gid ; dev_t i_rdev ; unsigned int i_blkbits ; u64 i_version ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec 
i_ctime ; blkcnt_t i_blocks ; unsigned short i_bytes ; umode_t i_mode ; spinlock_t i_lock ; struct mutex i_mutex ; struct rw_semaphore i_alloc_sem ; struct inode_operations const *i_op ; struct file_operations const *i_fop ; struct super_block *i_sb ; struct file_lock *i_flock ; struct address_space *i_mapping ; struct address_space i_data ; struct dquot *i_dquot[2U] ; struct list_head i_devices ; union __anonunion_ldv_17212_137 ldv_17212 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_mark_entries ; struct list_head inotify_watches ; struct mutex inotify_mutex ; unsigned long i_state ; unsigned long dirtied_when ; unsigned int i_flags ; atomic_t i_writecount ; void *i_security ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; uid_t uid ; uid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_138 { struct list_head fu_list ; struct rcu_head fu_rcuhead ; }; struct file { union __anonunion_f_u_138 f_u ; struct path f_path ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct address_space *f_mapping ; unsigned long f_mnt_write_state ; }; typedef struct files_struct *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*fl_compare_owner)(struct file_lock * , struct file_lock * ) ; void (*fl_notify)(struct file_lock * ) ; int (*fl_grant)(struct file_lock * , struct file_lock * , int ) ; void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; void (*fl_break)(struct file_lock * ) ; int (*fl_mylease)(struct file_lock * , struct file_lock * ) ; int (*fl_change)(struct file_lock ** , int ) ; }; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_140 { struct list_head link ; int state ; }; union __anonunion_fl_u_139 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_140 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned char fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_139 fl_u ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct rcu_head fa_rcu ; }; struct file_system_type; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_dirt ; unsigned char s_blocksize_bits ; unsigned 
long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; struct mutex s_lock ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_head s_anon ; struct list_head s_files ; struct list_head s_dentry_lru ; int s_nr_dentry_unused ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct list_head s_instances ; struct quota_info s_dquot ; int s_frozen ; wait_queue_head_t s_wait_unfrozen ; char s_id[32U] ; void *s_fs_info ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*aio_read)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; ssize_t (*aio_write)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; int (*readdir)(struct file * , void * , int (*)(void * , char const * , int , loff_t , u64 , unsigned int ) ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; int (*ioctl)(struct inode * , struct file * , unsigned int , unsigned long ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** ) ; }; struct inode_operations { int (*create)(struct inode * , struct dentry * , int , struct nameidata * ) ; struct dentry *(*lookup)(struct inode * , struct dentry * , struct nameidata * ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , int ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , int , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*readlink)(struct dentry * , char * , int ) ; void 
*(*follow_link)(struct dentry * , struct nameidata * ) ; void (*put_link)(struct dentry * , struct nameidata * , void * ) ; void (*truncate)(struct inode * ) ; int (*permission)(struct inode * , int ) ; int (*check_acl)(struct inode * , int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; void (*truncate_range)(struct inode * , loff_t , loff_t ) ; long (*fallocate)(struct inode * , int , loff_t , loff_t ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; void (*drop_inode)(struct inode * ) ; void (*delete_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; void (*write_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_fs)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*clear_inode)(struct inode * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct vfsmount * ) ; int (*show_stats)(struct seq_file * , struct vfsmount * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; }; struct file_system_type { char const *name ; int fs_flags ; int (*get_sb)(struct file_system_type * , int , char const * , void * , struct vfsmount * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct list_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; struct lock_class_key i_alloc_sem_key ; }; struct bio; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; int make_it_fail ; unsigned long stamp ; int in_flight[2U] ; struct disk_stats *dkstats ; struct rcu_head rcu_head ; }; struct disk_part_tbl { struct rcu_head rcu_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct request_queue; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , mode_t * ) ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct 
timer_rand_state *random ; atomic_t sync_io ; struct work_struct async_notify ; struct blk_integrity *integrity ; int node_id ; }; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *page ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; int (*migrate)(struct vm_area_struct * , nodemask_t const * , nodemask_t const * , unsigned long ) ; }; struct exception_table_entry { unsigned long insn ; unsigned long fixup ; }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; struct writeback_control { enum writeback_sync_modes sync_mode ; unsigned long *older_than_this ; unsigned long wb_start ; long nr_to_write ; long pages_skipped ; loff_t range_start ; loff_t range_end ; unsigned char nonblocking : 1 ; unsigned char encountered_congestion : 1 ; unsigned char for_kupdate : 1 ; unsigned char for_background : 1 ; unsigned char for_reclaim : 1 ; unsigned char range_cyclic : 1 ; unsigned char more_io : 1 ; }; struct bdi_writeback; typedef int congested_fn(void * , int ); struct bdi_writeback { struct list_head list ; struct backing_dev_info *bdi ; unsigned int nr ; unsigned long last_old_flush ; struct task_struct *task ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; }; struct backing_dev_info { struct list_head bdi_list ; struct rcu_head rcu_head ; unsigned long ra_pages ; unsigned long state ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; void (*unplug_io_fn)(struct backing_dev_info * , struct page * ) ; void *unplug_io_data ; char *name ; struct percpu_counter bdi_stat[2U] ; struct prop_local_percpu completions ; int dirty_exceeded ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; struct bdi_writeback wb ; spinlock_t wb_lock ; struct list_head wb_list ; struct list_head work_list ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; struct io_context { atomic_long_t refcount ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; unsigned short ioprio_changed ; unsigned short cgroup_changed ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root radix_root ; struct hlist_head cic_list ; void *ioc_data ; }; struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bio_integrity_payload; typedef void bio_end_io_t(struct bio * , int ); typedef void bio_destructor_t(struct bio * ); struct bio { sector_t bi_sector ; struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; unsigned short bi_vcnt ; unsigned short bi_idx ; unsigned int bi_phys_segments ; unsigned int bi_size ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; unsigned int 
bi_max_vecs ; unsigned int bi_comp_cpu ; atomic_t bi_cnt ; struct bio_vec *bi_io_vec ; bio_end_io_t *bi_end_io ; void *bi_private ; struct bio_integrity_payload *bi_integrity ; bio_destructor_t *bi_destructor ; struct bio_vec bi_inline_vecs[0U] ; }; struct bio_integrity_payload { struct bio *bip_bio ; sector_t bip_sector ; void *bip_buf ; bio_end_io_t *bip_end_io ; unsigned int bip_size ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_idx ; struct work_struct bip_work ; struct bio_vec bip_vec[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct elevator_queue; struct request; typedef void rq_end_io_fn(struct request * , int ); struct request_list { int count[2U] ; int starved[2U] ; int elvpriv ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; }; enum rq_cmd_type_bits { REQ_TYPE_FS = 1, REQ_TYPE_BLOCK_PC = 2, REQ_TYPE_SENSE = 3, REQ_TYPE_PM_SUSPEND = 4, REQ_TYPE_PM_RESUME = 5, REQ_TYPE_PM_SHUTDOWN = 6, REQ_TYPE_SPECIAL = 7, REQ_TYPE_LINUX_BLOCK = 8, REQ_TYPE_ATA_TASKFILE = 9, REQ_TYPE_ATA_PC = 10 } ; union __anonunion_ldv_24093_142 { struct rb_node rb_node ; void *completion_data ; }; struct request { struct list_head queuelist ; struct call_single_data csd ; struct request_queue *q ; unsigned int cmd_flags ; enum rq_cmd_type_bits cmd_type ; unsigned long atomic_flags ; int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; struct hlist_node hash ; union __anonunion_ldv_24093_142 ldv_24093 ; void *elevator_private ; void *elevator_private2 ; void *elevator_private3 ; struct gendisk *rq_disk ; unsigned long start_time ; unsigned short nr_phys_segments ; unsigned short ioprio ; int ref_count ; void *special ; char *buffer ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef int elevator_queue_empty_fn(struct request_queue * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , 
struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef void *elevator_init_fn(struct request_queue * ); typedef void elevator_exit_fn(struct elevator_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_queue_empty_fn *elevator_queue_empty_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; void (*trim)(struct io_context * ) ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct elevator_type { struct list_head list ; struct elevator_ops ops ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; }; struct elevator_queue { struct elevator_ops *ops ; void *elevator_data ; struct kobject kobj ; struct elevator_type *elevator_type ; struct mutex sysfs_lock ; struct hlist_head *hash ; }; typedef void request_fn_proc(struct request_queue * ); typedef int make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unplug_fn(struct request_queue * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void prepare_flush_fn(struct request_queue * , struct request * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char no_cluster ; signed char discard_zeroes_data ; }; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; struct request_list rq ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unplug_fn *unplug_fn ; merge_bvec_fn *merge_bvec_fn ; prepare_flush_fn *prepare_flush_fn ; 
softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; sector_t end_sector ; struct request *boundary_rq ; struct timer_list unplug_timer ; int unplug_thresh ; unsigned long unplug_delay ; struct work_struct unplug_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; gfp_t bounce_gfp ; unsigned long queue_flags ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; void *dma_drain_buffer ; unsigned int dma_drain_size ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; unsigned int ordered ; unsigned int next_ordered ; unsigned int ordseq ; int orderr ; int ordcolor ; struct request pre_flush_rq ; struct request bar_rq ; struct request post_flush_rq ; struct request *orig_bar_rq ; struct mutex sysfs_lock ; struct bsg_class_device bsg_dev ; }; struct blk_integrity_exchg { void *prot_buf ; void *data_buf ; sector_t sector ; unsigned int data_size ; unsigned short sector_size ; char const *disk_name ; }; typedef void integrity_gen_fn(struct blk_integrity_exchg * ); typedef int integrity_vrfy_fn(struct blk_integrity_exchg * ); typedef void integrity_set_tag_fn(void * , void * , unsigned int ); typedef void integrity_get_tag_fn(void * , void * , unsigned int ); struct blk_integrity { integrity_gen_fn *generate_fn ; integrity_vrfy_fn *verify_fn ; integrity_set_tag_fn *set_tag_fn ; integrity_get_tag_fn *get_tag_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short sector_size ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; int (*release)(struct gendisk * , fmode_t ) ; int (*locked_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct kmem_cache_cpu { void **freelist ; struct page *page ; int node ; unsigned int stat[18U] ; }; struct kmem_cache_node { spinlock_t list_lock ; unsigned long nr_partial ; struct list_head partial ; atomic_long_t nr_slabs ; atomic_long_t total_objects ; struct list_head full ; }; struct kmem_cache_order_objects { unsigned long x ; }; struct kmem_cache { struct kmem_cache_cpu *cpu_slab ; unsigned long flags ; int size ; int objsize ; int offset ; struct kmem_cache_order_objects oo ; struct kmem_cache_order_objects max ; struct kmem_cache_order_objects min ; gfp_t allocflags ; int refcount ; void (*ctor)(void * ) ; int inuse ; int align ; unsigned long min_partial ; char const *name ; struct list_head list ; struct kobject kobj ; int 
remote_node_defrag_ratio ; struct kmem_cache_node *node[1024U] ; }; struct virtio_device; struct hd_geometry { unsigned char heads ; unsigned char sectors ; unsigned short cylinders ; unsigned long start ; }; struct virtio_device_id { __u32 device ; __u32 vendor ; }; struct virtqueue { struct list_head list ; void (*callback)(struct virtqueue * ) ; char const *name ; struct virtio_device *vdev ; void *priv ; }; struct virtio_config_ops; struct virtio_device { int index ; struct device dev ; struct virtio_device_id id ; struct virtio_config_ops *config ; struct list_head vqs ; unsigned long features[1U] ; void *priv ; }; struct virtio_driver { struct device_driver driver ; struct virtio_device_id const *id_table ; unsigned int const *feature_table ; unsigned int feature_table_size ; int (*probe)(struct virtio_device * ) ; void (*remove)(struct virtio_device * ) ; void (*config_changed)(struct virtio_device * ) ; }; typedef void vq_callback_t(struct virtqueue * ); struct virtio_config_ops { void (*get)(struct virtio_device * , unsigned int , void * , unsigned int ) ; void (*set)(struct virtio_device * , unsigned int , void const * , unsigned int ) ; u8 (*get_status)(struct virtio_device * ) ; void (*set_status)(struct virtio_device * , u8 ) ; void (*reset)(struct virtio_device * ) ; int (*find_vqs)(struct virtio_device * , unsigned int , struct virtqueue ** , vq_callback_t ** , char const ** ) ; void (*del_vqs)(struct virtio_device * ) ; u32 (*get_features)(struct virtio_device * ) ; void (*finalize_features)(struct virtio_device * ) ; }; struct virtio_blk_geometry { __u16 cylinders ; __u8 heads ; __u8 sectors ; }; struct virtio_blk_outhdr { __u32 type ; __u32 ioprio ; __u64 sector ; }; struct virtio_scsi_inhdr { __u32 errors ; __u32 data_len ; __u32 sense_len ; __u32 residual ; }; struct virtio_blk { spinlock_t lock ; struct virtio_device *vdev ; struct virtqueue *vq ; struct gendisk *disk ; struct list_head reqs ; mempool_t *pool ; unsigned int sg_elems ; struct scatterlist sg[] ; }; struct virtblk_req { struct list_head list ; struct request *req ; struct virtio_blk_outhdr out_hdr ; struct virtio_scsi_inhdr in_hdr ; u8 status ; }; long ldv__builtin_expect(long exp , long c ) ; __inline static int variable_test_bit(int nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } extern void might_fault(void) ; extern int sprintf(char * , char const * , ...) 
; extern unsigned long __phys_addr(unsigned long ) ; __inline static int get_order(unsigned long size ) { int order ; { size = (size - 1UL) >> 11; order = -1; ldv_1820: size = size >> 1; order = order + 1; if (size != 0UL) { goto ldv_1820; } else { } return (order); } } extern void *memset(void * , int , size_t ) ; __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static long IS_ERR(void const *ptr ) ; __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void list_del(struct list_head * ) ; __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } extern void lockdep_rcu_dereference(char const * , int const ) ; extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->ldv_5847.rlock); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->ldv_5847.rlock, flags); return; } } extern int debug_lockdep_rcu_enabled(void) ; __inline static int rcu_read_lock_sched_held(void) { { return (1); } } __inline static void rcu_read_lock_sched_notrace(void) { { return; } } extern unsigned long __get_free_pages(gfp_t , unsigned int ) ; extern struct module __this_module ; extern int register_blkdev(unsigned int , char const * ) ; extern void unregister_blkdev(unsigned int , char const * ) ; extern void add_disk(struct gendisk * ) ; extern void del_gendisk(struct gendisk * ) ; extern void set_disk_ro(struct gendisk * , int ) ; __inline static sector_t get_capacity(struct gendisk *disk ) { { return (disk->part0.nr_sects); } } __inline static void set_capacity(struct gendisk *disk , sector_t size ) { { disk->part0.nr_sects = size; return; } } extern struct gendisk *alloc_disk(int ) ; extern void put_disk(struct gendisk * ) ; extern unsigned long _copy_to_user(void * , void const * , unsigned int ) ; __inline static int copy_to_user(void *dst , void const *src , unsigned int size ) { unsigned long tmp ; { might_fault(); tmp = _copy_to_user(dst, src, size); return ((int )tmp); } } extern mempool_t *mempool_create(int , mempool_alloc_t * , mempool_free_t * , void * ) ; extern void mempool_destroy(mempool_t * ) ; extern void *mempool_alloc(mempool_t * , gfp_t ) ; extern void mempool_free(void * , mempool_t * ) ; extern void *mempool_kmalloc(gfp_t , void * ) ; extern void mempool_kfree(void * , void * ) ; __inline static mempool_t *mempool_create_kmalloc_pool(int min_nr , size_t size ) { mempool_t *tmp ; { tmp = mempool_create(min_nr, & mempool_kmalloc, & mempool_kfree, (void *)size); return (tmp); } } extern void bio_put(struct bio * ) ; extern struct bio *bio_map_kern(struct request_queue * , void * , unsigned int , gfp_t ) ; __inline static unsigned short req_get_ioprio(struct request *req ) { { return (req->ioprio); } } struct request *ldv_blk_make_request_5(struct request_queue *ldv_func_arg1 , struct bio *ldv_func_arg2 , gfp_t 
ldv_func_arg3 ) ; extern int scsi_cmd_ioctl(struct request_queue * , struct gendisk * , fmode_t , unsigned int , void * ) ; extern void blk_start_queue(struct request_queue * ) ; extern void blk_stop_queue(struct request_queue * ) ; extern int blk_execute_rq(struct request_queue * , struct gendisk * , struct request * , int ) ; __inline static sector_t blk_rq_pos(struct request const *rq ) { { return ((sector_t )rq->__sector); } } extern struct request *blk_peek_request(struct request_queue * ) ; extern void blk_start_request(struct request * ) ; extern void __blk_end_request_all(struct request * , int ) ; extern struct request_queue *blk_init_queue(request_fn_proc * , spinlock_t * ) ; extern void blk_cleanup_queue(struct request_queue * ) ; extern void blk_queue_bounce_limit(struct request_queue * , u64 ) ; extern void blk_queue_max_hw_sectors(struct request_queue * , unsigned int ) ; extern void blk_queue_max_segments(struct request_queue * , unsigned short ) ; extern void blk_queue_max_segment_size(struct request_queue * , unsigned int ) ; extern void blk_queue_logical_block_size(struct request_queue * , unsigned short ) ; extern void blk_queue_physical_block_size(struct request_queue * , unsigned short ) ; extern void blk_queue_alignment_offset(struct request_queue * , unsigned int ) ; extern void blk_queue_io_min(struct request_queue * , unsigned int ) ; extern void blk_queue_io_opt(struct request_queue * , unsigned int ) ; extern int blk_queue_ordered(struct request_queue * , unsigned int , prepare_flush_fn * ) ; extern int blk_rq_map_sg(struct request_queue * , struct request * , struct scatterlist * ) ; __inline static unsigned short queue_logical_block_size(struct request_queue *q ) { int retval ; { retval = 512; if ((unsigned long )q != (unsigned long )((struct request_queue *)0) && (unsigned int )q->limits.logical_block_size != 0U) { retval = (int )q->limits.logical_block_size; } else { } return ((unsigned short )retval); } } struct request *ldv_blk_make_request(gfp_t mask ) ; long ldv_is_err(void const *ptr ) ; void *ldv_err_ptr(long error ) ; long ldv_ptr_err(void const *ptr ) ; extern void kfree(void const * ) ; extern struct tracepoint __tracepoint_kmalloc ; __inline static void trace_kmalloc(unsigned long call_site , void const *ptr , size_t bytes_req , size_t bytes_alloc , gfp_t gfp_flags ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; bool __warned ; int tmp ; int tmp___0 ; struct tracepoint_func *_________p1 ; long tmp___1 ; { tmp___1 = ldv__builtin_expect(__tracepoint_kmalloc.state != 0, 0L); if (tmp___1 != 0L) { rcu_read_lock_sched_notrace(); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_sched_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_dereference("include/trace/events/kmem.h", 87); } else { } } else { } _________p1 = *((struct tracepoint_func * volatile *)(& __tracepoint_kmalloc.funcs)); it_func_ptr = _________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_25254: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , unsigned long , void const * , size_t , size_t , gfp_t ))it_func))(__data, call_site, ptr, bytes_req, bytes_alloc, gfp_flags); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_25254; } else { } } else { } rcu_read_lock_sched_notrace(); } else { } return; } } __inline static void kmemleak_alloc(void const *ptr , size_t size , int min_count , gfp_t gfp ) { { return; } } extern struct kmem_cache kmalloc_caches[28U] ; __inline static int kmalloc_index(size_t size ) { { if (size == 0UL) { return (0); } else { } if (size <= 8UL) { return (3); } else { } if (size > 64UL && size <= 96UL) { return (1); } else { } if (size > 128UL && size <= 192UL) { return (2); } else { } if (size <= 8UL) { return (3); } else { } if (size <= 16UL) { return (4); } else { } if (size <= 32UL) { return (5); } else { } if (size <= 64UL) { return (6); } else { } if (size <= 128UL) { return (7); } else { } if (size <= 256UL) { return (8); } else { } if (size <= 512UL) { return (9); } else { } if (size <= 1024UL) { return (10); } else { } if (size <= 2048UL) { return (11); } else { } if (size <= 4096UL) { return (12); } else { } if (size <= 8192UL) { return (13); } else { } if (size <= 16384UL) { return (14); } else { } if (size <= 32768UL) { return (15); } else { } if (size <= 65536UL) { return (16); } else { } if (size <= 131072UL) { return (17); } else { } if (size <= 262144UL) { return (18); } else { } if (size <= 524288UL) { return (19); } else { } if (size <= 1048576UL) { return (20); } else { } if (size <= 2097152UL) { return (21); } else { } return (-1); } } __inline static struct kmem_cache *kmalloc_slab(size_t size ) { int index___0 ; int tmp ; { tmp = kmalloc_index(size); index___0 = tmp; if (index___0 == 0) { return ((struct kmem_cache *)0); } else { } return ((struct kmem_cache *)(& kmalloc_caches) + (unsigned long )index___0); } } extern void *__kmalloc(size_t , gfp_t ) ; extern void *kmem_cache_alloc_notrace(struct kmem_cache * , gfp_t ) ; __inline static void *kmalloc_large(size_t size , gfp_t flags ) { unsigned int order ; int tmp ; void *ret ; unsigned long tmp___0 ; { tmp = get_order(size); order = (unsigned int )tmp; tmp___0 = __get_free_pages(flags | 16384U, order); ret = (void *)tmp___0; kmemleak_alloc((void const *)ret, size, 1, flags); trace_kmalloc((unsigned long )((void *)0), (void const *)ret, size, 4096UL << (int )order, flags); return (ret); } } __inline static void *kmalloc(size_t size , gfp_t flags ) { void *ret ; void *tmp ; struct kmem_cache *s ; struct kmem_cache *tmp___0 ; void *tmp___1 ; { if (0) { if (size > 8192UL) { tmp = kmalloc_large(size, flags); return (tmp); } else { } if ((flags & 1U) == 0U) { tmp___0 = kmalloc_slab(size); s = tmp___0; if ((unsigned long )s == (unsigned long )((struct kmem_cache *)0)) { return ((void *)16); } else { } ret = kmem_cache_alloc_notrace(s, flags); trace_kmalloc((unsigned long )((void *)0), (void const *)ret, size, (size_t )s->size, flags); return (ret); } else { } } else { } tmp___1 = __kmalloc(size, flags); return (tmp___1); } } extern void 
*malloc(size_t size ) ; extern void *calloc(size_t nmemb , size_t size ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned int __VERIFIER_nondet_uint(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int expression ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } long ldv__builtin_expect(long exp , long c ) { { return (exp); } } void ldv__builtin_trap(void) { { ldv_error(); return; } } int LDV_IN_INTERRUPT = 1; struct block_device *virtblk_fops_group0 ; int ldv_state_variable_2 ; int ref_cnt ; int ldv_state_variable_1 ; struct virtio_device *virtio_blk_group0 ; int ldv_state_variable_0 ; void ldv_initialize_virtio_driver_1(void) ; void ldv_initialize_block_device_operations_2(void) ; __inline static void sg_assign_page(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (63), "i" (12UL)); ldv_26127: ; goto ldv_26127; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (65), "i" (12UL)); ldv_26128: ; goto ldv_26128; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (66), "i" (12UL)); ldv_26129: ; goto ldv_26129; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static void sg_set_buf(struct scatterlist *sg , void const *buf , unsigned int buflen ) { unsigned long tmp ; { tmp = __phys_addr((unsigned long )buf); sg_set_page(sg, (struct page *)-24189255811072L + (tmp >> 12), buflen, (unsigned int )((long )buf) & 4095U); return; } } extern void sg_init_table(struct scatterlist * , unsigned int ) ; extern int 
virtqueue_add_buf_gfp(struct virtqueue * , struct scatterlist * , unsigned int , unsigned int , void * , gfp_t ) ; __inline static int virtqueue_add_buf(struct virtqueue *vq , struct scatterlist *sg , unsigned int out_num , unsigned int in_num , void *data ) { int tmp ; { tmp = virtqueue_add_buf_gfp(vq, sg, out_num, in_num, data, 32U); return (tmp); } } extern void virtqueue_kick(struct virtqueue * ) ; extern void *virtqueue_get_buf(struct virtqueue * , unsigned int * ) ; extern int register_virtio_driver(struct virtio_driver * ) ; extern void unregister_virtio_driver(struct virtio_driver * ) ; extern void virtio_check_driver_offered_feature(struct virtio_device const * , unsigned int ) ; __inline static bool virtio_has_feature(struct virtio_device const *vdev , unsigned int fbit ) { int tmp ; { if (fbit <= 27U) { virtio_check_driver_offered_feature(vdev, fbit); } else { } tmp = variable_test_bit((int )fbit, (unsigned long const volatile *)(& vdev->features)); return (tmp != 0); } } __inline static int virtio_config_buf(struct virtio_device *vdev , unsigned int fbit , unsigned int offset , void *buf , unsigned int len ) { bool tmp ; int tmp___0 ; { tmp = virtio_has_feature((struct virtio_device const *)vdev, fbit); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-2); } else { } (*((vdev->config)->get))(vdev, offset, buf, len); return (0); } } __inline static struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev , vq_callback_t *c , char const *n ) { vq_callback_t *callbacks[1U] ; char const *names[1U] ; struct virtqueue *vq ; int err ; int tmp ; void *tmp___0 ; { callbacks[0] = c; names[0] = n; tmp = (*((vdev->config)->find_vqs))(vdev, 1U, & vq, (vq_callback_t **)(& callbacks), (char const **)(& names)); err = tmp; if (err < 0) { tmp___0 = ERR_PTR((long )err); return ((struct virtqueue *)tmp___0); } else { } return (vq); } } static int major ; static int index ; static void blk_done(struct virtqueue *vq ) { struct virtio_blk *vblk ; struct virtblk_req *vbr ; unsigned int len ; unsigned long flags ; raw_spinlock_t *tmp ; int error ; void *tmp___0 ; { vblk = (struct virtio_blk *)(vq->vdev)->priv; tmp = spinlock_check(& vblk->lock); flags = _raw_spin_lock_irqsave(tmp); goto ldv_26569; ldv_26568: ; switch ((int )vbr->status) { case 0: error = 0; goto ldv_26565; case 2: error = -25; goto ldv_26565; default: error = -5; goto ldv_26565; } ldv_26565: ; if ((unsigned int )(vbr->req)->cmd_type == 2U) { (vbr->req)->resid_len = vbr->in_hdr.residual; (vbr->req)->sense_len = vbr->in_hdr.sense_len; (vbr->req)->errors = (int )vbr->in_hdr.errors; } else { } if ((unsigned int )(vbr->req)->cmd_type == 7U) { (vbr->req)->errors = error != 0; } else { } __blk_end_request_all(vbr->req, error); list_del(& vbr->list); mempool_free((void *)vbr, vblk->pool); ldv_26569: tmp___0 = virtqueue_get_buf(vblk->vq, & len); vbr = (struct virtblk_req *)tmp___0; if ((unsigned long )vbr != (unsigned long )((struct virtblk_req *)0)) { goto ldv_26568; } else { } blk_start_queue((vblk->disk)->queue); spin_unlock_irqrestore(& vblk->lock, flags); return; } } static bool do_req(struct request_queue *q , struct virtio_blk *vblk , struct request *req ) { unsigned long num ; unsigned long out ; unsigned long in ; struct virtblk_req *vbr ; void *tmp ; sector_t tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned long tmp___5 ; unsigned long tmp___6 ; int tmp___7 ; unsigned long tmp___8 ; unsigned long tmp___9 ; unsigned long tmp___10 ; 
int tmp___11 ; { out = 0UL; in = 0UL; tmp = mempool_alloc(vblk->pool, 32U); vbr = (struct virtblk_req *)tmp; if ((unsigned long )vbr == (unsigned long )((struct virtblk_req *)0)) { return (0); } else { } vbr->req = req; switch ((unsigned int )req->cmd_type) { case 1U: vbr->out_hdr.type = 0U; tmp___0 = blk_rq_pos((struct request const *)vbr->req); vbr->out_hdr.sector = (__u64 )tmp___0; tmp___1 = req_get_ioprio(vbr->req); vbr->out_hdr.ioprio = (__u32 )tmp___1; goto ldv_26581; case 2U: vbr->out_hdr.type = 2U; vbr->out_hdr.sector = 0ULL; tmp___2 = req_get_ioprio(vbr->req); vbr->out_hdr.ioprio = (__u32 )tmp___2; goto ldv_26581; case 7U: vbr->out_hdr.type = 8U; vbr->out_hdr.sector = 0ULL; tmp___3 = req_get_ioprio(vbr->req); vbr->out_hdr.ioprio = (__u32 )tmp___3; goto ldv_26581; case 8U: ; if ((unsigned int )*(req->cmd) == 65U) { vbr->out_hdr.type = 4U; vbr->out_hdr.sector = 0ULL; tmp___4 = req_get_ioprio(vbr->req); vbr->out_hdr.ioprio = (__u32 )tmp___4; goto ldv_26581; } else { } default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/gratinskiy/commit-tester-runs/6-cpa-svcomp-newdeg/commit-tester-work/task-011--linux-stable--dir/work/current--X--drivers/block/virtio_blk.ko--X--defaultlinux-stable-39a1d13-1--X--101_1a--X--cpachecker/linux-stable-39a1d13-1/csd_deg_dscv/11/dscv_tempdir/dscv/ri/101_1a/drivers/block/virtio_blk.o.c.prepared"), "i" (151), "i" (12UL)); ldv_26586: ; goto ldv_26586; } ldv_26581: ; if (((vbr->req)->cmd_flags & 128U) != 0U) { vbr->out_hdr.type = vbr->out_hdr.type | 2147483648U; } else { } tmp___5 = out; out = out + 1UL; sg_set_buf((struct scatterlist *)(& vblk->sg) + tmp___5, (void const *)(& vbr->out_hdr), 16U); if ((unsigned int )(vbr->req)->cmd_type == 2U) { tmp___6 = out; out = out + 1UL; sg_set_buf((struct scatterlist *)(& vblk->sg) + tmp___6, (void const *)(vbr->req)->cmd, (unsigned int )(vbr->req)->cmd_len); } else { } tmp___7 = blk_rq_map_sg(q, vbr->req, (struct scatterlist *)(& vblk->sg) + out); num = (unsigned long )tmp___7; if ((unsigned int )(vbr->req)->cmd_type == 2U) { tmp___8 = in; in = in + 1UL; sg_set_buf((struct scatterlist *)(& vblk->sg) + ((num + out) + tmp___8), (void const *)(vbr->req)->sense, 96U); tmp___9 = in; in = in + 1UL; sg_set_buf((struct scatterlist *)(& vblk->sg) + ((num + out) + tmp___9), (void const *)(& vbr->in_hdr), 16U); } else { } tmp___10 = in; in = in + 1UL; sg_set_buf((struct scatterlist *)(& vblk->sg) + ((num + out) + tmp___10), (void const *)(& vbr->status), 1U); if (num != 0UL) { if ((int )(vbr->req)->cmd_flags & 1) { vbr->out_hdr.type = vbr->out_hdr.type | 1U; out = out + num; } else { vbr->out_hdr.type = vbr->out_hdr.type; in = in + num; } } else { } tmp___11 = virtqueue_add_buf(vblk->vq, (struct scatterlist *)(& vblk->sg), (unsigned int )out, (unsigned int )in, (void *)vbr); if (tmp___11 < 0) { mempool_free((void *)vbr, vblk->pool); return (0); } else { } list_add_tail(& vbr->list, & vblk->reqs); return (1); } } static void do_virtblk_request(struct request_queue *q ) { struct virtio_blk *vblk ; struct request *req ; unsigned int issued ; long tmp ; bool tmp___0 ; int tmp___1 ; { vblk = (struct virtio_blk *)q->queuedata; issued = 0U; goto ldv_26596; ldv_26595: tmp = ldv__builtin_expect((unsigned int )((int )req->nr_phys_segments + 2) > vblk->sg_elems, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" 
((char *)"/home/ldvuser/gratinskiy/commit-tester-runs/6-cpa-svcomp-newdeg/commit-tester-work/task-011--linux-stable--dir/work/current--X--drivers/block/virtio_blk.ko--X--defaultlinux-stable-39a1d13-1--X--101_1a--X--cpachecker/linux-stable-39a1d13-1/csd_deg_dscv/11/dscv_tempdir/dscv/ri/101_1a/drivers/block/virtio_blk.o.c.prepared"), "i" (205), "i" (12UL)); ldv_26593: ; goto ldv_26593; } else { } tmp___0 = do_req(q, vblk, req); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { blk_stop_queue(q); goto ldv_26594; } else { } blk_start_request(req); issued = issued + 1U; ldv_26596: req = blk_peek_request(q); if ((unsigned long )req != (unsigned long )((struct request *)0)) { goto ldv_26595; } else { } ldv_26594: ; if (issued != 0U) { virtqueue_kick(vblk->vq); } else { } return; } } static void virtblk_prepare_flush(struct request_queue *q , struct request *req ) { { req->cmd_type = 8; *(req->cmd) = 65U; return; } } static int virtblk_get_id(struct gendisk *disk , char *id_str ) { struct virtio_blk *vblk ; struct request *req ; struct bio *bio ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; { vblk = (struct virtio_blk *)disk->private_data; bio = bio_map_kern((vblk->disk)->queue, (void *)id_str, 20U, 208U); tmp___0 = IS_ERR((void const *)bio); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)bio); return ((int )tmp); } else { } req = ldv_blk_make_request_5((vblk->disk)->queue, bio, 208U); tmp___2 = IS_ERR((void const *)req); if (tmp___2 != 0L) { bio_put(bio); tmp___1 = PTR_ERR((void const *)req); return ((int )tmp___1); } else { } req->cmd_type = 7; tmp___3 = blk_execute_rq((vblk->disk)->queue, vblk->disk, req, 0); return (tmp___3); } } static int virtblk_ioctl(struct block_device *bdev , fmode_t mode , unsigned int cmd , unsigned long data ) { struct gendisk *disk ; struct virtio_blk *vblk ; void *usr_data ; char id_str[20U] ; int err ; int tmp ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; { disk = bdev->bd_disk; vblk = (struct virtio_blk *)disk->private_data; if (cmd == 1447184708U) { usr_data = (void *)data; err = virtblk_get_id(disk, (char *)(& id_str)); if (err == 0) { tmp = copy_to_user(usr_data, (void const *)(& id_str), 20U); if (tmp != 0) { err = -14; } else { } } else { } return (err); } else { } tmp___0 = virtio_has_feature((struct virtio_device const *)vblk->vdev, 7U); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-25); } else { } tmp___2 = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, (void *)data); return (tmp___2); } } static int virtblk_getgeo(struct block_device *bd , struct hd_geometry *geo ) { struct virtio_blk *vblk ; struct virtio_blk_geometry vgeo ; int err ; sector_t tmp ; { vblk = (struct virtio_blk *)(bd->bd_disk)->private_data; err = virtio_config_buf(vblk->vdev, 4U, 16U, (void *)(& vgeo), 4U); if (err == 0) { geo->heads = vgeo.heads; geo->sectors = vgeo.sectors; geo->cylinders = vgeo.cylinders; } else { geo->heads = 64U; geo->sectors = 32U; tmp = get_capacity(bd->bd_disk); geo->cylinders = (unsigned short )(tmp >> 11); } return (0); } } static struct block_device_operations const virtblk_fops = {0, 0, & virtblk_ioctl, 0, 0, 0, 0, 0, 0, & virtblk_getgeo, 0, & __this_module}; static int index_to_minor(int index___0 ) { { return (index___0 << 4); } } static int virtblk_probe(struct virtio_device *vdev ) { struct virtio_blk *vblk ; struct request_queue *q ; int err ; u64 cap ; u32 v ; u32 blk_size ; u32 sg_elems ; u32 opt_io_size ; u16 min_io_size ; u8 physical_block_exp ; u8 alignment_offset ; int tmp ; 
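/* The probe body below follows the bring-up sequence visible in this prepared
   source: read the maximum segment count from the device config (feature bit
   2, falling back to a single data segment), allocate struct virtio_blk with a
   trailing scatterlist of sg_elems entries, find the single "requests"
   virtqueue with blk_done() as its callback, create the virtblk_req mempool,
   allocate the gendisk and the request queue driven by do_virtblk_request(),
   derive the vdX / vdXY / vdXYZ disk name from the global index, configure
   flush/barrier ordering, the read-only flag, the capacity and the queue
   limits from the remaining config fields, and finally call add_disk(). The
   out_* labels at the end unwind these allocations in reverse order. */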
void *tmp___0 ; struct lock_class_key __key ; long tmp___1 ; long tmp___2 ; struct request_queue *tmp___3 ; unsigned int m1 ; unsigned int m2 ; unsigned int m3 ; bool tmp___4 ; bool tmp___5 ; bool tmp___6 ; unsigned short tmp___9 ; { tmp = index_to_minor(index); if (tmp > 1048575) { return (-28); } else { } err = virtio_config_buf(vdev, 2U, 12U, (void *)(& sg_elems), 4U); if (err != 0 || sg_elems == 0U) { sg_elems = 1U; } else { } sg_elems = sg_elems + 2U; tmp___0 = kmalloc((unsigned long )sg_elems * 40UL + 120UL, 208U); vblk = (struct virtio_blk *)tmp___0; vdev->priv = (void *)vblk; if ((unsigned long )vblk == (unsigned long )((struct virtio_blk *)0)) { err = -12; goto out; } else { } INIT_LIST_HEAD(& vblk->reqs); spinlock_check(& vblk->lock); __raw_spin_lock_init(& vblk->lock.ldv_5847.rlock, "&(&vblk->lock)->rlock", & __key); vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table((struct scatterlist *)(& vblk->sg), vblk->sg_elems); vblk->vq = virtio_find_single_vq(vdev, & blk_done, "requests"); tmp___2 = IS_ERR((void const *)vblk->vq); if (tmp___2 != 0L) { tmp___1 = PTR_ERR((void const *)vblk->vq); err = (int )tmp___1; goto out_free_vblk; } else { } vblk->pool = mempool_create_kmalloc_pool(1, 64UL); if ((unsigned long )vblk->pool == (unsigned long )((mempool_t *)0)) { err = -12; goto out_free_vq; } else { } vblk->disk = alloc_disk(16); if ((unsigned long )vblk->disk == (unsigned long )((struct gendisk *)0)) { err = -12; goto out_mempool; } else { } tmp___3 = blk_init_queue(& do_virtblk_request, & vblk->lock); (vblk->disk)->queue = tmp___3; q = tmp___3; if ((unsigned long )q == (unsigned long )((struct request_queue *)0)) { err = -12; goto out_put_disk; } else { } q->queuedata = (void *)vblk; if (index <= 25) { sprintf((char *)(& (vblk->disk)->disk_name), "vd%c", index % 26 + 97); } else if (index <= 701) { sprintf((char *)(& (vblk->disk)->disk_name), "vd%c%c", index / 26 + 96, index % 26 + 97); } else { m1 = (unsigned int const )((index / 26 + -1) / 26 + -1); m2 = (unsigned int const )((index / 26 + -1) % 26); m3 = (unsigned int const )(index % 26); sprintf((char *)(& (vblk->disk)->disk_name), "vd%c%c%c", m1 + 97U, m2 + 97U, m3 + 97U); } (vblk->disk)->major = major; (vblk->disk)->first_minor = index_to_minor(index); (vblk->disk)->private_data = (void *)vblk; (vblk->disk)->fops = & virtblk_fops; (vblk->disk)->driverfs_dev = & vdev->dev; index = index + 1; tmp___5 = virtio_has_feature((struct virtio_device const *)vdev, 9U); if ((int )tmp___5) { blk_queue_ordered(q, 113U, & virtblk_prepare_flush); } else { tmp___4 = virtio_has_feature((struct virtio_device const *)vdev, 0U); if ((int )tmp___4) { blk_queue_ordered(q, 34U, (prepare_flush_fn *)0); } else { } } tmp___6 = virtio_has_feature((struct virtio_device const *)vdev, 5U); if ((int )tmp___6) { set_disk_ro(vblk->disk, 1); } else { } (*((vdev->config)->get))(vdev, 0U, (void *)(& cap), 8U); set_capacity(vblk->disk, (sector_t )cap); blk_queue_max_segments(q, (int )((unsigned int )((unsigned short )vblk->sg_elems) - 2U)); blk_queue_bounce_limit(q, 0xffffffffffffffffULL); blk_queue_max_hw_sectors(q, 4294967295U); err = virtio_config_buf(vdev, 1U, 8U, (void *)(& v), 4U); if (err == 0) { blk_queue_max_segment_size(q, v); } else { blk_queue_max_segment_size(q, 4294967295U); } err = virtio_config_buf(vdev, 6U, 20U, (void *)(& blk_size), 4U); if (err == 0) { blk_queue_logical_block_size(q, (int )((unsigned short )blk_size)); } else { tmp___9 = queue_logical_block_size(q); blk_size = (u32 )tmp___9; } err = virtio_config_buf(vdev, 10U, 24U, 
(void *)(& physical_block_exp), 1U); if (err == 0 && (unsigned int )physical_block_exp != 0U) { blk_queue_physical_block_size(q, (int )((unsigned short )(blk_size << (int )physical_block_exp))); } else { } err = virtio_config_buf(vdev, 10U, 25U, (void *)(& alignment_offset), 1U); if (err == 0 && (unsigned int )alignment_offset != 0U) { blk_queue_alignment_offset(q, (u32 )alignment_offset * blk_size); } else { } err = virtio_config_buf(vdev, 10U, 26U, (void *)(& min_io_size), 2U); if (err == 0 && (unsigned int )min_io_size != 0U) { blk_queue_io_min(q, (u32 )min_io_size * blk_size); } else { } err = virtio_config_buf(vdev, 10U, 28U, (void *)(& opt_io_size), 4U); if (err == 0 && opt_io_size != 0U) { blk_queue_io_opt(q, blk_size * opt_io_size); } else { } add_disk(vblk->disk); return (0); out_put_disk: put_disk(vblk->disk); out_mempool: mempool_destroy(vblk->pool); out_free_vq: (*((vdev->config)->del_vqs))(vdev); out_free_vblk: kfree((void const *)vblk); out: ; return (err); } } static void virtblk_remove(struct virtio_device *vdev ) { struct virtio_blk *vblk ; int tmp ; long tmp___0 ; { vblk = (struct virtio_blk *)vdev->priv; tmp = list_empty((struct list_head const *)(& vblk->reqs)); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/gratinskiy/commit-tester-runs/6-cpa-svcomp-newdeg/commit-tester-work/task-011--linux-stable--dir/work/current--X--drivers/block/virtio_blk.ko--X--defaultlinux-stable-39a1d13-1--X--101_1a--X--cpachecker/linux-stable-39a1d13-1/csd_deg_dscv/11/dscv_tempdir/dscv/ri/101_1a/drivers/block/virtio_blk.o.c.prepared"), "i" (495), "i" (12UL)); ldv_26657: ; goto ldv_26657; } else { } (*((vdev->config)->reset))(vdev); del_gendisk(vblk->disk); blk_cleanup_queue((vblk->disk)->queue); put_disk(vblk->disk); mempool_destroy(vblk->pool); (*((vdev->config)->del_vqs))(vdev); kfree((void const *)vblk); return; } } static struct virtio_device_id const id_table[2U] = { {2U, 4294967295U}, {0U, 0U}}; static unsigned int features[9U] = { 0U, 2U, 1U, 4U, 5U, 6U, 7U, 9U, 10U}; static struct virtio_driver virtio_blk = {{"virtio_blk", 0, & __this_module, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0}, (struct virtio_device_id const *)(& id_table), (unsigned int const *)(& features), 9U, & virtblk_probe, & virtblk_remove, 0}; static int init(void) { int tmp ; { major = register_blkdev(0U, "virtblk"); if (major < 0) { return (major); } else { } tmp = register_virtio_driver(& virtio_blk); return (tmp); } } static void fini(void) { { unregister_blkdev((unsigned int )major, "virtblk"); unregister_virtio_driver(& virtio_blk); return; } } struct virtio_device_id const __mod_virtio_device_table ; int ldv_retval_0 ; int ldv_retval_1 ; extern void ldv_initialize(void) ; void ldv_check_final_state(void) ; void ldv_initialize_virtio_driver_1(void) { void *tmp ; { tmp = ldv_zalloc(1048UL); virtio_blk_group0 = (struct virtio_device *)tmp; return; } } void ldv_initialize_block_device_operations_2(void) { void *tmp ; { tmp = ldv_zalloc(464UL); virtblk_fops_group0 = (struct block_device *)tmp; return; } } int main(void) { unsigned long ldvarg1 ; unsigned long tmp ; fmode_t ldvarg3 ; struct hd_geometry *ldvarg0 ; void *tmp___0 ; unsigned int ldvarg2 ; unsigned int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp = __VERIFIER_nondet_ulong(); ldvarg1 = tmp; tmp___0 = ldv_zalloc(16UL); ldvarg0 = (struct hd_geometry *)tmp___0; 
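/* What follows is the LDV-generated environment model: after ldv_initialize()
   the harness enters a nondeterministic loop that interleaves three state
   machines: the module init()/fini() pair (ldv_state_variable_0), the
   virtio_driver probe/remove callbacks (ldv_state_variable_1), and the
   block_device_operations ioctl/getgeo callbacks (ldv_state_variable_2). Each
   entry point is called with nondeterministic arguments and only when its
   tracked state says it has been registered. The loop exits through ldv_final,
   where ldv_check_final_state() checks the ldv_blk_rq request-tracking model
   defined at the end of the file. */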
tmp___1 = __VERIFIER_nondet_uint(); ldvarg2 = tmp___1; ldv_initialize(); memset((void *)(& ldvarg3), 0, 4UL); ldv_state_variable_1 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_2 = 0; ldv_26727: tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_1 != 0) { tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_1 == 1) { ldv_retval_0 = virtblk_probe(virtio_blk_group0); if (ldv_retval_0 == 0) { ldv_state_variable_1 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_26711; case 1: ; if (ldv_state_variable_1 == 2) { virtblk_remove(virtio_blk_group0); ldv_state_variable_1 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_26711; default: ldv_stop(); } ldv_26711: ; } else { } goto ldv_26714; case 1: ; if (ldv_state_variable_0 != 0) { tmp___4 = __VERIFIER_nondet_int(); switch (tmp___4) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { fini(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_26718; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_1 = init(); if (ldv_retval_1 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_2 = 1; ldv_initialize_block_device_operations_2(); ldv_state_variable_1 = 1; ldv_initialize_virtio_driver_1(); } else { } if (ldv_retval_1 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_26718; default: ldv_stop(); } ldv_26718: ; } else { } goto ldv_26714; case 2: ; if (ldv_state_variable_2 != 0) { tmp___5 = __VERIFIER_nondet_int(); switch (tmp___5) { case 0: ; if (ldv_state_variable_2 == 1) { virtblk_ioctl(virtblk_fops_group0, ldvarg3, ldvarg2, ldvarg1); ldv_state_variable_2 = 1; } else { } goto ldv_26723; case 1: ; if (ldv_state_variable_2 == 1) { virtblk_getgeo(virtblk_fops_group0, ldvarg0); ldv_state_variable_2 = 1; } else { } goto ldv_26723; default: ldv_stop(); } ldv_26723: ; } else { } goto ldv_26714; default: ldv_stop(); } ldv_26714: ; goto ldv_26727; ldv_final: ldv_check_final_state(); return 0; } } __inline static void *ERR_PTR(long error ) { void *tmp ; { tmp = ldv_err_ptr(error); return (tmp); } } __inline static long PTR_ERR(void const *ptr ) { long tmp ; { tmp = ldv_ptr_err(ptr); return (tmp); } } __inline static long IS_ERR(void const *ptr ) { long tmp ; { tmp = ldv_is_err(ptr); return (tmp); } } struct request *ldv_blk_make_request_5(struct request_queue *ldv_func_arg1 , struct bio *ldv_func_arg2 , gfp_t ldv_func_arg3 ) { struct request *tmp ; { tmp = ldv_blk_make_request(ldv_func_arg3); return (tmp); } } __inline static void ldv_stop___0(void) { { LDV_STOP: goto LDV_STOP; } } long ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } } void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } } long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } } long ldv_is_err_or_null(void const *ptr ) { long tmp ; int tmp___0 ; { if (! ptr) { tmp___0 = 1; } else { tmp = ldv_is_err((unsigned long )ptr); if (tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return (tmp___0); } } __inline static long ( __attribute__((__warn_unused_result__)) IS_ERR___0)(void const *ptr ) { long tmp ; { tmp = ldv__builtin_expect(! (! 
((unsigned long )ptr >= 0xfffffffffffff001UL)), 0); return (tmp); } }

/* LDV model of the block-layer request lifecycle checked by this harness (the
   "101_1a" rule referenced in the file paths embedded above): ldv_blk_rq is 1
   while a request obtained through the modelled blk_get_request() /
   blk_make_request() wrappers is outstanding. Acquiring a second request while
   one is held, releasing one that is not held, or reaching the end of the
   harness with one still held is reported through ldv_error(). */
int ldv_blk_rq = 0;

/* Model of blk_get_request(): may only be called with no request outstanding;
   returns a nondeterministic pointer and, for the mask values 16U and 208U,
   assumes the allocation succeeded. */
struct request *ldv_blk_get_request(gfp_t mask )
{
  struct request *res ;
  void *tmp ;
  {
  if (ldv_blk_rq == 0) { } else { ldv_error(); }
  tmp = ldv_undef_ptr();
  res = (struct request *)tmp;
  if ((mask == 16U || mask == 208U) || mask == 16U) {
    if ((unsigned long )res != (unsigned long )((void *)0)) { } else { ldv_stop___0(); }
  } else { }
  if ((unsigned long )res != (unsigned long )((void *)0)) { ldv_blk_rq = 1; } else { }
  return (res);
  }
}

/* Model of blk_make_request(): the result is assumed non-NULL; the request is
   counted as outstanding only when it is not an ERR_PTR value. */
struct request *ldv_blk_make_request(gfp_t mask )
{
  struct request *res ;
  void *tmp ;
  long tmp___0 ;
  {
  if (ldv_blk_rq == 0) { } else { ldv_error(); }
  tmp = ldv_undef_ptr();
  res = (struct request *)tmp;
  if ((unsigned long )res != (unsigned long )((void *)0)) { } else { ldv_stop___0(); }
  tmp___0 = IS_ERR___0(res);
  if (tmp___0) { } else { ldv_blk_rq = 1; }
  return (res);
  }
}

/* Releasing the tracked request: calling this without an outstanding request
   is an error. */
void ldv_put_blk_rq(void)
{
  {
  if (ldv_blk_rq == 1) { } else { ldv_error(); }
  ldv_blk_rq = 0;
  return;
  }
}

/* Called at the end of main(): the property holds only if no request is still
   outstanding. */
void ldv_check_final_state(void)
{
  {
  if (ldv_blk_rq == 0) { } else { ldv_error(); }
  return;
  }
}