extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __be16; typedef __u32 __be32; typedef __u32 __wsum; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long pteval_t; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; 
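/* Editorial note (illustrative only, not part of the CIL-generated harness):
 * struct list_head defined above is the kernel's intrusive, circular
 * doubly-linked list. The node is embedded inside a container struct and
 * spliced in by pointer surgery; the container is later recovered by
 * subtracting the member offset (what the kernel's container_of()/
 * list_entry() macros do; those macros are not reproduced in this extract).
 * A minimal sketch, assuming a hypothetical my_item container type and a
 * hypothetical helper name sketch_list_add:
 *
 *   struct my_item { int value; struct list_head node; };
 *
 *   static void sketch_list_add(struct list_head *entry, struct list_head *head)
 *   {
 *       entry->next = head->next;   // splice entry in right after head
 *       entry->prev = head;
 *       head->next->prev = entry;
 *       head->next = entry;
 *   }
 *
 * Usage: initialize an empty list with head.next = head.prev = &head, then
 * call sketch_list_add(&item->node, &head) to insert an item after the head.
 */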
typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct mm_struct; struct task_struct; struct cpumask; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct device; struct net_device; struct file_operations; struct completion; struct lockdep_map; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_15 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct static_key; struct fregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_25 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_26 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_24 { struct __anonstruct____missing_field_name_25 __annonCompField11 ; struct __anonstruct____missing_field_name_26 __annonCompField12 ; }; union __anonunion____missing_field_name_27 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_24 __annonCompField13 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_27 __annonCompField14 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; }; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; struct stack_trace { unsigned int 
nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_31 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_30 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_31 __annonCompField16 ; }; struct spinlock { union __anonunion____missing_field_name_30 __annonCompField17 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_32 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_32 rwlock_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_34 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_35 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_36 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_33 { struct __anonstruct_futex_34 futex ; struct __anonstruct_nanosleep_35 nanosleep ; struct __anonstruct_poll_36 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_33 __annonCompField18 ; }; struct static_key { atomic_t enabled ; }; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; 
char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct notifier_block; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct____missing_field_name_47 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion____missing_field_name_46 { struct __anonstruct____missing_field_name_47 __annonCompField19 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion____missing_field_name_46 __annonCompField20 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct vm_area_struct; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int 
(*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct __anonstruct_mm_context_t_113 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_113 mm_context_t; struct bio_vec; struct llist_node; struct llist_node { struct llist_node *next ; }; struct call_single_data { struct llist_node llist ; void (*func)(void * ) ; void *info ; unsigned int flags ; }; struct kmem_cache; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct inode; struct dentry; struct user_namespace; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_146 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct __anonstruct____missing_field_name_147 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_145 { struct __anonstruct____missing_field_name_146 __annonCompField33 ; struct __anonstruct____missing_field_name_147 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_145 __annonCompField35 ; struct uprobe 
*active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_148 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_150 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_154 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_153 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_154 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_152 { union __anonunion____missing_field_name_153 __annonCompField39 ; atomic_t _count ; }; union __anonunion____missing_field_name_151 { unsigned long counters ; struct __anonstruct____missing_field_name_152 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_149 { union __anonunion____missing_field_name_150 __annonCompField37 ; union __anonunion____missing_field_name_151 __annonCompField41 ; }; struct __anonstruct____missing_field_name_156 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_157 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_155 { struct list_head lru ; struct __anonstruct____missing_field_name_156 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_157 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; union __anonunion____missing_field_name_158 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_148 __annonCompField36 ; struct __anonstruct____missing_field_name_149 __annonCompField42 ; union __anonunion____missing_field_name_155 __annonCompField45 ; union __anonunion____missing_field_name_158 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_159 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_159 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long 
mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int numa_scan_seq ; bool tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef unsigned long cputime_t; struct __anonstruct_kuid_t_161 { uid_t val ; }; typedef struct __anonstruct_kuid_t_161 kuid_t; struct __anonstruct_kgid_t_162 { gid_t val ; }; typedef struct __anonstruct_kgid_t_162 kgid_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; struct sysv_shm { struct list_head shm_clist ; }; struct __anonstruct_sigset_t_163 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_163 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_165 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_166 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_167 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_168 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_170 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_169 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_170 _addr_bnd ; }; struct __anonstruct__sigpoll_171 { long _band ; int _fd ; }; struct __anonstruct__sigsys_172 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_164 { int _pad[28U] ; struct __anonstruct__kill_165 _kill ; struct __anonstruct__timer_166 _timer ; struct __anonstruct__rt_167 _rt ; struct __anonstruct__sigchld_168 _sigchld ; struct __anonstruct__sigfault_169 _sigfault ; struct __anonstruct__sigpoll_171 _sigpoll ; struct __anonstruct__sigsys_172 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_164 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t 
sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct cred; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_179 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_180 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_182 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_181 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_182 __annonCompField49 ; }; union __anonunion_type_data_183 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_185 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_184 { union __anonunion_payload_185 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_179 __annonCompField47 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union 
__anonunion____missing_field_name_180 __annonCompField48 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_181 __annonCompField50 ; union __anonunion_type_data_183 type_data ; union __anonunion____missing_field_name_184 __annonCompField51 ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; union __anonunion____missing_field_name_186 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_186 __annonCompField52 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct kernfs_node; struct kernfs_ops; struct kernfs_open_file; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node *populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct kernfs_root; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int 
flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct 
thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; 
u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head; struct numa_group; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct 
mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; struct config_item; struct t10_alua_tg_pt_gp; struct se_dev_attrib; struct se_hba; struct configfs_attribute; struct config_group; struct se_device; struct se_lun_acl; struct se_lun; struct trace_event_call; struct t10_wwn; struct t10_alua_lu_gp; struct se_cmd; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct 
kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_209 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t active ; struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union __anonunion____missing_field_name_209 __annonCompField56 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root * , int * , char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char 
state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_210 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_210 __annonCompField57 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; 
struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct mnt_namespace; struct ipc_namespace; struct net; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct new_utsname { char sysname[65U] ; char nodename[65U] ; char release[65U] ; char version[65U] ; char machine[65U] ; char domainname[65U] ; }; struct uts_namespace { struct kref kref ; struct new_utsname name ; struct user_namespace *user_ns ; struct ns_common ns ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_220 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_219 { struct __anonstruct____missing_field_name_220 __annonCompField58 ; }; struct lockref { union __anonunion____missing_field_name_219 __annonCompField59 ; }; struct path; struct vfsmount; struct __anonstruct____missing_field_name_222 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_221 { struct __anonstruct____missing_field_name_222 __annonCompField60 ; u64 hash_len ; }; struct qstr { union __anonunion____missing_field_name_221 __annonCompField61 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_223 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_223 d_u ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const 
* ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_227 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_226 { struct __anonstruct____missing_field_name_227 __annonCompField62 ; struct callback_head callback_head ; }; struct radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_226 __annonCompField63 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct semaphore { raw_spinlock_t lock ; unsigned int count ; struct list_head wait_list ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct bio_set; struct bio; struct bio_integrity_payload; struct block_device; typedef void bio_end_io_t(struct bio * , int ); struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bvec_iter { sector_t bi_sector ; unsigned int bi_size ; unsigned int bi_idx ; unsigned int bi_bvec_done ; }; union __anonunion____missing_field_name_230 { struct bio_integrity_payload *bi_integrity ; }; struct bio { struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; struct bvec_iter bi_iter ; unsigned int bi_phys_segments ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; atomic_t __bi_remaining ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; union __anonunion____missing_field_name_230 __annonCompField64 ; unsigned short bi_vcnt ; unsigned short bi_max_vecs ; atomic_t __bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct bdi_writeback; struct export_operations; struct hd_geometry; struct iovec; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iov_iter; struct vm_fault; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_231 { projid_t val ; }; typedef 
struct __anonstruct_kprojid_t_231 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_232 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_232 __annonCompField65 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct 
super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct writeback_control; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(struct kiocb * , struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_235 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_236 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_237 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; 
struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_235 __annonCompField66 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_236 __annonCompField67 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_237 __annonCompField68 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_238 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_238 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_240 { struct list_head link ; int state ; }; union __anonunion_fl_u_239 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_240 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct 
*fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_239 fl_u ; }; struct file_lock_context { spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int 
, size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * , struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int 
(*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct match_token { int token ; char const *pattern ; }; struct __anonstruct_substring_t_241 { char *from ; char *to ; }; typedef struct __anonstruct_substring_t_241 substring_t; struct sockaddr; struct perf_event_attr; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; struct static_key key ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct trace_enum_map { char const *system ; char const *enum_string ; unsigned long enum_value ; }; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page *(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct kvec; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct exception_table_entry { int insn ; int fixup ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct ring_buffer; struct ring_buffer_iter; struct trace_seq; struct seq_buf { char *buffer ; size_t size ; size_t len ; loff_t readpos ; }; struct trace_seq { unsigned char buffer[4096U] ; struct seq_buf seq ; int full ; }; struct proc_dir_entry; union __anonunion____missing_field_name_251 { __u64 sample_period ; __u64 sample_freq ; }; union __anonunion____missing_field_name_252 { __u32 wakeup_events ; __u32 wakeup_watermark ; }; union __anonunion____missing_field_name_253 { __u64 bp_addr ; __u64 config1 ; }; union __anonunion____missing_field_name_254 { __u64 bp_len ; __u64 config2 ; }; struct 
perf_event_attr { __u32 type ; __u32 size ; __u64 config ; union __anonunion____missing_field_name_251 __annonCompField76 ; __u64 sample_type ; __u64 read_format ; unsigned char disabled : 1 ; unsigned char inherit : 1 ; unsigned char pinned : 1 ; unsigned char exclusive : 1 ; unsigned char exclude_user : 1 ; unsigned char exclude_kernel : 1 ; unsigned char exclude_hv : 1 ; unsigned char exclude_idle : 1 ; unsigned char mmap : 1 ; unsigned char comm : 1 ; unsigned char freq : 1 ; unsigned char inherit_stat : 1 ; unsigned char enable_on_exec : 1 ; unsigned char task : 1 ; unsigned char watermark : 1 ; unsigned char precise_ip : 2 ; unsigned char mmap_data : 1 ; unsigned char sample_id_all : 1 ; unsigned char exclude_host : 1 ; unsigned char exclude_guest : 1 ; unsigned char exclude_callchain_kernel : 1 ; unsigned char exclude_callchain_user : 1 ; unsigned char mmap2 : 1 ; unsigned char comm_exec : 1 ; unsigned char use_clockid : 1 ; unsigned long __reserved_1 : 38 ; union __anonunion____missing_field_name_252 __annonCompField77 ; __u32 bp_type ; union __anonunion____missing_field_name_253 __annonCompField78 ; union __anonunion____missing_field_name_254 __annonCompField79 ; __u64 branch_sample_type ; __u64 sample_regs_user ; __u32 sample_stack_user ; __s32 clockid ; __u64 sample_regs_intr ; __u32 aux_watermark ; __u32 __reserved_2 ; }; struct __anonstruct____missing_field_name_257 { unsigned char mem_op : 5 ; unsigned short mem_lvl : 14 ; unsigned char mem_snoop : 5 ; unsigned char mem_lock : 2 ; unsigned char mem_dtlb : 7 ; unsigned int mem_rsvd : 31 ; }; union perf_mem_data_src { __u64 val ; struct __anonstruct____missing_field_name_257 __annonCompField82 ; }; struct perf_branch_entry { __u64 from ; __u64 to ; unsigned char mispred : 1 ; unsigned char predicted : 1 ; unsigned char in_tx : 1 ; unsigned char abort : 1 ; unsigned long reserved : 60 ; }; struct pidmap { atomic_t nr_free ; void *page ; }; struct fs_pin; struct pid_namespace { struct kref kref ; struct pidmap pidmap[128U] ; struct callback_head rcu ; int last_pid ; unsigned int nr_hashed ; struct task_struct *child_reaper ; struct kmem_cache *pid_cachep ; unsigned int level ; struct pid_namespace *parent ; struct vfsmount *proc_mnt ; struct dentry *proc_self ; struct dentry *proc_thread_self ; struct fs_pin *bacct ; struct user_namespace *user_ns ; struct work_struct proc_work ; kgid_t pid_gid ; int hide_pid ; int reboot ; struct ns_common ns ; }; struct __anonstruct_local_t_265 { atomic_long_t a ; }; typedef struct __anonstruct_local_t_265 local_t; struct __anonstruct_local64_t_266 { local_t a ; }; typedef struct __anonstruct_local64_t_266 local64_t; struct arch_hw_breakpoint { unsigned long address ; unsigned long mask ; u8 len ; u8 type ; }; struct pmu; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct device_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct 
attribute_group const **dev_groups ; struct attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; struct device_type; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct of_device_id; struct acpi_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct 
class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct irq_work { unsigned long flags ; struct llist_node llnode ; void (*func)(struct irq_work * ) ; }; struct perf_regs { __u64 abi ; struct pt_regs *regs ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct perf_callchain_entry { __u64 nr ; __u64 ip[127U] ; }; struct perf_raw_record { u32 size ; void *data ; }; struct perf_branch_stack { __u64 nr ; struct perf_branch_entry entries[0U] ; }; struct hw_perf_event_extra { u64 config ; unsigned int reg ; int alloc ; int idx ; }; struct __anonstruct____missing_field_name_274 { u64 config ; u64 last_tag ; unsigned long config_base ; unsigned long event_base ; int event_base_rdpmc ; int idx ; int last_cpu ; int flags ; struct hw_perf_event_extra extra_reg ; struct hw_perf_event_extra branch_reg ; }; struct __anonstruct____missing_field_name_275 { struct hrtimer hrtimer ; }; struct __anonstruct____missing_field_name_276 { struct list_head tp_list ; }; struct __anonstruct____missing_field_name_277 { int cqm_state ; u32 cqm_rmid ; struct list_head cqm_events_entry ; struct list_head cqm_groups_entry ; struct list_head cqm_group_entry ; }; struct __anonstruct____missing_field_name_278 { int itrace_started ; }; struct __anonstruct____missing_field_name_279 { struct arch_hw_breakpoint info ; struct list_head bp_list ; }; union __anonunion____missing_field_name_273 { struct __anonstruct____missing_field_name_274 __annonCompField83 ; struct __anonstruct____missing_field_name_275 __annonCompField84 ; struct __anonstruct____missing_field_name_276 __annonCompField85 ; struct __anonstruct____missing_field_name_277 __annonCompField86 ; struct __anonstruct____missing_field_name_278 __annonCompField87 ; struct __anonstruct____missing_field_name_279 __annonCompField88 ; }; struct hw_perf_event { union __anonunion____missing_field_name_273 __annonCompField89 ; struct task_struct *target ; int state ; local64_t prev_count ; u64 sample_period ; u64 last_period ; local64_t period_left ; u64 interrupts_seq ; u64 interrupts ; u64 freq_time_stamp ; u64 freq_count_stamp ; }; struct perf_cpu_context; struct pmu { 
struct list_head entry ; struct module *module ; struct device *dev ; struct attribute_group const **attr_groups ; char const *name ; int type ; int capabilities ; int *pmu_disable_count ; struct perf_cpu_context *pmu_cpu_context ; atomic_t exclusive_cnt ; int task_ctx_nr ; int hrtimer_interval_ms ; void (*pmu_enable)(struct pmu * ) ; void (*pmu_disable)(struct pmu * ) ; int (*event_init)(struct perf_event * ) ; void (*event_mapped)(struct perf_event * ) ; void (*event_unmapped)(struct perf_event * ) ; int (*add)(struct perf_event * , int ) ; void (*del)(struct perf_event * , int ) ; void (*start)(struct perf_event * , int ) ; void (*stop)(struct perf_event * , int ) ; void (*read)(struct perf_event * ) ; void (*start_txn)(struct pmu * ) ; int (*commit_txn)(struct pmu * ) ; void (*cancel_txn)(struct pmu * ) ; int (*event_idx)(struct perf_event * ) ; void (*sched_task)(struct perf_event_context * , bool ) ; size_t task_ctx_size ; u64 (*count)(struct perf_event * ) ; void *(*setup_aux)(int , void ** , int , bool ) ; void (*free_aux)(void * ) ; int (*filter_match)(struct perf_event * ) ; }; enum perf_event_active_state { PERF_EVENT_STATE_EXIT = -3, PERF_EVENT_STATE_ERROR = -2, PERF_EVENT_STATE_OFF = -1, PERF_EVENT_STATE_INACTIVE = 0, PERF_EVENT_STATE_ACTIVE = 1 } ; struct perf_sample_data; struct perf_cgroup; struct event_filter; struct perf_event { struct list_head event_entry ; struct list_head group_entry ; struct list_head sibling_list ; struct list_head migrate_entry ; struct hlist_node hlist_entry ; struct list_head active_entry ; int nr_siblings ; int group_flags ; struct perf_event *group_leader ; struct pmu *pmu ; enum perf_event_active_state state ; unsigned int attach_state ; local64_t count ; atomic64_t child_count ; u64 total_time_enabled ; u64 total_time_running ; u64 tstamp_enabled ; u64 tstamp_running ; u64 tstamp_stopped ; u64 shadow_ctx_time ; struct perf_event_attr attr ; u16 header_size ; u16 id_header_size ; u16 read_size ; struct hw_perf_event hw ; struct perf_event_context *ctx ; atomic_long_t refcount ; atomic64_t child_total_time_enabled ; atomic64_t child_total_time_running ; struct mutex child_mutex ; struct list_head child_list ; struct perf_event *parent ; int oncpu ; int cpu ; struct list_head owner_entry ; struct task_struct *owner ; struct mutex mmap_mutex ; atomic_t mmap_count ; struct ring_buffer *rb ; struct list_head rb_entry ; unsigned long rcu_batches ; int rcu_pending ; wait_queue_head_t waitq ; struct fasync_struct *fasync ; int pending_wakeup ; int pending_kill ; int pending_disable ; struct irq_work pending ; atomic_t event_limit ; void (*destroy)(struct perf_event * ) ; struct callback_head callback_head ; struct pid_namespace *ns ; u64 id ; u64 (*clock)(void) ; void (*overflow_handler)(struct perf_event * , struct perf_sample_data * , struct pt_regs * ) ; void *overflow_handler_context ; struct trace_event_call *tp_event ; struct event_filter *filter ; struct perf_cgroup *cgrp ; int cgrp_defer_enabled ; }; struct perf_event_context { struct pmu *pmu ; raw_spinlock_t lock ; struct mutex mutex ; struct list_head active_ctx_list ; struct list_head pinned_groups ; struct list_head flexible_groups ; struct list_head event_list ; int nr_events ; int nr_active ; int is_active ; int nr_stat ; int nr_freq ; int rotate_disable ; atomic_t refcount ; struct task_struct *task ; u64 time ; u64 timestamp ; struct perf_event_context *parent_ctx ; u64 parent_gen ; u64 generation ; int pin_count ; int nr_cgroups ; void *task_ctx_data ; struct callback_head 
callback_head ; struct delayed_work orphans_remove ; bool orphans_remove_sched ; }; struct perf_cpu_context { struct perf_event_context ctx ; struct perf_event_context *task_ctx ; int active_oncpu ; int exclusive ; raw_spinlock_t hrtimer_lock ; struct hrtimer hrtimer ; ktime_t hrtimer_interval ; unsigned int hrtimer_active ; struct pmu *unique_pmu ; struct perf_cgroup *cgrp ; }; struct perf_cgroup_info { u64 time ; u64 timestamp ; }; struct perf_cgroup { struct cgroup_subsys_state css ; struct perf_cgroup_info *info ; }; struct __anonstruct_tid_entry_281 { u32 pid ; u32 tid ; }; struct __anonstruct_cpu_entry_282 { u32 cpu ; u32 reserved ; }; struct perf_sample_data { u64 addr ; struct perf_raw_record *raw ; struct perf_branch_stack *br_stack ; u64 period ; u64 weight ; u64 txn ; union perf_mem_data_src data_src ; u64 type ; u64 ip ; struct __anonstruct_tid_entry_281 tid_entry ; u64 time ; u64 id ; u64 stream_id ; struct __anonstruct_cpu_entry_282 cpu_entry ; struct perf_callchain_entry *callchain ; struct perf_regs regs_user ; struct pt_regs regs_user_copy ; struct perf_regs regs_intr ; u64 stack_user_size ; }; struct trace_array; struct trace_buffer; struct tracer; struct bpf_prog; struct trace_iterator; struct trace_event; struct trace_entry { unsigned short type ; unsigned char flags ; unsigned char preempt_count ; int pid ; }; struct trace_iterator { struct trace_array *tr ; struct tracer *trace ; struct trace_buffer *trace_buffer ; void *private ; int cpu_file ; struct mutex mutex ; struct ring_buffer_iter **buffer_iter ; unsigned long iter_flags ; struct trace_seq tmp_seq ; cpumask_var_t started ; bool snapshot ; struct trace_seq seq ; struct trace_entry *ent ; unsigned long lost_events ; int leftover ; int ent_size ; int cpu ; u64 ts ; loff_t pos ; long idx ; }; enum print_line_t; struct trace_event_functions { enum print_line_t (*trace)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*raw)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*hex)(struct trace_iterator * , int , struct trace_event * ) ; enum print_line_t (*binary)(struct trace_iterator * , int , struct trace_event * ) ; }; struct trace_event { struct hlist_node node ; struct list_head list ; int type ; struct trace_event_functions *funcs ; }; enum print_line_t { TRACE_TYPE_PARTIAL_LINE = 0, TRACE_TYPE_HANDLED = 1, TRACE_TYPE_UNHANDLED = 2, TRACE_TYPE_NO_CONSUME = 3 } ; enum trace_reg { TRACE_REG_REGISTER = 0, TRACE_REG_UNREGISTER = 1, TRACE_REG_PERF_REGISTER = 2, TRACE_REG_PERF_UNREGISTER = 3, TRACE_REG_PERF_OPEN = 4, TRACE_REG_PERF_CLOSE = 5, TRACE_REG_PERF_ADD = 6, TRACE_REG_PERF_DEL = 7 } ; struct trace_event_class { char const *system ; void *probe ; void *perf_probe ; int (*reg)(struct trace_event_call * , enum trace_reg , void * ) ; int (*define_fields)(struct trace_event_call * ) ; struct list_head *(*get_fields)(struct trace_event_call * ) ; struct list_head fields ; int (*raw_init)(struct trace_event_call * ) ; }; union __anonunion____missing_field_name_283 { char *name ; struct tracepoint *tp ; }; struct trace_event_call { struct list_head list ; struct trace_event_class *class ; union __anonunion____missing_field_name_283 __annonCompField91 ; struct trace_event event ; char *print_fmt ; struct event_filter *filter ; void *mod ; void *data ; int flags ; int perf_refcount ; struct hlist_head *perf_events ; struct bpf_prog *prog ; int (*perf_perm)(struct trace_event_call * , struct perf_event * ) ; }; struct configfs_item_operations; struct 
configfs_group_operations; struct configfs_subsystem; struct config_item_type; struct config_item { char *ci_name ; char ci_namebuf[20U] ; struct kref ci_kref ; struct list_head ci_entry ; struct config_item *ci_parent ; struct config_group *ci_group ; struct config_item_type *ci_type ; struct dentry *ci_dentry ; }; struct config_item_type { struct module *ct_owner ; struct configfs_item_operations *ct_item_ops ; struct configfs_group_operations *ct_group_ops ; struct configfs_attribute **ct_attrs ; }; struct config_group { struct config_item cg_item ; struct list_head cg_children ; struct configfs_subsystem *cg_subsys ; struct config_group **default_groups ; }; struct configfs_attribute { char const *ca_name ; struct module *ca_owner ; umode_t ca_mode ; }; struct configfs_item_operations { void (*release)(struct config_item * ) ; ssize_t (*show_attribute)(struct config_item * , struct configfs_attribute * , char * ) ; ssize_t (*store_attribute)(struct config_item * , struct configfs_attribute * , char const * , size_t ) ; int (*allow_link)(struct config_item * , struct config_item * ) ; int (*drop_link)(struct config_item * , struct config_item * ) ; }; struct configfs_group_operations { struct config_item *(*make_item)(struct config_group * , char const * ) ; struct config_group *(*make_group)(struct config_group * , char const * ) ; int (*commit_item)(struct config_item * ) ; void (*disconnect_notify)(struct config_group * , struct config_item * ) ; void (*drop_item)(struct config_group * , struct config_item * ) ; }; struct configfs_subsystem { struct config_group su_group ; struct mutex su_mutex ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_284 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union __anonunion____missing_field_name_284 __annonCompField92 ; unsigned long nr_segs ; }; typedef unsigned short __kernel_sa_family_t; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iov_iter msg_iter ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; struct kiocb *msg_iocb ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void 
(*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { char uuid[37U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; typedef int congested_fn(void * , int ); struct bdi_writeback_congested { unsigned long state ; atomic_t refcnt ; struct backing_dev_info *bdi ; int blkcg_id ; struct rb_node rb_node ; }; union __anonunion____missing_field_name_285 { struct work_struct release_work ; struct callback_head rcu ; }; struct bdi_writeback { struct backing_dev_info *bdi ; unsigned long state ; unsigned long last_old_flush ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; struct list_head b_dirty_time ; spinlock_t list_lock ; struct percpu_counter stat[4U] ; struct bdi_writeback_congested *congested ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; spinlock_t work_lock ; struct list_head work_list ; struct delayed_work dwork ; struct percpu_ref refcnt ; struct fprop_local_percpu memcg_completions ; struct cgroup_subsys_state *memcg_css ; struct cgroup_subsys_state *blkcg_css ; struct list_head memcg_node ; struct list_head blkcg_node ; union __anonunion____missing_field_name_285 __annonCompField93 ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned int capabilities ; 
congested_fn *congested_fn ; void *congested_data ; char *name ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; atomic_long_t tot_write_bandwidth ; struct bdi_writeback wb ; struct radix_tree_root cgwb_tree ; struct rb_root cgwb_congested_tree ; atomic_t usage_cnt ; wait_queue_head_t wb_waitq ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; union __anonunion____missing_field_name_286 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion____missing_field_name_287 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion____missing_field_name_286 __annonCompField94 ; union __anonunion____missing_field_name_287 __annonCompField95 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; struct bvec_iter bip_iter ; bio_end_io_t *bip_end_io ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_max_vcnt ; unsigned short bip_flags ; struct work_struct bip_work ; struct bio_vec *bip_vec ; struct bio_vec bip_inline_vecs[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bvec_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_integrity_pool ; spinlock_t rescue_lock ; struct bio_list rescue_list ; struct work_struct rescue_work ; struct workqueue_struct *rescue_workqueue ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct elevator_queue; struct request; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; union __anonunion____missing_field_name_288 { struct call_single_data csd ; unsigned long fifo_time ; }; struct blk_mq_ctx; union __anonunion____missing_field_name_289 { struct hlist_node hash ; struct list_head ipi_list ; }; union __anonunion____missing_field_name_290 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_292 { struct io_cq *icq ; void *priv[2U] ; }; struct __anonstruct_flush_293 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion____missing_field_name_291 { struct __anonstruct_elv_292 elv ; struct __anonstruct_flush_293 flush ; }; struct request { struct list_head queuelist ; union __anonunion____missing_field_name_288 __annonCompField96 ; struct request_queue *q ; struct blk_mq_ctx *mq_ctx ; u64 cmd_flags ; unsigned int cmd_type ; unsigned long atomic_flags ; 
int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; union __anonunion____missing_field_name_289 __annonCompField97 ; union __anonunion____missing_field_name_290 __annonCompField98 ; union __anonunion____missing_field_name_291 __annonCompField99 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; void *special ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; struct elevator_type; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef void elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * , struct elevator_type * ); typedef void elevator_exit_fn(struct elevator_queue * ); typedef void elevator_registered_fn(struct request_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn *elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; elevator_registered_fn *elevator_registered_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; 
struct elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; unsigned char registered : 1 ; struct hlist_head hash[64U] ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; int alloc_policy ; int next_tag ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int chunk_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; unsigned char raid_partial_stripes_expensive ; }; struct blk_mq_ops; struct blk_mq_hw_ctx; struct throtl_data; struct blk_mq_tag_set; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; struct blk_mq_ops *mq_ops ; unsigned int *mq_map ; struct blk_mq_ctx *queue_ctx ; unsigned int nr_queues ; struct blk_mq_hw_ctx **queue_hw_ctx ; unsigned int nr_hw_queues ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; struct kobject mq_kobj ; struct device *dev ; int rpm_status ; unsigned int nr_pending ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct 
blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int request_fn_active ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; struct blk_flush_queue *fq ; struct list_head requeue_list ; spinlock_t requeue_lock ; struct work_struct requeue_work ; struct mutex sysfs_lock ; int bypass_depth ; atomic_t mq_freeze_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct bsg_class_device bsg_dev ; struct throtl_data *td ; struct callback_head callback_head ; wait_queue_head_t mq_freeze_wq ; struct percpu_ref mq_usage_counter ; struct list_head all_q_node ; struct blk_mq_tag_set *tag_set ; struct list_head tag_set_list ; }; struct blk_plug { struct list_head list ; struct list_head mq_list ; struct list_head cb_list ; }; struct blk_integrity_iter { void *prot_buf ; void *data_buf ; sector_t seed ; unsigned int data_size ; unsigned short interval ; char const *disk_name ; }; typedef int integrity_processing_fn(struct blk_integrity_iter * ); struct blk_integrity { integrity_processing_fn *generate_fn ; integrity_processing_fn *verify_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short interval ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; void (*release)(struct gendisk * , fmode_t ) ; int (*rw_page)(struct block_device * , sector_t , struct page * , int ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; long (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * , long ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct percpu_ida_cpu; struct __anonstruct____missing_field_name_295 { spinlock_t lock ; unsigned int cpu_last_stolen ; wait_queue_head_t wait ; unsigned int nr_free ; unsigned int *freelist ; }; struct percpu_ida { unsigned int nr_tags ; unsigned int percpu_max_size ; unsigned int percpu_batch_size ; struct percpu_ida_cpu *tag_cpu ; cpumask_t cpus_have_tags ; struct __anonstruct____missing_field_name_295 __annonCompField100 ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct __anonstruct_sync_serial_settings_297 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct 
__anonstruct_sync_serial_settings_297 sync_serial_settings; struct __anonstruct_te1_settings_298 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_298 te1_settings; struct __anonstruct_raw_hdlc_proto_299 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_299 raw_hdlc_proto; struct __anonstruct_fr_proto_300 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_300 fr_proto; struct __anonstruct_fr_proto_pvc_301 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_301 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_302 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_302 fr_proto_pvc_info; struct __anonstruct_cisco_proto_303 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_303 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_304 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_304 ifs_ifsu ; }; union __anonunion_ifr_ifrn_305 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_306 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_305 ifr_ifrn ; union __anonunion_ifr_ifru_306 ifr_ifru ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; enum ldv_29319 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_29319 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int 
(*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*set_peek_off)(struct sock * , int ) ; }; struct in6_addr; struct sk_buff; typedef u64 netdev_features_t; union __anonunion_in6_u_322 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_322 in6_u ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page ; unsigned int offset ; unsigned int len ; struct pipe_buf_operations const *ops ; unsigned int flags ; unsigned long private ; }; struct pipe_inode_info { struct mutex mutex ; wait_queue_head_t wait ; unsigned int nrbufs ; unsigned int curbuf ; unsigned int buffers ; unsigned int readers ; unsigned int writers ; unsigned int files ; unsigned int waiting_writers ; unsigned int r_counter ; unsigned int w_counter ; struct page *tmp_page ; struct fasync_struct *fasync_readers ; struct fasync_struct *fasync_writers ; struct pipe_buffer *bufs ; }; struct pipe_buf_operations { int can_merge ; int (*confirm)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*release)(struct pipe_inode_info * , struct pipe_buffer * ) ; int (*steal)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*get)(struct pipe_inode_info * , struct pipe_buffer * ) ; }; struct napi_struct; struct nf_conntrack { atomic_t use ; }; union __anonunion____missing_field_name_327 { struct net_device *physoutdev ; char neigh_header[8U] ; }; union __anonunion____missing_field_name_328 { __be32 ipv4_daddr ; struct in6_addr ipv6_daddr ; }; struct nf_bridge_info { atomic_t use ; unsigned char orig_proto ; bool pkt_otherhost ; __u16 frag_max_size ; unsigned int mask ; struct net_device *physindev ; union __anonunion____missing_field_name_327 __annonCompField104 ; union __anonunion____missing_field_name_328 __annonCompField105 ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct __anonstruct____missing_field_name_331 { u32 stamp_us ; u32 stamp_jiffies ; }; union __anonunion____missing_field_name_330 { u64 v64 ; struct __anonstruct____missing_field_name_331 __annonCompField106 ; }; struct skb_mstamp { union __anonunion____missing_field_name_330 __annonCompField107 ; }; union __anonunion____missing_field_name_334 { ktime_t tstamp ; struct skb_mstamp skb_mstamp ; }; struct __anonstruct____missing_field_name_333 { struct sk_buff *next ; struct sk_buff *prev ; union __anonunion____missing_field_name_334 __annonCompField108 ; }; union __anonunion____missing_field_name_332 { struct __anonstruct____missing_field_name_333 __annonCompField109 ; struct rb_node rbnode ; }; struct sec_path; struct __anonstruct____missing_field_name_336 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion____missing_field_name_335 { __wsum csum ; struct 
__anonstruct____missing_field_name_336 __annonCompField111 ; }; union __anonunion____missing_field_name_337 { unsigned int napi_id ; unsigned int sender_cpu ; }; union __anonunion____missing_field_name_338 { __u32 mark ; __u32 reserved_tailroom ; }; union __anonunion____missing_field_name_339 { __be16 inner_protocol ; __u8 inner_ipproto ; }; struct sk_buff { union __anonunion____missing_field_name_332 __annonCompField110 ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; void (*destructor)(struct sk_buff * ) ; struct sec_path *sp ; struct nf_conntrack *nfct ; struct nf_bridge_info *nf_bridge ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; __u16 queue_mapping ; unsigned char cloned : 1 ; unsigned char nohdr : 1 ; unsigned char fclone : 2 ; unsigned char peeked : 1 ; unsigned char head_frag : 1 ; unsigned char xmit_more : 1 ; __u32 headers_start[0U] ; __u8 __pkt_type_offset[0U] ; unsigned char pkt_type : 3 ; unsigned char pfmemalloc : 1 ; unsigned char ignore_df : 1 ; unsigned char nfctinfo : 3 ; unsigned char nf_trace : 1 ; unsigned char ip_summed : 2 ; unsigned char ooo_okay : 1 ; unsigned char l4_hash : 1 ; unsigned char sw_hash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char encapsulation : 1 ; unsigned char encap_hdr_csum : 1 ; unsigned char csum_valid : 1 ; unsigned char csum_complete_sw : 1 ; unsigned char csum_level : 2 ; unsigned char csum_bad : 1 ; unsigned char ndisc_nodetype : 2 ; unsigned char ipvs_property : 1 ; unsigned char inner_protocol_type : 1 ; unsigned char remcsum_offload : 1 ; __u16 tc_index ; __u16 tc_verd ; union __anonunion____missing_field_name_335 __annonCompField112 ; __u32 priority ; int skb_iif ; __u32 hash ; __be16 vlan_proto ; __u16 vlan_tci ; union __anonunion____missing_field_name_337 __annonCompField113 ; __u32 secmark ; union __anonunion____missing_field_name_338 __annonCompField114 ; union __anonunion____missing_field_name_339 __annonCompField115 ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __be16 protocol ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; __u32 headers_end[0U] ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char erom_version[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_tunable { __u32 cmd ; __u32 id ; __u32 type_id ; __u32 len ; void *data[0U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 
reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , 
struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_key_size)(struct net_device * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh)(struct net_device * , u32 * , u8 * , u8 * ) ; int (*set_rxfh)(struct net_device * , u32 const * , u8 const * , u8 const ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; int (*get_tunable)(struct net_device * , struct ethtool_tunable const * , void * ) ; int (*set_tunable)(struct net_device * , struct ethtool_tunable const * , void const * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[9U] ; }; struct linux_mib { unsigned long mibs[115U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics ; struct ipstats_mib 
*ip_statistics ; struct linux_mib *net_statistics ; struct udp_mib *udp_statistics ; struct udp_mib *udplite_statistics ; struct icmp_mib *icmp_statistics ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6 ; struct udp_mib *udplite_stats_in6 ; struct ipstats_mib *ipv6_statistics ; struct icmpv6_mib *icmpv6_statistics ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct local_ports { seqlock_t lock ; int range[2U] ; bool warned ; }; struct ping_group_range { seqlock_t lock ; kgid_t range[2U] ; }; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; bool fib_offload_disabled ; struct sock *fibnl ; struct sock **icmp_sk ; struct sock *mc_autojoin_sk ; struct inet_peer_base *peers ; struct sock **tcp_sk ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; struct local_ports ip_local_ports ; int sysctl_tcp_ecn ; int sysctl_tcp_ecn_fallback ; int sysctl_ip_no_pmtu_disc ; int sysctl_ip_fwd_use_pmtu ; int sysctl_ip_nonlocal_bind ; int sysctl_fwmark_reflect ; int sysctl_tcp_fwmark_accept ; int sysctl_tcp_mtu_probing ; int sysctl_tcp_base_mss ; int sysctl_tcp_probe_threshold ; u32 sysctl_tcp_probe_interval ; struct ping_group_range ping_group_range ; atomic_t dev_addr_genid ; unsigned long *sysctl_local_reserved_ports ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct 
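/* Per-network-namespace IPv6 state: sysctl knobs (bindv6only, route GC tuning), ip6tables pointers, FIB tables and per-namespace control sockets. */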
netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int flowlabel_consistency ; int auto_flowlabels ; int icmpv6_time ; int anycast_src_echo_reply ; int fwmark_reflect ; int idgen_retries ; int idgen_delay ; int flowlabel_state_ranges ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct sock *mc_autojoin_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; atomic_t fib6_sernum ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr ; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning ; bool clusterip_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; 
}; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ct_pcpu { spinlock_t lock ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; struct delayed_work ecache_dwork ; bool ecache_dwork_pending ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; unsigned int sysctl_log_invalid ; int sysctl_events ; int sysctl_acct ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int htable_size ; seqcount_t generation ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct ct_pcpu *pcpu_lists ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; }; struct nft_af_info; struct netns_nftables { struct list_head af_info ; struct list_head commit_list ; struct nft_af_info *ipv4 ; struct nft_af_info *ipv6 ; struct nft_af_info *inet ; struct nft_af_info *arp ; struct nft_af_info *bridge ; struct nft_af_info *netdev ; unsigned int base_seq ; u8 gencursor ; }; struct tasklet_struct { struct tasklet_struct *next ; unsigned long state ; atomic_t count ; void (*func)(unsigned long ) ; unsigned long data ; }; struct flow_cache_percpu { struct hlist_head *hash_table ; int hash_count ; u32 hash_rnd ; int hash_rnd_recalc ; struct tasklet_struct flush_tasklet ; }; struct flow_cache { u32 hash_shift ; struct flow_cache_percpu *percpu ; struct notifier_block hotcpu_notifier ; int low_watermark ; int high_watermark ; struct timer_list rnd_timer ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; u8 dbits4 ; u8 sbits4 ; u8 dbits6 ; u8 sbits6 ; }; struct xfrm_policy_hthresh { struct work_struct work ; seqlock_t lock ; u8 lbits4 ; u8 rbits4 ; u8 lbits6 ; u8 rbits6 ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[3U] ; struct xfrm_policy_hash policy_bydst[3U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct xfrm_policy_hthresh policy_hthresh ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 
sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; spinlock_t xfrm_state_lock ; rwlock_t xfrm_policy_lock ; struct mutex xfrm_cfg_mutex ; struct flow_cache flow_cache_global ; atomic_t flow_cache_genid ; struct list_head flow_cache_gc_list ; spinlock_t flow_cache_gc_lock ; struct work_struct flow_cache_gc_work ; struct work_struct flow_cache_flush_work ; struct mutex flow_flush_sem ; }; struct mpls_route; struct netns_mpls { size_t platform_labels ; struct mpls_route **platform_label ; struct ctl_table_header *ctl ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; atomic64_t cookie_gen ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; spinlock_t nsid_lock ; struct idr netns_ids ; struct ns_common ns ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; unsigned int dev_unreg_count ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_ieee802154_lowpan ieee802154_lowpan ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nftables nft ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct netns_mpls mpls ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct __anonstruct_possible_net_t_348 { struct net *net ; }; typedef struct __anonstruct_possible_net_t_348 possible_net_t; typedef unsigned long kernel_ulong_t; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long _flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; enum ldv_31902 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, 
PHY_INTERFACE_MODE_MAX = 16 } ; typedef enum ldv_31902 phy_interface_t; enum ldv_31956 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; struct phy_device; struct mii_bus { char const *name ; char id[17U] ; void *priv ; int (*read)(struct mii_bus * , int , int ) ; int (*write)(struct mii_bus * , int , int , u16 ) ; int (*reset)(struct mii_bus * ) ; struct mutex mdio_lock ; struct device *parent ; enum ldv_31956 state ; struct device dev ; struct phy_device *phy_map[32U] ; u32 phy_mask ; u32 phy_ignore_ta_mask ; int *irq ; }; enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; struct phy_c45_device_ids { u32 devices_in_package ; u32 device_ids[8U] ; }; struct phy_driver; struct phy_device { struct phy_driver *drv ; struct mii_bus *bus ; struct device dev ; u32 phy_id ; struct phy_c45_device_ids c45_ids ; bool is_c45 ; bool is_internal ; bool has_fixups ; bool suspended ; enum phy_state state ; u32 dev_flags ; phy_interface_t interface ; int addr ; int speed ; int duplex ; int pause ; int asym_pause ; int link ; u32 interrupts ; u32 supported ; u32 advertising ; u32 lp_advertising ; int autoneg ; int link_timeout ; int irq ; void *priv ; struct work_struct phy_queue ; struct delayed_work state_queue ; atomic_t irq_disable ; struct mutex lock ; struct net_device *attached_dev ; void (*adjust_link)(struct net_device * ) ; }; struct phy_driver { u32 phy_id ; char *name ; unsigned int phy_id_mask ; u32 features ; u32 flags ; void const *driver_data ; int (*soft_reset)(struct phy_device * ) ; int (*config_init)(struct phy_device * ) ; int (*probe)(struct phy_device * ) ; int (*suspend)(struct phy_device * ) ; int (*resume)(struct phy_device * ) ; int (*config_aneg)(struct phy_device * ) ; int (*aneg_done)(struct phy_device * ) ; int (*read_status)(struct phy_device * ) ; int (*ack_interrupt)(struct phy_device * ) ; int (*config_intr)(struct phy_device * ) ; int (*did_interrupt)(struct phy_device * ) ; void (*remove)(struct phy_device * ) ; int (*match_phy_device)(struct phy_device * ) ; int (*ts_info)(struct phy_device * , struct ethtool_ts_info * ) ; int (*hwtstamp)(struct phy_device * , struct ifreq * ) ; bool (*rxtstamp)(struct phy_device * , struct sk_buff * , int ) ; void (*txtstamp)(struct phy_device * , struct sk_buff * , int ) ; int (*set_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*get_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*link_change_notify)(struct phy_device * ) ; int (*read_mmd_indirect)(struct phy_device * , int , int , int ) ; void (*write_mmd_indirect)(struct phy_device * , int , int , int , u32 ) ; int (*module_info)(struct phy_device * , struct ethtool_modinfo * ) ; int (*module_eeprom)(struct phy_device * , struct ethtool_eeprom * , u8 * ) ; struct device_driver driver ; }; struct fixed_phy_status { int link ; int speed ; int duplex ; int pause ; int asym_pause ; }; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4 } ; struct dsa_chip_data { struct device *host_dev ; int sw_addr ; int eeprom_len ; struct device_node *of_node ; char *port_names[12U] ; struct device_node *port_dn[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; struct net_device *of_netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct 
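/* Forward declaration used by the DSA receive hook; the Distributed Switch Architecture (DSA) switch-tree, per-switch and driver-operations structures follow. */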
packet_type; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; int (*rcv)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; enum dsa_tag_protocol tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; enum dsa_tag_protocol tag_protocol ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct device *master_dev ; char hwmon_name[24U] ; struct device *hwmon_dev ; u32 dsa_port_mask ; u32 phys_port_mask ; u32 phys_mii_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; enum dsa_tag_protocol tag_protocol ; int priv_size ; char *(*probe)(struct device * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; u32 (*get_phy_flags)(struct dsa_switch * , int ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*adjust_link)(struct dsa_switch * , int , struct phy_device * ) ; void (*fixed_link_update)(struct dsa_switch * , int , struct fixed_phy_status * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; void (*get_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*set_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*suspend)(struct dsa_switch * ) ; int (*resume)(struct dsa_switch * ) ; int (*port_enable)(struct dsa_switch * , int , struct phy_device * ) ; void (*port_disable)(struct dsa_switch * , int , struct phy_device * ) ; int (*set_eee)(struct dsa_switch * , int , struct phy_device * , struct ethtool_eee * ) ; int (*get_eee)(struct dsa_switch * , int , struct ethtool_eee * ) ; int (*get_temp)(struct dsa_switch * , int * ) ; int (*get_temp_limit)(struct dsa_switch * , int * ) ; int (*set_temp_limit)(struct dsa_switch * , int ) ; int (*get_temp_alarm)(struct dsa_switch * , bool * ) ; int (*get_eeprom_len)(struct dsa_switch * ) ; int (*get_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*get_regs_len)(struct dsa_switch * , int ) ; void (*get_regs)(struct dsa_switch * , int , struct ethtool_regs * , void * ) ; int (*port_join_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_leave_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_stp_update)(struct dsa_switch * , int , u8 ) ; int (*fdb_add)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_del)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_getnext)(struct dsa_switch * , int , unsigned char * , bool * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_qcn { __u8 rpg_enable[8U] ; __u32 rppp_max_rps[8U] ; __u32 rpg_time_reset[8U] ; __u32 rpg_byte_reset[8U] ; __u32 rpg_threshold[8U] ; __u32 rpg_max_rate[8U] ; __u32 rpg_ai_rate[8U] ; __u32 rpg_hai_rate[8U] ; __u32 rpg_gd[8U] ; __u32 
rpg_min_dec_fac[8U] ; __u32 rpg_min_rate[8U] ; __u32 cndd_state_machine[8U] ; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U] ; __u32 rppp_created_rps[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_setqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_getqcnstats)(struct net_device * , struct ieee_qcn_stats * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; int (*setapp)(struct net_device * , u8 , u16 , u8 ) ; int (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; 
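/* Netlink message framing (struct nlmsghdr, struct nlattr), rtnetlink dump callbacks, 64-bit link statistics and SR-IOV VF info, leading into the net_device statistics, header_ops and NAPI structures. */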
struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_stats { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 broadcast ; __u64 multicast ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 spoofchk ; __u32 linkstate ; __u32 min_tx_rate ; __u32 max_tx_rate ; __u32 rss_query_en ; }; struct netpoll_info; struct wireless_dev; struct wpan_dev; struct mpls_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; struct sk_buff *skb ; struct hrtimer timer ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff 
** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; unsigned long tx_maxrate ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_item_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * , void * , u16 (*)(struct net_device * , struct sk_buff * ) ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_rate)(struct net_device * , int , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_get_vf_stats)(struct net_device * , int , struct ifla_vf_stats * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_set_vf_rss_query_en)(struct net_device * , int , bool ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device 
* ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 , int ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_item_id * ) ; int (*ndo_get_phys_port_name)(struct net_device * , char * , size_t ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void *(*ndo_dfwd_add_station)(struct net_device * , struct net_device * ) ; void (*ndo_dfwd_del_station)(struct net_device * , void * ) ; netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff * , struct net_device * , void * ) ; int (*ndo_get_lock_subclass)(struct net_device * ) ; netdev_features_t (*ndo_features_check)(struct sk_buff * , struct net_device * , netdev_features_t ) ; int (*ndo_set_tx_maxrate)(struct net_device * , int , u32 ) ; int (*ndo_get_iflink)(struct net_device const * ) ; }; struct __anonstruct_adj_list_355 { struct list_head upper ; struct list_head lower ; }; struct __anonstruct_all_adj_list_356 { struct list_head upper ; struct list_head lower ; }; struct iw_handler_def; struct iw_public_data; struct switchdev_ops; struct vlan_info; struct tipc_bearer; struct in_device; struct dn_dev; struct inet6_dev; struct tcf_proto; struct cpu_rmap; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct pcpu_vstats; union __anonunion____missing_field_name_357 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_sw_netstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; int irq ; atomic_t carrier_changes ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head close_list ; struct list_head ptype_all ; struct list_head ptype_specific ; struct __anonstruct_adj_list_355 adj_list ; struct __anonstruct_all_adj_list_356 all_adj_list ; netdev_features_t features ; netdev_features_t 
hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int group ; struct net_device_stats stats ; atomic_long_t rx_dropped ; atomic_long_t tx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct switchdev_ops const *switchdev_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned short neigh_priv_len ; unsigned short dev_id ; unsigned short dev_port ; spinlock_t addr_list_lock ; unsigned char name_assign_type ; bool uc_promisc ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; struct tipc_bearer *tipc_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; struct wpan_dev *ieee802154_ptr ; struct mpls_dev *mpls_ptr ; unsigned long last_rx ; unsigned char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; unsigned long gro_flush_timeout ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct tcf_proto *ingress_cl_list ; struct netdev_queue *ingress_queue ; struct list_head nf_hooks_ingress ; unsigned char broadcast[32U] ; struct cpu_rmap *rx_cpu_rmap ; struct hlist_node index_hlist ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; int watchdog_timeo ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; possible_net_t nd_net ; union __anonunion____missing_field_name_357 __annonCompField118 ; struct garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct attribute_group const *sysfs_rx_queue_group ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; u16 gso_min_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; }; struct packet_type { __be16 type ; struct net_device *dev ; int (*func)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; bool (*id_match)(struct packet_type * , struct sock * ) ; void *af_packet_priv ; struct list_head list ; }; struct pcpu_sw_netstats { u64 rx_packets ; u64 rx_bytes ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; 
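/* Classic BPF and eBPF program representation (struct sock_filter, struct bpf_insn, struct bpf_prog) used for socket filtering, plus rtnl_link_ops for link-type registration and the neighbour (ARP/NDISC) cache structures. */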
}; struct page_counter { atomic_long_t count ; unsigned long limit ; struct page_counter *parent ; unsigned long watermark ; unsigned long failcnt ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct bpf_insn { __u8 code ; unsigned char dst_reg : 4 ; unsigned char src_reg : 4 ; __s16 off ; __s32 imm ; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4 } ; struct bpf_prog_aux; struct sock_fprog_kern { u16 len ; struct sock_filter *filter ; }; union __anonunion____missing_field_name_368 { struct sock_filter insns[0U] ; struct bpf_insn insnsi[0U] ; }; struct bpf_prog { u16 pages ; bool jited ; bool gpl_compatible ; u32 len ; enum bpf_prog_type type ; struct bpf_prog_aux *aux ; struct sock_fprog_kern *orig_prog ; unsigned int (*bpf_func)(struct sk_buff const * , struct bpf_insn const * ) ; union __anonunion____missing_field_name_368 __annonCompField123 ; }; struct sk_filter { atomic_t refcnt ; struct callback_head rcu ; struct bpf_prog *prog ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; int slave_maxtype ; struct nla_policy const *slave_policy ; int (*slave_validate)(struct nlattr ** , struct nlattr ** ) ; int (*slave_changelink)(struct net_device * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; size_t (*get_slave_size)(struct net_device const * , struct net_device const * ) ; int (*fill_slave_info)(struct sk_buff * , struct net_device const * , struct net_device const * ) ; struct net *(*get_link_net)(struct net_device const * ) ; }; struct neigh_table; struct neigh_parms { possible_net_t net ; struct net_device *dev ; struct list_head list ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int reachable_time ; int data[13U] ; unsigned long data_state[1U] ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct 
hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; possible_net_t net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { int family ; int entry_size ; int key_len ; __be16 protocol ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; bool (*key_eq)(struct neighbour const * , void const * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; struct list_head parms_list ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion____missing_field_name_378 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sock * , struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion____missing_field_name_378 __annonCompField124 ; }; struct __anonstruct_socket_lock_t_379 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_379 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct____missing_field_name_381 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion____missing_field_name_380 { __addrpair skc_addrpair ; struct __anonstruct____missing_field_name_381 __annonCompField125 ; }; union __anonunion____missing_field_name_382 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct____missing_field_name_384 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion____missing_field_name_383 { __portpair skc_portpair ; struct __anonstruct____missing_field_name_384 __annonCompField128 ; }; union __anonunion____missing_field_name_385 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion____missing_field_name_386 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion____missing_field_name_380 __annonCompField126 ; union __anonunion____missing_field_name_382 
__annonCompField127 ; union __anonunion____missing_field_name_383 __annonCompField129 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; unsigned char skc_reuseport : 1 ; unsigned char skc_ipv6only : 1 ; unsigned char skc_net_refcnt : 1 ; int skc_bound_dev_if ; union __anonunion____missing_field_name_385 __annonCompField130 ; struct proto *skc_prot ; possible_net_t skc_net ; struct in6_addr skc_v6_daddr ; struct in6_addr skc_v6_rcv_saddr ; atomic64_t skc_cookie ; int skc_dontcopy_begin[0U] ; union __anonunion____missing_field_name_386 __annonCompField131 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_387 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_387 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; u16 sk_incoming_cpu ; __u32 sk_txhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check_tx : 1 ; unsigned char sk_no_check_rx : 1 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; u32 sk_max_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; u32 sk_ack_backlog ; u32 sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; struct timer_list sk_timer ; ktime_t sk_stamp ; u16 sk_tsflags ; u32 sk_tskey ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_390 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; 
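/* The remaining members of struct proto below cover the compat ioctl/sockopt entry
 * points, message send/receive and paging hooks, hashing and port management callbacks,
 * and the memory-pressure, slab-cache and optional cgroup accounting fields carried by
 * each protocol implementation. */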
int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_390 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { struct page_counter memory_allocated ; struct percpu_counter sockets_allocated ; int memory_pressure ; long sysctl_mem[3U] ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct request_sock const * ) ; }; struct request_sock { struct sock_common __req_common ; struct request_sock *dl_next ; struct sock *rsk_listener ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; struct timer_list rsk_timer ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 *saved_syn ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct ipv6_stable_secret { bool initialized ; struct in6_addr secret ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 accept_ra_from_local ; __s32 optimistic_dad ; __s32 use_optimistic ; __s32 
mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; __s32 accept_ra_mtu ; struct ipv6_stable_secret stable_secret ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6 ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; __u8 addr_gen_mode ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion____missing_field_name_412 { __be32 a4 ; __be32 a6[4U] ; struct in6_addr in6 ; }; struct inetpeer_addr_base { union __anonunion____missing_field_name_412 __annonCompField133 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion____missing_field_name_413 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct __anonstruct____missing_field_name_415 { atomic_t rid ; }; union __anonunion____missing_field_name_414 { struct __anonstruct____missing_field_name_415 __annonCompField135 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[16U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion____missing_field_name_413 __annonCompField134 ; union __anonunion____missing_field_name_414 __annonCompField136 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; int total ; }; struct uncached_list; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; struct uncached_list *rt_uncached_list ; }; struct inet_ehash_bucket { struct hlist_nulls_head chain ; }; struct inet_bind_hashbucket { spinlock_t lock ; struct hlist_head chain ; }; struct 
inet_listen_hashbucket { spinlock_t lock ; struct hlist_nulls_head head ; }; struct inet_hashinfo { struct inet_ehash_bucket *ehash ; spinlock_t *ehash_locks ; unsigned int ehash_mask ; unsigned int ehash_locks_mask ; struct inet_bind_hashbucket *bhash ; unsigned int bhash_size ; struct kmem_cache *bind_bucket_cachep ; struct inet_listen_hashbucket listening_hash[32U] ; }; enum transport_state_table { TRANSPORT_NO_STATE = 0, TRANSPORT_NEW_CMD = 1, TRANSPORT_WRITE_PENDING = 3, TRANSPORT_PROCESSING = 5, TRANSPORT_COMPLETE = 6, TRANSPORT_ISTATE_PROCESSING = 11, TRANSPORT_COMPLETE_QF_WP = 18, TRANSPORT_COMPLETE_QF_OK = 19 } ; typedef unsigned int sense_reason_t; struct t10_alua_lba_map_member { struct list_head lba_map_mem_list ; int lba_map_mem_alua_state ; int lba_map_mem_alua_pg_id ; }; struct t10_alua_lba_map { u64 lba_map_first_lba ; u64 lba_map_last_lba ; struct list_head lba_map_list ; struct list_head lba_map_mem_list ; }; struct t10_alua { u16 alua_tg_pt_gps_counter ; u32 alua_tg_pt_gps_count ; spinlock_t lba_map_lock ; u32 lba_map_segment_size ; u32 lba_map_segment_multiplier ; struct list_head lba_map_list ; spinlock_t tg_pt_gps_lock ; struct se_device *t10_dev ; struct t10_alua_tg_pt_gp *default_tg_pt_gp ; struct config_group alua_tg_pt_gps_group ; struct list_head tg_pt_gps_list ; }; struct t10_alua_lu_gp { u16 lu_gp_id ; int lu_gp_valid_id ; u32 lu_gp_members ; atomic_t lu_gp_ref_cnt ; spinlock_t lu_gp_lock ; struct config_group lu_gp_group ; struct list_head lu_gp_node ; struct list_head lu_gp_mem_list ; }; struct t10_alua_lu_gp_member { bool lu_gp_assoc ; atomic_t lu_gp_mem_ref_cnt ; spinlock_t lu_gp_mem_lock ; struct t10_alua_lu_gp *lu_gp ; struct se_device *lu_gp_mem_dev ; struct list_head lu_gp_mem_list ; }; struct se_node_acl; struct t10_alua_tg_pt_gp { u16 tg_pt_gp_id ; int tg_pt_gp_valid_id ; int tg_pt_gp_alua_supported_states ; int tg_pt_gp_alua_pending_state ; int tg_pt_gp_alua_previous_state ; int tg_pt_gp_alua_access_status ; int tg_pt_gp_alua_access_type ; int tg_pt_gp_nonop_delay_msecs ; int tg_pt_gp_trans_delay_msecs ; int tg_pt_gp_implicit_trans_secs ; int tg_pt_gp_pref ; int tg_pt_gp_write_metadata ; u32 tg_pt_gp_members ; atomic_t tg_pt_gp_alua_access_state ; atomic_t tg_pt_gp_ref_cnt ; spinlock_t tg_pt_gp_lock ; struct mutex tg_pt_gp_md_mutex ; struct se_device *tg_pt_gp_dev ; struct config_group tg_pt_gp_group ; struct list_head tg_pt_gp_list ; struct list_head tg_pt_gp_lun_list ; struct se_lun *tg_pt_gp_alua_lun ; struct se_node_acl *tg_pt_gp_alua_nacl ; struct delayed_work tg_pt_gp_transition_work ; struct completion *tg_pt_gp_transition_complete ; }; struct t10_vpd { unsigned char device_identifier[254U] ; int protocol_identifier_set ; u32 protocol_identifier ; u32 device_identifier_code_set ; u32 association ; u32 device_identifier_type ; struct list_head vpd_list ; }; struct t10_wwn { char vendor[8U] ; char model[16U] ; char revision[4U] ; char unit_serial[254U] ; spinlock_t t10_vpd_lock ; struct se_device *t10_dev ; struct config_group t10_wwn_group ; struct list_head t10_vpd_list ; }; struct se_dev_entry; struct t10_pr_registration { char pr_reg_isid[16U] ; unsigned char pr_iport[256U] ; unsigned char pr_tport[256U] ; u16 pr_aptpl_rpti ; u16 pr_reg_tpgt ; int pr_reg_all_tg_pt ; int pr_reg_aptpl ; int pr_res_holder ; int pr_res_type ; int pr_res_scope ; bool isid_present_at_reg ; u64 pr_res_mapped_lun ; u64 pr_aptpl_target_lun ; u16 tg_pt_sep_rtpi ; u32 pr_res_generation ; u64 pr_reg_bin_isid ; u64 pr_res_key ; atomic_t pr_res_holders ; struct 
se_node_acl *pr_reg_nacl ; struct se_dev_entry *pr_reg_deve ; struct list_head pr_reg_list ; struct list_head pr_reg_abort_list ; struct list_head pr_reg_aptpl_list ; struct list_head pr_reg_atp_list ; struct list_head pr_reg_atp_mem_list ; }; struct t10_reservation { int pr_all_tg_pt ; int pr_aptpl_active ; u32 pr_generation ; spinlock_t registration_lock ; spinlock_t aptpl_reg_lock ; struct se_node_acl *pr_res_holder ; struct list_head registration_list ; struct list_head aptpl_reg_list ; }; struct se_tmr_req { u8 function ; u8 response ; int call_transport ; u64 ref_task_tag ; void *fabric_tmr_ptr ; struct se_cmd *task_cmd ; struct se_device *tmr_dev ; struct se_lun *tmr_lun ; struct list_head tmr_list ; }; enum target_prot_op { TARGET_PROT_NORMAL = 0, TARGET_PROT_DIN_INSERT = 1, TARGET_PROT_DOUT_INSERT = 2, TARGET_PROT_DIN_STRIP = 4, TARGET_PROT_DOUT_STRIP = 8, TARGET_PROT_DIN_PASS = 16, TARGET_PROT_DOUT_PASS = 32 } ; enum target_prot_type { TARGET_DIF_TYPE0_PROT = 0, TARGET_DIF_TYPE1_PROT = 1, TARGET_DIF_TYPE2_PROT = 2, TARGET_DIF_TYPE3_PROT = 3 } ; struct se_session; struct target_core_fabric_ops; struct se_cmd { u8 scsi_status ; u8 scsi_asc ; u8 scsi_ascq ; u16 scsi_sense_length ; u64 tag ; int alua_nonop_delay ; enum dma_data_direction data_direction ; int sam_task_attr ; unsigned int map_tag ; enum transport_state_table t_state ; unsigned char cmd_wait_set : 1 ; unsigned char unknown_data_length : 1 ; u32 se_cmd_flags ; u32 se_ordered_id ; u32 data_length ; u32 residual_count ; u64 orig_fe_lun ; u64 pr_res_key ; void *sense_buffer ; struct list_head se_delayed_node ; struct list_head se_qf_node ; struct se_device *se_dev ; struct se_lun *se_lun ; struct se_session *se_sess ; struct se_tmr_req *se_tmr_req ; struct list_head se_cmd_list ; struct completion cmd_wait_comp ; struct kref cmd_kref ; struct target_core_fabric_ops const *se_tfo ; sense_reason_t (*execute_cmd)(struct se_cmd * ) ; sense_reason_t (*transport_complete_callback)(struct se_cmd * , bool ) ; void *protocol_data ; unsigned char *t_task_cdb ; unsigned char __t_task_cdb[32U] ; unsigned long long t_task_lba ; unsigned int t_task_nolb ; unsigned int transport_state ; spinlock_t t_state_lock ; struct completion t_transport_stop_comp ; struct work_struct work ; struct scatterlist *t_data_sg ; struct scatterlist *t_data_sg_orig ; unsigned int t_data_nents ; unsigned int t_data_nents_orig ; void *t_data_vmap ; struct scatterlist *t_bidi_data_sg ; unsigned int t_bidi_data_nents ; struct list_head state_list ; bool state_active ; struct completion task_stop_comp ; void *priv ; int lun_ref_active ; enum target_prot_op prot_op ; enum target_prot_type prot_type ; u8 prot_checks ; u32 prot_length ; u32 reftag_seed ; struct scatterlist *t_prot_sg ; unsigned int t_prot_nents ; sense_reason_t pi_err ; sector_t bad_sector ; bool prot_pto ; }; struct se_portal_group; struct se_node_acl { char initiatorname[224U] ; bool dynamic_node_acl ; bool acl_stop ; u32 queue_depth ; u32 acl_index ; enum target_prot_type saved_prot_type ; char acl_tag[64U] ; atomic_t acl_pr_ref_count ; struct hlist_head lun_entry_hlist ; struct se_session *nacl_sess ; struct se_portal_group *se_tpg ; struct mutex lun_entry_mutex ; spinlock_t nacl_sess_lock ; struct config_group acl_group ; struct config_group acl_attrib_group ; struct config_group acl_auth_group ; struct config_group acl_param_group ; struct config_group acl_fabric_stat_group ; struct config_group *acl_default_groups[5U] ; struct list_head acl_list ; struct list_head acl_sess_list ; struct 
completion acl_free_comp ; struct kref acl_kref ; }; struct se_session { unsigned char sess_tearing_down : 1 ; u64 sess_bin_isid ; enum target_prot_op sup_prot_ops ; enum target_prot_type sess_prot_type ; struct se_node_acl *se_node_acl ; struct se_portal_group *se_tpg ; void *fabric_sess_ptr ; struct list_head sess_list ; struct list_head sess_acl_list ; struct list_head sess_cmd_list ; struct list_head sess_wait_list ; spinlock_t sess_cmd_lock ; struct kref sess_kref ; void *sess_cmd_map ; struct percpu_ida sess_tag_pool ; }; struct se_ml_stat_grps { struct config_group stat_group ; struct config_group scsi_auth_intr_group ; struct config_group scsi_att_intr_port_group ; }; struct se_lun_acl { char initiatorname[224U] ; u64 mapped_lun ; struct se_node_acl *se_lun_nacl ; struct se_lun *se_lun ; struct config_group se_lun_group ; struct se_ml_stat_grps ml_stat_grps ; }; struct se_dev_entry { u64 mapped_lun ; u64 pr_res_key ; u64 creation_time ; u32 lun_flags ; u32 attach_count ; atomic_long_t total_cmds ; atomic_long_t read_bytes ; atomic_long_t write_bytes ; atomic_t ua_count ; struct kref pr_kref ; struct completion pr_comp ; struct se_lun_acl *se_lun_acl ; spinlock_t ua_lock ; struct se_lun *se_lun ; unsigned long deve_flags ; struct list_head alua_port_list ; struct list_head lun_link ; struct list_head ua_list ; struct hlist_node link ; struct callback_head callback_head ; }; struct se_dev_attrib { int emulate_model_alias ; int emulate_dpo ; int emulate_fua_write ; int emulate_fua_read ; int emulate_write_cache ; int emulate_ua_intlck_ctrl ; int emulate_tas ; int emulate_tpu ; int emulate_tpws ; int emulate_caw ; int emulate_3pc ; int pi_prot_format ; enum target_prot_type pi_prot_type ; enum target_prot_type hw_pi_prot_type ; int enforce_pr_isids ; int force_pr_aptpl ; int is_nonrot ; int emulate_rest_reord ; u32 hw_block_size ; u32 block_size ; u32 hw_max_sectors ; u32 optimal_sectors ; u32 hw_queue_depth ; u32 queue_depth ; u32 max_unmap_lba_count ; u32 max_unmap_block_desc_count ; u32 unmap_granularity ; u32 unmap_granularity_alignment ; u32 max_write_same_len ; u32 max_bytes_per_io ; struct se_device *da_dev ; struct config_group da_group ; }; struct se_port_stat_grps { struct config_group stat_group ; struct config_group scsi_port_group ; struct config_group scsi_tgt_port_group ; struct config_group scsi_transport_group ; }; struct scsi_port_stats { atomic_long_t cmd_pdus ; atomic_long_t tx_data_octets ; atomic_long_t rx_data_octets ; }; struct se_lun { u64 unpacked_lun ; u32 lun_link_magic ; u32 lun_access ; u32 lun_flags ; u32 lun_index ; u16 lun_rtpi ; atomic_t lun_acl_count ; struct se_device *lun_se_dev ; struct list_head lun_deve_list ; spinlock_t lun_deve_lock ; int lun_tg_pt_secondary_stat ; int lun_tg_pt_secondary_write_md ; atomic_t lun_tg_pt_secondary_offline ; struct mutex lun_tg_pt_md_mutex ; struct list_head lun_tg_pt_gp_link ; struct t10_alua_tg_pt_gp *lun_tg_pt_gp ; spinlock_t lun_tg_pt_gp_lock ; struct se_portal_group *lun_tpg ; struct scsi_port_stats lun_stats ; struct config_group lun_group ; struct se_port_stat_grps port_stat_grps ; struct completion lun_ref_comp ; struct percpu_ref lun_ref ; struct list_head lun_dev_link ; struct hlist_node link ; struct callback_head callback_head ; }; struct se_dev_stat_grps { struct config_group stat_group ; struct config_group scsi_dev_group ; struct config_group scsi_tgt_dev_group ; struct config_group scsi_lu_group ; }; struct target_backend_ops; struct se_device { u32 dev_link_magic ; u16 dev_rpti_counter ; u32 
dev_cur_ordered_id ; u32 dev_flags ; u32 queue_depth ; u64 dev_res_bin_isid ; u32 dev_index ; u64 creation_time ; atomic_long_t num_resets ; atomic_long_t num_cmds ; atomic_long_t read_bytes ; atomic_long_t write_bytes ; atomic_t simple_cmds ; atomic_t dev_ordered_id ; atomic_t dev_ordered_sync ; atomic_t dev_qf_count ; u32 export_count ; spinlock_t delayed_cmd_lock ; spinlock_t execute_task_lock ; spinlock_t dev_reservation_lock ; unsigned int dev_reservation_flags ; spinlock_t se_port_lock ; spinlock_t se_tmr_lock ; spinlock_t qf_cmd_lock ; struct semaphore caw_sem ; struct se_node_acl *dev_reserved_node_acl ; struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem ; struct t10_pr_registration *dev_pr_res_holder ; struct list_head dev_sep_list ; struct list_head dev_tmr_list ; struct workqueue_struct *tmr_wq ; struct work_struct qf_work_queue ; struct list_head delayed_cmd_list ; struct list_head state_list ; struct list_head qf_cmd_list ; struct list_head g_dev_node ; struct se_hba *se_hba ; struct t10_wwn t10_wwn ; struct t10_alua t10_alua ; struct t10_reservation t10_pr ; struct se_dev_attrib dev_attrib ; struct config_group dev_group ; struct config_group dev_pr_group ; struct se_dev_stat_grps dev_stat_grps ; unsigned char dev_alias[512U] ; unsigned char udev_path[512U] ; struct target_backend_ops const *transport ; struct list_head dev_list ; struct se_lun xcopy_lun ; int prot_length ; u32 hba_index ; struct callback_head callback_head ; }; struct target_backend; struct se_hba { u16 hba_tpgt ; u32 hba_id ; u32 hba_flags ; u32 dev_count ; u32 hba_index ; void *hba_ptr ; struct list_head hba_node ; spinlock_t device_lock ; struct config_group hba_group ; struct mutex hba_access_mutex ; struct target_backend *backend ; }; struct se_tpg_np { struct se_portal_group *tpg_np_parent ; struct config_group tpg_np_group ; }; struct se_wwn; struct se_portal_group { int proto_id ; u32 num_node_acls ; atomic_t tpg_pr_ref_count ; struct mutex acl_node_mutex ; spinlock_t session_lock ; struct mutex tpg_lun_mutex ; struct list_head se_tpg_node ; struct list_head acl_node_list ; struct hlist_head tpg_lun_hlist ; struct se_lun *tpg_virt_lun0 ; struct list_head tpg_sess_list ; struct target_core_fabric_ops const *se_tpg_tfo ; struct se_wwn *se_tpg_wwn ; struct config_group tpg_group ; struct config_group *tpg_default_groups[7U] ; struct config_group tpg_lun_group ; struct config_group tpg_np_group ; struct config_group tpg_acl_group ; struct config_group tpg_attrib_group ; struct config_group tpg_auth_group ; struct config_group tpg_param_group ; }; struct target_fabric_configfs; struct se_wwn { struct target_fabric_configfs *wwn_tf ; struct config_group wwn_group ; struct config_group *wwn_default_groups[2U] ; struct config_group fabric_stat_group ; }; struct target_backend_ops { char name[16U] ; char inquiry_prod[16U] ; char inquiry_rev[4U] ; struct module *owner ; u8 transport_flags ; int (*attach_hba)(struct se_hba * , u32 ) ; void (*detach_hba)(struct se_hba * ) ; int (*pmode_enable_hba)(struct se_hba * , unsigned long ) ; struct se_device *(*alloc_device)(struct se_hba * , char const * ) ; int (*configure_device)(struct se_device * ) ; void (*free_device)(struct se_device * ) ; ssize_t (*set_configfs_dev_params)(struct se_device * , char const * , ssize_t ) ; ssize_t (*show_configfs_dev_params)(struct se_device * , char * ) ; void (*transport_complete)(struct se_cmd * , struct scatterlist * , unsigned char * ) ; sense_reason_t (*parse_cdb)(struct se_cmd * ) ; u32 (*get_device_type)(struct se_device * ) 
; sector_t (*get_blocks)(struct se_device * ) ; sector_t (*get_alignment_offset_lbas)(struct se_device * ) ; unsigned int (*get_lbppbe)(struct se_device * ) ; unsigned int (*get_io_min)(struct se_device * ) ; unsigned int (*get_io_opt)(struct se_device * ) ; unsigned char *(*get_sense_buffer)(struct se_cmd * ) ; bool (*get_write_cache)(struct se_device * ) ; int (*init_prot)(struct se_device * ) ; int (*format_prot)(struct se_device * ) ; void (*free_prot)(struct se_device * ) ; struct configfs_attribute **tb_dev_attrib_attrs ; }; struct target_core_fabric_ops { struct module *module ; char const *name ; size_t node_acl_size ; char *(*get_fabric_name)(void) ; char *(*tpg_get_wwn)(struct se_portal_group * ) ; u16 (*tpg_get_tag)(struct se_portal_group * ) ; u32 (*tpg_get_default_depth)(struct se_portal_group * ) ; int (*tpg_check_demo_mode)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_cache)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_prod_mode_write_protect)(struct se_portal_group * ) ; int (*tpg_check_demo_mode_login_only)(struct se_portal_group * ) ; int (*tpg_check_prot_fabric_only)(struct se_portal_group * ) ; u32 (*tpg_get_inst_index)(struct se_portal_group * ) ; int (*check_stop_free)(struct se_cmd * ) ; void (*release_cmd)(struct se_cmd * ) ; int (*shutdown_session)(struct se_session * ) ; void (*close_session)(struct se_session * ) ; u32 (*sess_get_index)(struct se_session * ) ; u32 (*sess_get_initiator_sid)(struct se_session * , unsigned char * , u32 ) ; int (*write_pending)(struct se_cmd * ) ; int (*write_pending_status)(struct se_cmd * ) ; void (*set_default_node_attributes)(struct se_node_acl * ) ; int (*get_cmd_state)(struct se_cmd * ) ; int (*queue_data_in)(struct se_cmd * ) ; int (*queue_status)(struct se_cmd * ) ; void (*queue_tm_rsp)(struct se_cmd * ) ; void (*aborted_task)(struct se_cmd * ) ; struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs * , struct config_group * , char const * ) ; void (*fabric_drop_wwn)(struct se_wwn * ) ; struct se_portal_group *(*fabric_make_tpg)(struct se_wwn * , struct config_group * , char const * ) ; void (*fabric_drop_tpg)(struct se_portal_group * ) ; int (*fabric_post_link)(struct se_portal_group * , struct se_lun * ) ; void (*fabric_pre_unlink)(struct se_portal_group * , struct se_lun * ) ; struct se_tpg_np *(*fabric_make_np)(struct se_portal_group * , struct config_group * , char const * ) ; void (*fabric_drop_np)(struct se_tpg_np * ) ; int (*fabric_init_nodeacl)(struct se_node_acl * , char const * ) ; void (*fabric_cleanup_nodeacl)(struct se_node_acl * ) ; struct configfs_attribute **tfc_discovery_attrs ; struct configfs_attribute **tfc_wwn_attrs ; struct configfs_attribute **tfc_tpg_base_attrs ; struct configfs_attribute **tfc_tpg_np_base_attrs ; struct configfs_attribute **tfc_tpg_attrib_attrs ; struct configfs_attribute **tfc_tpg_auth_attrs ; struct configfs_attribute **tfc_tpg_param_attrs ; struct configfs_attribute **tfc_tpg_nacl_base_attrs ; struct configfs_attribute **tfc_tpg_nacl_attrib_attrs ; struct configfs_attribute **tfc_tpg_nacl_auth_attrs ; struct configfs_attribute **tfc_tpg_nacl_param_attrs ; }; struct target_backend { struct list_head list ; struct target_backend_ops const *ops ; struct config_item_type tb_dev_cit ; struct config_item_type tb_dev_attrib_cit ; struct config_item_type tb_dev_pr_cit ; struct config_item_type tb_dev_wwn_cit ; struct config_item_type tb_dev_alua_tg_pt_gps_cit ; struct config_item_type tb_dev_stat_cit 
; }; struct target_fabric_configfs { atomic_t tf_access_cnt ; struct list_head tf_list ; struct config_group tf_group ; struct config_group tf_disc_group ; struct config_group *tf_default_groups[2U] ; struct target_core_fabric_ops const *tf_ops ; struct config_item_type tf_discovery_cit ; struct config_item_type tf_wwn_cit ; struct config_item_type tf_wwn_fabric_stats_cit ; struct config_item_type tf_tpg_cit ; struct config_item_type tf_tpg_base_cit ; struct config_item_type tf_tpg_lun_cit ; struct config_item_type tf_tpg_port_cit ; struct config_item_type tf_tpg_port_stat_cit ; struct config_item_type tf_tpg_np_cit ; struct config_item_type tf_tpg_np_base_cit ; struct config_item_type tf_tpg_attrib_cit ; struct config_item_type tf_tpg_auth_cit ; struct config_item_type tf_tpg_param_cit ; struct config_item_type tf_tpg_nacl_cit ; struct config_item_type tf_tpg_nacl_base_cit ; struct config_item_type tf_tpg_nacl_attrib_cit ; struct config_item_type tf_tpg_nacl_auth_cit ; struct config_item_type tf_tpg_nacl_param_cit ; struct config_item_type tf_tpg_nacl_stat_cit ; struct config_item_type tf_tpg_mappedlun_cit ; struct config_item_type tf_tpg_mappedlun_stat_cit ; }; struct target_core_configfs_attribute { struct configfs_attribute attr ; ssize_t (*show)(void * , char * ) ; ssize_t (*store)(void * , char const * , size_t ) ; }; struct target_backend_dev_attrib_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_dev_attrib * , char * ) ; ssize_t (*store)(struct se_dev_attrib * , char const * , size_t ) ; }; struct target_core_dev_attrib_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_dev_attrib * , char * ) ; ssize_t (*store)(struct se_dev_attrib * , char const * , size_t ) ; }; struct target_core_dev_wwn_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct t10_wwn * , char * ) ; ssize_t (*store)(struct t10_wwn * , char const * , size_t ) ; }; struct target_core_dev_pr_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_device * , char * ) ; ssize_t (*store)(struct se_device * , char const * , size_t ) ; }; struct target_core_alua_lu_gp_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct t10_alua_lu_gp * , char * ) ; ssize_t (*store)(struct t10_alua_lu_gp * , char const * , size_t ) ; }; struct target_core_alua_tg_pt_gp_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct t10_alua_tg_pt_gp * , char * ) ; ssize_t (*store)(struct t10_alua_tg_pt_gp * , char const * , size_t ) ; }; struct target_core_hba_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_hba * , char * ) ; ssize_t (*store)(struct se_hba * , char const * , size_t ) ; }; typedef bool ldv_func_ret_type; typedef bool ldv_func_ret_type___0; typedef bool ldv_func_ret_type___1; typedef bool ldv_func_ret_type___2; typedef int ldv_func_ret_type___3; typedef int ldv_func_ret_type___4; typedef int pao_T__; typedef int pao_T_____0; enum hrtimer_restart; union __anonunion___u_192 { unsigned long __val ; char __c[1U] ; }; typedef unsigned long pao_T_____1; typedef unsigned long pao_T_____2; typedef unsigned long pao_T_____3; typedef unsigned long pao_T_____4; enum ldv_37788 { SCSI_INST_INDEX = 0, SCSI_DEVICE_INDEX = 1, SCSI_AUTH_INTR_INDEX = 2, SCSI_INDEX_TYPE_MAX = 3 } ; typedef enum ldv_37788 scsi_index_t; union __anonunion___u_382 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_384 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_386 { struct se_device *__val ; char __c[1U] ; }; union 
__anonunion___u_388 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_390 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_392 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_394 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_396 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_398 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_400 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_402 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_404 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_406 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_408 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_410 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_412 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_414 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_416 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_418 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_420 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_422 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_424 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_426 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_428 { struct se_device *__val ; char __c[1U] ; }; enum hrtimer_restart; enum print_line_t; struct target_fabric_nacl_attrib_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_node_acl * , char * ) ; ssize_t (*store)(struct se_node_acl * , char const * , size_t ) ; }; struct target_fabric_nacl_auth_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_node_acl * , char * ) ; ssize_t (*store)(struct se_node_acl * , char const * , size_t ) ; }; struct target_fabric_nacl_param_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_node_acl * , char * ) ; ssize_t (*store)(struct se_node_acl * , char const * , size_t ) ; }; struct target_fabric_nacl_base_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_node_acl * , char * ) ; ssize_t (*store)(struct se_node_acl * , char const * , size_t ) ; }; struct target_fabric_np_base_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_tpg_np * , char * ) ; ssize_t (*store)(struct se_tpg_np * , char const * , size_t ) ; }; struct target_fabric_tpg_attrib_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_portal_group * , char * ) ; ssize_t (*store)(struct se_portal_group * , char const * , size_t ) ; }; struct target_fabric_tpg_auth_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_portal_group * , char * ) ; ssize_t (*store)(struct se_portal_group * , char const * , size_t ) ; }; struct target_fabric_tpg_param_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_portal_group * , char * ) ; ssize_t (*store)(struct se_portal_group * , char const * , size_t ) ; }; struct target_fabric_tpg_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_portal_group * , char * ) ; ssize_t (*store)(struct se_portal_group * , char const * , size_t ) ; }; struct target_fabric_wwn_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct target_fabric_configfs * , char * ) ; ssize_t (*store)(struct target_fabric_configfs * , char const * , size_t ) ; }; struct 
target_fabric_discovery_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct target_fabric_configfs * , char * ) ; ssize_t (*store)(struct target_fabric_configfs * , char const * , size_t ) ; }; struct target_fabric_mappedlun_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_lun_acl * , char * ) ; ssize_t (*store)(struct se_lun_acl * , char const * , size_t ) ; }; struct target_fabric_port_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_lun * , char * ) ; ssize_t (*store)(struct se_lun * , char const * , size_t ) ; }; enum hrtimer_restart; enum hrtimer_restart; typedef __u64 __be64; enum hrtimer_restart; union __anonunion___u_192___0 { unsigned long __val ; char __c[1U] ; }; typedef unsigned long pao_T_____9; typedef unsigned long pao_T_____10; typedef unsigned long pao_T_____11; typedef unsigned long pao_T_____12; typedef unsigned long pao_T_____13; typedef unsigned long pao_T_____14; typedef unsigned long pao_T_____15; typedef unsigned long pao_T_____16; struct pr_transport_id_holder { struct t10_pr_registration *dest_pr_reg ; struct se_portal_group *dest_tpg ; struct se_node_acl *dest_node_acl ; struct se_dev_entry *dest_se_deve ; struct list_head dest_list ; }; enum register_type { REGISTER = 0, REGISTER_AND_IGNORE_EXISTING_KEY = 1, REGISTER_AND_MOVE = 2 } ; enum preempt_type { PREEMPT = 0, PREEMPT_AND_ABORT = 1 } ; union __anonunion___u_382___0 { struct se_lun_acl *__val ; char __c[1U] ; }; union __anonunion___u_384___0 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_386___0 { struct se_lun_acl *__val ; char __c[1U] ; }; union __anonunion___u_388___0 { struct se_lun_acl *__val ; char __c[1U] ; }; union __anonunion___u_390___0 { struct se_lun *__val ; char __c[1U] ; }; union __anonunion___u_392___0 { struct se_lun *__val ; char __c[1U] ; }; enum hrtimer_restart; union __anonunion___u_192___1 { unsigned long __val ; char __c[1U] ; }; union __anonunion___u_382___1 { struct se_lun_acl *__val ; char __c[1U] ; }; union __anonunion___u_384___1 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_386___1 { struct se_device *__val ; char __c[1U] ; }; typedef bool ldv_func_ret_type___5; typedef bool ldv_func_ret_type___6; enum hrtimer_restart; enum hrtimer_restart; union __anonunion___u_382___2 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_384___2 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_386___2 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_388___1 { struct se_device *__val ; char __c[1U] ; }; struct paravirt_callee_save { void *func ; }; struct pv_irq_ops { struct paravirt_callee_save save_fl ; struct paravirt_callee_save restore_fl ; struct paravirt_callee_save irq_disable ; struct paravirt_callee_save irq_enable ; void (*safe_halt)(void) ; void (*halt)(void) ; void (*adjust_exception_frame)(void) ; }; enum hrtimer_restart; union __anonunion___u_192___2 { unsigned long __val ; char __c[1U] ; }; struct scsi_varlen_cdb_hdr { __u8 opcode ; __u8 control ; __u8 misc[5U] ; __u8 additional_cdb_length ; __be16 service_action ; }; struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list ; void *data ; int (*notify)(void * , unsigned long , unsigned int ) ; }; struct blk_align_bitmap; struct blk_mq_ctxmap { unsigned int size ; unsigned int bits_per_word ; struct blk_align_bitmap *map ; }; struct __anonstruct____missing_field_name_399 { spinlock_t lock ; struct list_head dispatch ; }; struct blk_mq_hw_ctx { struct 
__anonstruct____missing_field_name_399 __annonCompField124 ; unsigned long state ; struct delayed_work run_work ; struct delayed_work delay_work ; cpumask_var_t cpumask ; int next_cpu ; int next_cpu_batch ; unsigned long flags ; struct request_queue *queue ; struct blk_flush_queue *fq ; void *driver_data ; struct blk_mq_ctxmap ctx_map ; unsigned int nr_ctx ; struct blk_mq_ctx **ctxs ; atomic_t wait_index ; struct blk_mq_tags *tags ; unsigned long queued ; unsigned long run ; unsigned long dispatched[10U] ; unsigned int numa_node ; unsigned int queue_num ; atomic_t nr_active ; struct blk_mq_cpu_notifier cpu_notifier ; struct kobject kobj ; }; struct blk_mq_tag_set { struct blk_mq_ops *ops ; unsigned int nr_hw_queues ; unsigned int queue_depth ; unsigned int reserved_tags ; unsigned int cmd_size ; int numa_node ; unsigned int timeout ; unsigned int flags ; void *driver_data ; struct blk_mq_tags **tags ; struct mutex tag_list_lock ; struct list_head tag_list ; }; struct blk_mq_queue_data { struct request *rq ; struct list_head *list ; bool last ; }; typedef int queue_rq_fn(struct blk_mq_hw_ctx * , struct blk_mq_queue_data const * ); typedef struct blk_mq_hw_ctx *map_queue_fn(struct request_queue * , int const ); typedef enum blk_eh_timer_return timeout_fn(struct request * , bool ); typedef int init_hctx_fn(struct blk_mq_hw_ctx * , void * , unsigned int ); typedef void exit_hctx_fn(struct blk_mq_hw_ctx * , unsigned int ); typedef int init_request_fn(void * , struct request * , unsigned int , unsigned int , unsigned int ); typedef void exit_request_fn(void * , struct request * , unsigned int , unsigned int ); struct blk_mq_ops { queue_rq_fn *queue_rq ; map_queue_fn *map_queue ; timeout_fn *timeout ; softirq_done_fn *complete ; init_hctx_fn *init_hctx ; exit_hctx_fn *exit_hctx ; init_request_fn *init_request ; exit_request_fn *exit_request ; }; union __anonunion___u_404___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_406___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_408___0 { struct tracepoint_func *__val ; char __c[1U] ; }; union __anonunion___u_410___0 { struct tracepoint_func *__val ; char __c[1U] ; }; struct trace_print_flags { unsigned long mask ; char const *name ; }; enum print_line_t; struct trace_event_raw_target_sequencer_start { struct trace_entry ent ; unsigned int unpacked_lun ; unsigned int opcode ; unsigned int data_length ; unsigned int task_attribute ; unsigned char cdb[32U] ; u32 __data_loc_initiator ; char __data[0U] ; }; struct trace_event_raw_target_cmd_complete { struct trace_entry ent ; unsigned int unpacked_lun ; unsigned int opcode ; unsigned int data_length ; unsigned int task_attribute ; unsigned char scsi_status ; unsigned char sense_length ; unsigned char cdb[32U] ; unsigned char sense_data[18U] ; u32 __data_loc_initiator ; char __data[0U] ; }; enum hrtimer_restart; struct sg_page_iter { struct scatterlist *sg ; unsigned int sg_pgoffset ; unsigned int __nents ; int __pg_advance ; }; struct sg_mapping_iter { struct page *page ; void *addr ; size_t length ; size_t consumed ; struct sg_page_iter piter ; unsigned int __offset ; unsigned int __remaining ; unsigned int __flags ; }; struct se_dif_v1_tuple { __be16 guard_tag ; __be16 app_tag ; __be32 ref_tag ; }; struct sbc_ops { sense_reason_t (*execute_rw)(struct se_cmd * , struct scatterlist * , u32 , enum dma_data_direction ) ; sense_reason_t (*execute_sync_cache)(struct se_cmd * ) ; sense_reason_t (*execute_write_same)(struct se_cmd * ) ; sense_reason_t 
(*execute_unmap)(struct se_cmd * , sector_t , sector_t ) ; }; enum hrtimer_restart; struct scsi_lun { __u8 scsi_lun[8U] ; }; struct __anonstruct_evpd_handlers_396 { uint8_t page ; sense_reason_t (*emulate)(struct se_cmd * , unsigned char * ) ; }; union __anonunion___u_401 { struct se_device *__val ; char __c[1U] ; }; struct __anonstruct_modesense_handlers_402 { uint8_t page ; uint8_t subpage ; int (*emulate)(struct se_cmd * , u8 , unsigned char * ) ; }; union __anonunion___u_407 { struct hlist_node *__val ; char __c[1U] ; }; union __anonunion___u_409 { struct hlist_node *__val ; char __c[1U] ; }; enum hrtimer_restart; struct se_ua { u8 ua_asc ; u8 ua_ascq ; struct list_head ua_nacl_list ; }; enum hrtimer_restart; struct rd_dev_sg_table { u32 page_start_offset ; u32 page_end_offset ; u32 rd_sg_count ; struct scatterlist *sg_table ; }; struct rd_host; struct rd_dev { struct se_device dev ; u32 rd_flags ; u32 rd_dev_id ; u32 rd_page_count ; u32 sg_table_count ; u32 sg_prot_count ; struct rd_dev_sg_table *sg_table_array ; struct rd_dev_sg_table *sg_prot_array ; struct rd_host *rd_host ; }; struct rd_host { u32 rd_host_dev_id_count ; u32 rd_host_id ; }; enum hrtimer_restart; struct target_stat_scsi_dev_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_dev_stat_grps * , char * ) ; ssize_t (*store)(struct se_dev_stat_grps * , char const * , size_t ) ; }; struct target_stat_scsi_tgt_dev_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_dev_stat_grps * , char * ) ; ssize_t (*store)(struct se_dev_stat_grps * , char const * , size_t ) ; }; struct target_stat_scsi_lu_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_dev_stat_grps * , char * ) ; ssize_t (*store)(struct se_dev_stat_grps * , char const * , size_t ) ; }; struct target_stat_scsi_port_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_port_stat_grps * , char * ) ; ssize_t (*store)(struct se_port_stat_grps * , char const * , size_t ) ; }; union __anonunion___u_393 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_395 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_397 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_399 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_401___0 { struct se_device *__val ; char __c[1U] ; }; struct target_stat_scsi_tgt_port_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_port_stat_grps * , char * ) ; ssize_t (*store)(struct se_port_stat_grps * , char const * , size_t ) ; }; union __anonunion___u_403 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_405 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_407___0 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_409___0 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_411 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_413 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_415 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_417 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_419 { struct se_device *__val ; char __c[1U] ; }; struct target_stat_scsi_transport_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_port_stat_grps * , char * ) ; ssize_t (*store)(struct se_port_stat_grps * , char const * , size_t ) ; }; union __anonunion___u_421 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_423 { struct 
se_device *__val ; char __c[1U] ; }; union __anonunion___u_425 { struct se_device *__val ; char __c[1U] ; }; union __anonunion___u_427 { struct se_device *__val ; char __c[1U] ; }; struct target_stat_scsi_auth_intr_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_ml_stat_grps * , char * ) ; ssize_t (*store)(struct se_ml_stat_grps * , char const * , size_t ) ; }; union __anonunion___u_429 { struct se_lun *__val ; char __c[1U] ; }; struct target_stat_scsi_att_intr_port_attribute { struct configfs_attribute attr ; ssize_t (*show)(struct se_ml_stat_grps * , char * ) ; ssize_t (*store)(struct se_ml_stat_grps * , char const * , size_t ) ; }; union __anonunion___u_431 { struct se_lun *__val ; char __c[1U] ; }; enum hrtimer_restart; struct xcopy_pt_cmd; struct xcopy_op { int op_origin ; struct se_cmd *xop_se_cmd ; struct se_device *src_dev ; unsigned char src_tid_wwn[16U] ; struct se_device *dst_dev ; unsigned char dst_tid_wwn[16U] ; unsigned char local_dev_wwn[16U] ; sector_t src_lba ; sector_t dst_lba ; unsigned short stdi ; unsigned short dtdi ; unsigned short nolb ; unsigned int dbl ; struct xcopy_pt_cmd *src_pt_cmd ; struct xcopy_pt_cmd *dst_pt_cmd ; u32 xop_data_nents ; struct scatterlist *xop_data_sg ; struct work_struct xop_work ; }; struct xcopy_pt_cmd { bool remote_port ; struct se_cmd se_cmd ; struct xcopy_op *xcopy_op ; struct completion xpt_passthrough_sem ; unsigned char sense_buffer[96U] ; }; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; extern int printk(char const * , ...) ; extern void __dynamic_pr_debug(struct _ddebug * , char const * , ...) ; extern int kstrtoull(char const * , unsigned int , unsigned long long * ) ; __inline static int kstrtoul(char const *s , unsigned int base , unsigned long *res ) { int tmp ; { tmp = kstrtoull(s, base, (unsigned long long *)res); return (tmp); } } extern int kstrtouint(char const * , unsigned int , unsigned int * ) ; __inline static int kstrtou32(char const *s , unsigned int base , u32 *res ) { int tmp ; { tmp = kstrtouint(s, base, res); return (tmp); } } extern int sprintf(char * , char const * , ...) ; extern int snprintf(char * , size_t , char const * , ...) ; extern int sscanf(char const * , char const * , ...) 
; bool ldv_is_err(void const *ptr ) ; void *ldv_err_ptr(long error ) ; long ldv_ptr_err(void const *ptr ) ; extern void __bad_percpu_size(void) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_2696; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2696; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2696; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2696; default: __bad_percpu_size(); } ldv_2696: ; return (pfo_ret__); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void list_del(struct list_head * ) ; __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern size_t strlen(char const * ) ; extern int strcmp(char const * , char const * ) ; extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern int strncmp(char const * , char const * , __kernel_size_t ) ; extern char *strchr(char const * , int ) ; extern char *strim(char * ) ; __inline static char *strstrip(char *str ) { char *tmp ; { tmp = strim(str); return (tmp); } } extern char *strstr(char const * , char const * ) ; extern char *strsep(char ** , char const * ) ; extern char *kstrdup(char const * , gfp_t ) ; extern int strtobool(char const * , bool * ) ; __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static void *ERR_CAST(void const *ptr ) { { return ((void *)ptr); } } __inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; extern int mutex_trylock(struct mutex * ) ; int ldv_mutex_trylock_15(struct mutex *ldv_func_arg1 ) ; extern void mutex_unlock(struct mutex * ) ; void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_12(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_19(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_21(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_23(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_25(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_26(struct mutex *ldv_func_arg1 ) ; void 
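/* LDV environment model (continued below): the ldv_mutex_lock/ldv_mutex_unlock wrappers
 * appear to shadow the kernel mutex API per lock class so the verifier can check
 * lock/unlock ordering; ldv_malloc and ldv_zalloc model an allocation that either fails
 * (returns NULL) or succeeds, with the branch chosen nondeterministically through
 * __VERIFIER_nondet_int and the successful pointer constrained non-NULL via
 * __VERIFIER_assume; the ldv_undef_ helpers return unconstrained values and ldv_stop
 * spins forever to cut off infeasible paths. */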
ldv_mutex_unlock_28(struct mutex *ldv_func_arg1 ) ; extern void *malloc(size_t ) ; extern void *calloc(size_t , size_t ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_init_zalloc(size_t size ) { void *p ; void *tmp ; { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } void *ldv_memset(void *s , int c , size_t n ) { void *tmp ; { tmp = memset(s, c, n); return (tmp); } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } __inline static long ldv__builtin_expect(long exp , long c ) { { return (exp); } } extern int mutex_lock_interruptible(struct mutex * ) ; int ldv_mutex_lock_interruptible_24(struct mutex *ldv_func_arg1 ) ; extern void mutex_lock(struct mutex * ) ; void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_14(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_17(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_20(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_22(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_27(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_g_tf_lock(struct mutex *lock ) ; void ldv_mutex_unlock_g_tf_lock(struct mutex *lock ) ; int ldv_mutex_lock_interruptible_hba_access_mutex_of_se_hba(struct mutex *lock ) ; void ldv_mutex_lock_hba_access_mutex_of_se_hba(struct mutex *lock ) ; void ldv_mutex_unlock_hba_access_mutex_of_se_hba(struct mutex *lock ) ; void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock ) ; void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock ) ; void ldv_mutex_lock_lock(struct mutex *lock ) ; void ldv_mutex_unlock_lock(struct mutex *lock ) ; void ldv_mutex_lock_mutex_of_device(struct mutex *lock ) ; int ldv_mutex_trylock_mutex_of_device(struct mutex *lock ) ; void ldv_mutex_unlock_mutex_of_device(struct mutex *lock ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField17.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField17.rlock); return; } } extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool 
queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) ; extern void kfree(void const * ) ; extern void *__kmalloc(size_t , gfp_t ) ; __inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); } } int ldv_state_variable_151 ; int ldv_state_variable_99 ; int ldv_state_variable_47 ; int ldv_state_variable_20 ; struct config_item *target_core_alua_tg_pt_gp_ops_group1 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_implicit_trans_secs_group0 ; int ldv_state_variable_125 ; struct config_item *target_fabric_tpg_attrib_item_ops_group1 ; int ldv_state_variable_173 ; struct se_dev_attrib *target_core_dev_attrib_is_nonrot_group0 ; int ldv_state_variable_54 ; int ldv_state_variable_17 ; int ldv_state_variable_160 ; int ldv_state_variable_66 ; int ldv_state_variable_19 ; struct work_struct *ldv_work_struct_4_3 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; int ldv_state_variable_100 ; struct se_hba *rd_mcp_ops_group1 ; int ldv_state_variable_83 ; int ldv_work_3_3 ; struct configfs_attribute *target_stat_scsi_transport_attrib_ops_group0 ; int ldv_state_variable_55 ; struct config_item *target_core_dev_item_ops_group1 ; int ldv_work_1_3 ; int ldv_state_variable_145 ; struct work_struct *ldv_work_struct_3_2 ; int ldv_state_variable_80 ; struct work_struct *ldv_work_struct_7_2 ; int ldv_state_variable_64 ; int ldv_state_variable_28 ; struct configfs_attribute *target_fabric_tpg_auth_item_ops_group0 ; struct config_group *target_fabric_mappedlun_stat_group_ops_group0 ; struct se_dev_attrib *target_core_dev_attrib_force_pr_aptpl_group0 ; struct work_struct *ldv_work_struct_6_0 ; int ldv_state_variable_166 ; struct se_dev_attrib *target_core_dev_attrib_unmap_granularity_group0 ; int ldv_work_7_1 ; int ldv_state_variable_78 ; int ldv_state_variable_76 ; struct se_device *target_core_dev_pr_res_aptpl_metadata_group0 ; struct config_group *target_core_group_ops_group0 ; int ldv_work_6_2 ; int ldv_state_variable_137 ; int ldv_state_variable_89 ; int ldv_state_variable_124 ; int ldv_state_variable_8 ; int ldv_state_variable_169 ; int ldv_state_variable_46 ; struct se_dev_attrib *target_core_dev_attrib_max_unmap_block_desc_count_group0 ; struct configfs_attribute *target_fabric_tpg_attrib_item_ops_group0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_standby_group0 ; struct configfs_attribute *target_core_dev_item_ops_group0 ; int ldv_state_variable_75 ; int ldv_state_variable_33 ; struct se_lun_acl *target_fabric_mappedlun_write_protect_group0 ; int ldv_state_variable_123 ; int ldv_state_variable_161 ; struct se_dev_attrib *target_core_dev_attrib_emulate_dpo_group0 ; int ldv_state_variable_172 ; int ldv_work_3_0 ; int ldv_state_variable_65 ; int ldv_state_variable_98 ; struct config_item *target_core_dev_wwn_ops_group1 ; struct config_group *target_fabric_tpg_group_ops_group0 ; struct 
configfs_attribute *target_stat_scsi_tgt_dev_attrib_ops_group0 ; struct config_item *target_stat_scsi_tgt_port_attrib_ops_group1 ; int ldv_state_variable_194 ; int ldv_state_variable_70 ; struct config_item *target_stat_scsi_dev_attrib_ops_group1 ; int ldv_state_variable_142 ; struct se_dev_attrib *target_core_dev_attrib_max_write_same_len_group0 ; int ldv_state_variable_158 ; int ldv_work_6_1 ; struct config_item *target_fabric_tpg_base_item_ops_group1 ; struct work_struct *ldv_work_struct_1_0 ; struct config_item *target_stat_scsi_transport_attrib_ops_group1 ; int ldv_state_variable_193 ; int ldv_work_7_0 ; int ldv_state_variable_63 ; struct work_struct *ldv_work_struct_7_3 ; int ldv_state_variable_105 ; int ldv_state_variable_2 ; int ldv_work_2_0 ; struct config_item *target_fabric_mappedlun_item_ops_group2 ; int ldv_work_4_2 ; int ldv_state_variable_11 ; int ldv_state_variable_113 ; int ldv_work_1_2 ; int ldv_state_variable_18 ; struct config_item *target_core_dev_pr_ops_group1 ; int ldv_state_variable_150 ; struct work_struct *ldv_work_struct_5_0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_lba_dependent_group0 ; struct config_item *target_stat_scsi_tgt_dev_attrib_ops_group1 ; int ldv_state_variable_90 ; struct configfs_attribute *target_stat_scsi_dev_attrib_ops_group0 ; int ldv_state_variable_97 ; struct config_group *target_core_stat_group_ops_group0 ; int ldv_state_variable_162 ; int ldv_state_variable_192 ; int ldv_state_variable_30 ; struct config_item *target_fabric_tpg_param_item_ops_group1 ; int ldv_state_variable_0 ; struct se_lun *target_fabric_port_alua_tg_pt_offline_group0 ; int ldv_state_variable_81 ; struct config_group *target_core_alua_lu_gps_group_ops_group0 ; struct trace_event_call *event_class_target_sequencer_start_group0 ; int ldv_state_variable_201 ; int ldv_state_variable_102 ; struct config_group *target_fabric_lun_group_ops_group0 ; struct configfs_attribute *target_fabric_tpg_base_item_ops_group0 ; int ldv_state_variable_87 ; int ldv_state_variable_136 ; int ldv_state_variable_73 ; int ldv_state_variable_29 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_unavailable_group0 ; int ldv_state_variable_115 ; struct work_struct *ldv_work_struct_2_0 ; struct t10_wwn *target_core_dev_wwn_vpd_protocol_identifier_group0 ; struct se_dev_attrib *target_core_dev_attrib_optimal_sectors_group0 ; int ldv_state_variable_91 ; struct work_struct *ldv_work_struct_6_1 ; int ref_cnt ; int ldv_state_variable_168 ; struct work_struct *ldv_work_struct_3_3 ; int ldv_state_variable_23 ; int ldv_state_variable_143 ; struct work_struct *ldv_work_struct_1_1 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_transitioning_group0 ; int ldv_state_variable_59 ; int ldv_state_variable_6 ; int ldv_state_variable_182 ; struct config_item *target_fabric_discovery_item_ops_group1 ; struct work_struct *ldv_work_struct_4_2 ; int ldv_state_variable_178 ; int ldv_state_variable_38 ; int ldv_state_variable_157 ; int ldv_state_variable_126 ; int ldv_state_variable_104 ; int ldv_state_variable_52 ; struct t10_wwn *target_core_dev_wwn_vpd_assoc_scsi_target_device_group0 ; int ldv_state_variable_36 ; int ldv_state_variable_60 ; int ldv_state_variable_103 ; int ldv_state_variable_140 ; int ldv_state_variable_48 ; int ldv_state_variable_107 ; struct se_dev_attrib *target_core_dev_attrib_queue_depth_group0 ; int ldv_state_variable_148 ; struct configfs_attribute *target_core_alua_tg_pt_gp_ops_group0 ; struct se_device *rd_mcp_ops_group0 ; struct 
se_dev_attrib *target_core_dev_attrib_emulate_tas_group0 ; int ldv_state_variable_163 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_active_optimized_group0 ; int ldv_state_variable_191 ; int ldv_work_3_2 ; int ldv_state_variable_138 ; struct t10_alua_lu_gp *target_core_alua_lu_gp_lu_gp_id_group0 ; int ldv_state_variable_82 ; struct work_struct *ldv_work_struct_2_3 ; int ldv_state_variable_49 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_preferred_group0 ; int ldv_state_variable_24 ; int ldv_state_variable_202 ; struct se_lun *target_fabric_port_alua_tg_pt_gp_group0 ; int ldv_state_variable_1 ; int ldv_state_variable_114 ; int ldv_state_variable_176 ; struct work_struct *ldv_work_struct_6_2 ; struct se_dev_attrib *target_core_dev_attrib_enforce_pr_isids_group0 ; struct config_item *target_fabric_tpg_auth_item_ops_group1 ; int ldv_state_variable_16 ; struct config_item *target_fabric_nacl_auth_item_ops_group1 ; struct configfs_attribute *target_fabric_nacl_auth_item_ops_group0 ; int ldv_work_6_3 ; struct configfs_attribute *target_core_dev_wwn_ops_group0 ; int ldv_state_variable_200 ; struct work_struct *ldv_work_struct_3_0 ; struct t10_wwn *target_core_dev_wwn_vpd_assoc_target_port_group0 ; struct configfs_attribute *target_core_hba_item_ops_group0 ; int ldv_state_variable_131 ; int ldv_state_variable_53 ; int ldv_state_variable_67 ; struct config_group *target_core_hba_group_ops_group0 ; struct work_struct *ldv_work_struct_1_2 ; struct configfs_attribute *target_stat_scsi_att_intr_port_attrib_ops_group0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_nonop_delay_msecs_group0 ; struct se_dev_attrib *target_core_dev_attrib_emulate_write_cache_group0 ; struct work_struct *ldv_work_struct_4_1 ; int ldv_state_variable_92 ; struct config_group *target_core_alua_tg_pt_gps_group_ops_group0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_access_status_group0 ; int ldv_state_variable_130 ; int ldv_state_variable_189 ; int ldv_state_variable_156 ; int ldv_state_variable_179 ; int ldv_state_variable_35 ; struct t10_wwn *target_core_dev_wwn_vpd_unit_serial_group0 ; struct configfs_attribute *target_fabric_mappedlun_item_ops_group0 ; struct work_struct *ldv_work_struct_3_1 ; struct config_item *target_fabric_nacl_base_item_ops_group1 ; int ldv_state_variable_106 ; struct config_item *target_core_dev_attrib_ops_group1 ; int ldv_work_1_1 ; struct se_cmd *xcopy_pt_tfo_group0 ; int ldv_state_variable_111 ; int ldv_state_variable_149 ; int ldv_state_variable_109 ; int ldv_state_variable_14 ; struct configfs_attribute *target_fabric_discovery_item_ops_group0 ; int ldv_state_variable_37 ; struct configfs_attribute *target_stat_scsi_port_attrib_ops_group0 ; int ldv_state_variable_51 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_tg_pt_gp_id_group0 ; struct se_dev_attrib *target_core_dev_attrib_emulate_tpu_group0 ; int ldv_work_7_2 ; int ldv_state_variable_190 ; struct work_struct *ldv_work_struct_2_2 ; struct work_struct *ldv_work_struct_7_1 ; int ldv_state_variable_42 ; struct work_struct *ldv_work_struct_4_0 ; int ldv_state_variable_7 ; int ldv_state_variable_164 ; int ldv_state_variable_119 ; int ldv_state_variable_174 ; struct configfs_attribute *target_fabric_nacl_attrib_item_ops_group0 ; int ldv_work_4_0 ; struct work_struct *ldv_work_struct_2_1 ; int ldv_state_variable_26 ; struct work_struct *ldv_work_struct_7_0 ; struct se_dev_attrib *target_core_dev_attrib_pi_prot_type_group0 ; int LDV_IN_INTERRUPT = 1; struct configfs_attribute 
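/* The ldv_state_variable_N, ldv_work_N_M, ldv_work_struct_N_M and *_groupN globals
   in this block appear to belong to the generated LDV environment model: the state
   variables record the registration/usage state of one callback structure (configfs
   item/group ops, attribute ops, fabric ops, work handlers), and the *_groupN
   pointers hold the argument objects the harness passes when it later invokes those
   callbacks. */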
*target_stat_scsi_tgt_port_attrib_ops_group0 ; struct config_item *target_stat_scsi_att_intr_port_attrib_ops_group1 ; int ldv_state_variable_58 ; int ldv_state_variable_155 ; int ldv_state_variable_188 ; int ldv_work_5_2 ; int ldv_state_variable_93 ; int ldv_state_variable_186 ; int ldv_state_variable_177 ; int ldv_state_variable_31 ; int ldv_state_variable_96 ; int ldv_state_variable_141 ; int ldv_state_variable_68 ; int ldv_work_2_1 ; int ldv_state_variable_198 ; int ldv_state_variable_15 ; struct configfs_attribute *target_core_alua_lu_gp_ops_group0 ; struct work_struct *ldv_work_struct_1_3 ; int ldv_state_variable_187 ; int ldv_state_variable_74 ; int ldv_state_variable_21 ; int ldv_state_variable_146 ; struct config_item *target_fabric_mappedlun_item_ops_group1 ; int ldv_state_variable_69 ; int ldv_state_variable_180 ; struct config_group *target_fabric_np_group_ops_group0 ; int ldv_state_variable_197 ; struct se_dev_attrib *target_core_dev_attrib_emulate_fua_read_group0 ; int ldv_state_variable_88 ; struct config_group *target_fabric_nacl_base_group_ops_group0 ; struct configfs_attribute *target_stat_scsi_auth_intr_attrib_ops_group0 ; struct config_item *target_core_hba_item_ops_group1 ; struct configfs_attribute *target_stat_scsi_lu_attrib_ops_group0 ; int ldv_state_variable_139 ; int ldv_state_variable_94 ; struct configfs_attribute *target_fabric_nacl_base_item_ops_group0 ; struct config_item *target_stat_scsi_port_attrib_ops_group1 ; int ldv_state_variable_110 ; int ldv_work_5_3 ; int ldv_state_variable_41 ; int ldv_state_variable_62 ; int ldv_state_variable_40 ; int ldv_state_variable_10 ; int ldv_state_variable_133 ; struct configfs_attribute *target_core_dev_pr_ops_group0 ; int ldv_work_4_1 ; struct se_lun *target_fabric_port_alua_tg_pt_write_md_group0 ; struct config_item *target_fabric_port_item_ops_group2 ; int ldv_state_variable_25 ; struct se_dev_attrib *target_core_dev_attrib_emulate_ua_intlck_ctrl_group0 ; int ldv_state_variable_154 ; struct se_dev_attrib *target_core_dev_attrib_emulate_caw_group0 ; struct config_item *target_fabric_nacl_attrib_item_ops_group1 ; int ldv_state_variable_79 ; int ldv_state_variable_127 ; int ldv_state_variable_183 ; struct configfs_attribute *target_fabric_np_base_item_ops_group0 ; int ldv_work_2_2 ; struct se_dev_attrib *target_core_dev_attrib_emulate_fua_write_group0 ; int ldv_state_variable_108 ; int ldv_state_variable_32 ; struct config_item *target_stat_scsi_lu_attrib_ops_group1 ; struct trace_event_call *event_class_target_cmd_complete_group0 ; int ldv_state_variable_181 ; int ldv_state_variable_45 ; int ldv_state_variable_12 ; int ldv_state_variable_95 ; int ldv_state_variable_122 ; int ldv_state_variable_171 ; int ldv_state_variable_22 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_offline_group0 ; int ldv_state_variable_147 ; struct se_dev_attrib *target_core_dev_attrib_emulate_3pc_group0 ; struct t10_wwn *target_core_dev_wwn_vpd_assoc_logical_unit_group0 ; struct se_dev_attrib *target_core_dev_attrib_unmap_granularity_alignment_group0 ; struct config_item *target_fabric_wwn_item_ops_group1 ; struct se_dev_attrib *target_core_dev_attrib_block_size_group0 ; int ldv_state_variable_61 ; int ldv_work_6_0 ; int ldv_state_variable_196 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_support_active_nonoptimized_group0 ; int ldv_state_variable_165 ; int ldv_state_variable_72 ; struct config_item *target_fabric_nacl_param_item_ops_group1 ; int ldv_state_variable_132 ; struct t10_alua_tg_pt_gp 
*target_core_alua_tg_pt_gp_alua_access_state_group0 ; struct config_item *target_fabric_port_item_ops_group1 ; int ldv_state_variable_120 ; struct config_group *target_fabric_port_stat_group_ops_group0 ; struct se_dev_attrib *target_core_dev_attrib_max_unmap_lba_count_group0 ; int ldv_work_5_0 ; int ldv_state_variable_50 ; int ldv_state_variable_84 ; int ldv_state_variable_86 ; int ldv_state_variable_44 ; struct config_group *target_fabric_nacl_group_ops_group0 ; int ldv_state_variable_116 ; int ldv_state_variable_128 ; int ldv_state_variable_204 ; int ldv_state_variable_39 ; int ldv_state_variable_175 ; int ldv_state_variable_101 ; struct work_struct *ldv_work_struct_5_1 ; int ldv_state_variable_56 ; struct config_item *target_stat_scsi_auth_intr_attrib_ops_group1 ; int ldv_state_variable_112 ; int ldv_state_variable_199 ; int ldv_state_variable_3 ; int ldv_work_1_0 ; int ldv_state_variable_135 ; struct se_dev_attrib *target_core_dev_attrib_emulate_model_alias_group0 ; struct config_item *target_core_alua_lu_gp_ops_group1 ; int ldv_state_variable_184 ; struct configfs_attribute *target_fabric_nacl_param_item_ops_group0 ; int ldv_state_variable_4 ; struct config_item *target_fabric_np_base_item_ops_group1 ; int ldv_state_variable_118 ; struct configfs_attribute *target_core_dev_attrib_ops_group0 ; int ldv_state_variable_117 ; struct work_struct *ldv_work_struct_6_3 ; struct work_struct *ldv_work_struct_5_2 ; struct work_struct *ldv_work_struct_5_3 ; int ldv_state_variable_5 ; int ldv_state_variable_13 ; int ldv_state_variable_170 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_access_type_group0 ; struct config_group *target_core_fabric_group_ops_group0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_alua_write_metadata_group0 ; int ldv_state_variable_152 ; struct se_dev_attrib *target_core_dev_attrib_emulate_rest_reord_group0 ; struct t10_alua_tg_pt_gp *target_core_alua_tg_pt_gp_trans_delay_msecs_group0 ; struct se_hba *target_core_hba_hba_mode_group0 ; int ldv_work_7_3 ; int ldv_state_variable_153 ; struct configfs_attribute *target_fabric_tpg_param_item_ops_group0 ; struct configfs_attribute *target_fabric_port_item_ops_group0 ; struct se_dev_attrib *target_core_dev_attrib_emulate_tpws_group0 ; int ldv_state_variable_159 ; int ldv_state_variable_85 ; struct configfs_attribute *target_fabric_wwn_item_ops_group0 ; int ldv_state_variable_71 ; int ldv_state_variable_195 ; int ldv_state_variable_77 ; struct config_group *target_fabric_wwn_group_ops_group0 ; int ldv_state_variable_144 ; int ldv_work_4_3 ; int ldv_work_3_1 ; struct se_dev_attrib *target_core_dev_attrib_pi_prot_format_group0 ; int ldv_state_variable_43 ; int ldv_state_variable_121 ; int ldv_work_5_1 ; int ldv_state_variable_57 ; struct se_lun *target_fabric_port_alua_tg_pt_status_group0 ; int ldv_state_variable_134 ; int ldv_state_variable_167 ; int ldv_state_variable_185 ; int ldv_state_variable_129 ; int ldv_state_variable_34 ; int ldv_state_variable_203 ; int ldv_work_2_3 ; void ldv_initialize_configfs_item_operations_112(void) ; void ldv_initialize_target_core_dev_pr_attribute_154(void) ; void ldv_initialize_configfs_item_operations_94(void) ; void ldv_initialize_target_core_alua_lu_gp_attribute_144(void) ; void ldv_initialize_configfs_item_operations_163(void) ; void ldv_initialize_configfs_item_operations_9(void) ; void ldv_initialize_configfs_item_operations_169(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_187(void) ; void ldv_initialize_configfs_group_operations_203(void) ; void 
ldv_initialize_configfs_item_operations_111(void) ; void work_init_1(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_193(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_139(void) ; void ldv_initialize_configfs_group_operations_92(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_138(void) ; void ldv_initialize_configfs_item_operations_95(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_134(void) ; void ldv_initialize_configfs_group_operations_113(void) ; void ldv_initialize_configfs_group_operations_105(void) ; void ldv_initialize_configfs_item_operations_145(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_133(void) ; void ldv_initialize_configfs_group_operations_98(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_185(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_177(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_192(void) ; void ldv_initialize_target_core_dev_wwn_attribute_168(void) ; void ldv_initialize_trace_event_class_86(void) ; void ldv_initialize_target_core_dev_wwn_attribute_164(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_195(void) ; void ldv_initialize_configfs_item_operations_96(void) ; void ldv_initialize_configfs_item_operations_142(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_136(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_189(void) ; void ldv_initialize_configfs_group_operations_116(void) ; void ldv_initialize_configfs_item_operations_52(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_178(void) ; void ldv_initialize_configfs_group_operations_120(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_200(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_137(void) ; void ldv_initialize_target_backend_ops_83(void) ; void ldv_initialize_configfs_item_operations_36(void) ; void ldv_initialize_configfs_item_operations_97(void) ; void ldv_initialize_target_core_hba_attribute_118(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_188(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_186(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_135(void) ; void ldv_initialize_trace_event_class_85(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_194(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_127(void) ; void ldv_initialize_configfs_group_operations_99(void) ; void ldv_initialize_target_core_dev_wwn_attribute_166(void) ; void ldv_initialize_configfs_group_operations_141(void) ; void ldv_initialize_configfs_group_operations_91(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_181(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_175(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_197(void) ; void ldv_initialize_configfs_group_operations_121(void) ; void work_init_5(void) ; void ldv_initialize_target_core_dev_wwn_attribute_165(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_128(void) ; void ldv_initialize_configfs_item_operations_71(void) ; void ldv_initialize_configfs_item_operations_89(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_199(void) ; void ldv_initialize_configfs_item_operations_16(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_126(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_140(void) ; void activate_work_2(struct work_struct *work , int state ) ; void 
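/* The ldv_initialize_*(), work_init_N(), activate_work_N() and call_and_disable_all_N()
   prototypes in this run are further harness entry points: ldv_initialize_*() appear
   to allocate the *_groupN argument objects for one registered callback structure,
   while the work_* functions model initialization and (de)activation of the driver's
   work_struct items so their handlers can be invoked nondeterministically. */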
ldv_initialize_target_core_alua_tg_pt_gp_attribute_130(void) ; void ldv_initialize_configfs_item_operations_123(void) ; void ldv_initialize_configfs_group_operations_122(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_176(void) ; void ldv_initialize_configfs_item_operations_114(void) ; void work_init_4(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_196(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_202(void) ; void ldv_initialize_configfs_item_operations_90(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_129(void) ; void ldv_initialize_target_fabric_port_attribute_101(void) ; void work_init_3(void) ; void ldv_initialize_configfs_item_operations_110(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_174(void) ; void ldv_initialize_configfs_group_operations_108(void) ; void call_and_disable_all_2(int state ) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_131(void) ; void ldv_initialize_configfs_item_operations_153(void) ; void work_init_7(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_201(void) ; void ldv_initialize_configfs_item_operations_117(void) ; void ldv_initialize_target_fabric_port_attribute_103(void) ; void ldv_initialize_configfs_item_operations_100(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_191(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_125(void) ; void ldv_initialize_target_core_dev_wwn_attribute_167(void) ; void ldv_initialize_configfs_group_operations_107(void) ; void ldv_initialize_configfs_item_operations_31(void) ; void ldv_initialize_target_fabric_port_attribute_102(void) ; void work_init_2(void) ; void ldv_initialize_configfs_item_operations_78(void) ; void ldv_initialize_configfs_item_operations_106(void) ; void work_init_6(void) ; void ldv_initialize_configfs_item_operations_46(void) ; void ldv_initialize_target_core_fabric_ops_8(void) ; void ldv_initialize_target_fabric_port_attribute_104(void) ; void ldv_initialize_target_core_alua_tg_pt_gp_attribute_132(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_179(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_183(void) ; void ldv_initialize_configfs_item_operations_109(void) ; void ldv_initialize_target_backend_dev_attrib_attribute_198(void) ; void ldv_initialize_target_fabric_mappedlun_attribute_115(void) ; extern int __request_module(bool , char const * , ...) 
; __inline static struct new_utsname *utsname(void) { struct task_struct *tmp ; { tmp = get_current(); return (& ((tmp->nsproxy)->uts_ns)->name); } } extern int match_token(char * , struct match_token const * , substring_t * ) ; extern int match_int(substring_t * , int * ) ; extern char *match_strdup(substring_t const * ) ; __inline static char *config_item_name(struct config_item *item ) { { return (item->ci_name); } } extern void config_item_put(struct config_item * ) ; extern void config_group_init(struct config_group * ) ; extern void config_group_init_type_name(struct config_group * , char const * , struct config_item_type * ) ; __inline static struct config_group *to_config_group(struct config_item *item ) { struct config_item const *__mptr ; struct config_group *tmp ; { if ((unsigned long )item != (unsigned long )((struct config_item *)0)) { __mptr = (struct config_item const *)item; tmp = (struct config_group *)__mptr; } else { tmp = (struct config_group *)0; } return (tmp); } } extern int configfs_register_subsystem(struct configfs_subsystem * ) ; extern void configfs_unregister_subsystem(struct configfs_subsystem * ) ; extern int configfs_depend_item(struct configfs_subsystem * , struct config_item * ) ; struct configfs_attribute *sbc_attrib_attrs[30U] ; struct configfs_attribute *passthrough_attrib_attrs[5U] ; int target_register_template(struct target_core_fabric_ops const *fo ) ; void target_unregister_template(struct target_core_fabric_ops const *fo ) ; int target_depend_item(struct config_item *item ) ; void target_undepend_item(struct config_item *item ) ; int target_fabric_setup_cits(struct target_fabric_configfs *tf ) ; struct t10_alua_lu_gp *default_lu_gp ; int core_dev_setup_virtual_lun0(void) ; void core_dev_release_virtual_lun0(void) ; struct se_device *target_alloc_device(struct se_hba *hba , char const *name ) ; int target_configure_device(struct se_device *dev ) ; void target_free_device(struct se_device *dev ) ; void target_setup_backend_cits(struct target_backend *tb ) ; struct se_hba *core_alloc_hba(char const *plugin_name , u32 plugin_dep_id , u32 hba_flags ) ; int core_delete_hba(struct se_hba *hba ) ; int init_se_kmem_caches(void) ; void release_se_kmem_caches(void) ; void transport_subsystem_check_init(void) ; void transport_dump_dev_state(struct se_device *dev , char *b , int *bl ) ; void transport_dump_vpd_proto_id(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) ; int transport_dump_vpd_assoc(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) ; int transport_dump_vpd_ident_type(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) ; int transport_dump_vpd_ident(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) ; void target_stat_setup_dev_default_groups(struct se_device *dev ) ; int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *l_tg_pt_gp , struct se_device *l_dev , struct se_lun *l_lun , struct se_node_acl *l_nacl , int new_state , int explicit ) ; char *core_alua_dump_status(int status ) ; struct t10_alua_lba_map *core_alua_allocate_lba_map(struct list_head *list , u64 first_lba , u64 last_lba ) ; int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map , int pg_id , int state ) ; void core_alua_free_lba_map(struct list_head *lba_list ) ; void core_alua_set_lba_map(struct se_device *dev , struct list_head *lba_map_list , int segment_size , int segment_mult ) ; struct t10_alua_lu_gp *core_alua_allocate_lu_gp(char const *name , int def_group ) ; int core_alua_set_lu_gp_id(struct t10_alua_lu_gp 
*lu_gp , u16 lu_gp_id ) ; void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp ) ; struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(char const *name ) ; void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp ) ; void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *lu_gp_mem , struct t10_alua_lu_gp *lu_gp ) ; void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *lu_gp_mem , struct t10_alua_lu_gp *lu_gp ) ; struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev , char const *name , int def_group ) ; int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *tg_pt_gp , u16 tg_pt_gp_id ) ; void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *tg_pt_gp ) ; ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) ; ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) ; ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) ; ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) ; ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) ; ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) ; ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) ; ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) ; ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) ; ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) ; void core_pr_dump_initiator_port(struct t10_pr_registration *pr_reg , char *buf , u32 size ) ; int core_scsi3_alloc_aptpl_registration(struct t10_reservation *pr_tmpl , u64 sa_res_key , unsigned char *i_port , unsigned char *isid , u64 mapped_lun , unsigned char *t_port , u16 tpgt , u64 target_lun , int res_holder , int all_tg_pt , u8 type ) ; unsigned char *core_scsi3_pr_dump_type(int type ) ; int rd_module_init(void) ; void rd_module_exit(void) ; int target_xcopy_setup_pt(void) ; void target_xcopy_release_pt(void) ; static struct list_head g_tf_list = {& g_tf_list, & g_tf_list}; static struct mutex g_tf_lock = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "g_tf_lock.wait_lock", 0, 0UL}}}}, {& g_tf_lock.wait_list, & g_tf_lock.wait_list}, 0, (void *)(& g_tf_lock), {0, {0, 0}, "g_tf_lock", 0, 0UL}}; static struct config_group target_core_hbagroup ; static struct config_group alua_group ; static struct config_group alua_lu_gps_group ; __inline static struct se_hba *item_to_hba(struct config_item *item ) { struct config_group const *__mptr ; struct config_group *tmp ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; return ((struct se_hba *)__mptr + 0xffffffffffffff88UL); } } static ssize_t target_core_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct new_utsname *tmp ; struct new_utsname *tmp___0 ; int tmp___1 ; { tmp = utsname(); tmp___0 = utsname(); tmp___1 = sprintf(page, "Target Engine Core ConfigFS Infrastructure %s on %s/%s on 4.2.0-rc1\n", (char *)"v5.0", (char *)(& tmp___0->sysname), (char *)(& tmp->machine)); return ((ssize_t )tmp___1); } } static struct configfs_item_operations target_core_fabric_item_ops = {0, & target_core_attr_show, 0, 0, 0}; static struct configfs_attribute 
target_core_item_attr_version = {"version", & __this_module, 292U}; static struct target_fabric_configfs *target_core_get_fabric(char const *name ) { struct target_fabric_configfs *tf ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { if ((unsigned long )name == (unsigned long )((char const *)0)) { return ((struct target_fabric_configfs *)0); } else { } ldv_mutex_lock_17(& g_tf_lock); __mptr = (struct list_head const *)g_tf_list.next; tf = (struct target_fabric_configfs *)__mptr + 0xfffffffffffffff8UL; goto ldv_62270; ldv_62269: tmp = strcmp((tf->tf_ops)->name, name); if (tmp == 0) { atomic_inc(& tf->tf_access_cnt); ldv_mutex_unlock_18(& g_tf_lock); return (tf); } else { } __mptr___0 = (struct list_head const *)tf->tf_list.next; tf = (struct target_fabric_configfs *)__mptr___0 + 0xfffffffffffffff8UL; ldv_62270: ; if ((unsigned long )(& tf->tf_list) != (unsigned long )(& g_tf_list)) { goto ldv_62269; } else { } ldv_mutex_unlock_19(& g_tf_lock); return ((struct target_fabric_configfs *)0); } } static struct config_group *target_core_register_fabric(struct config_group *group , char const *name ) { struct target_fabric_configfs *tf ; int ret ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; void *tmp___2 ; struct _ddebug descriptor___2 ; long tmp___3 ; void *tmp___4 ; int tmp___5 ; int tmp___6 ; struct _ddebug descriptor___3 ; long tmp___7 ; void *tmp___8 ; struct _ddebug descriptor___4 ; long tmp___9 ; struct _ddebug descriptor___5 ; long tmp___10 ; struct _ddebug descriptor___6 ; long tmp___11 ; { descriptor.modname = "target_core_mod"; descriptor.function = "target_core_register_fabric"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: REGISTER -> group: %p name: %s\n"; descriptor.lineno = 151U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: REGISTER -> group: %p name: %s\n", group, name); } else { } tf = target_core_get_fabric(name); if ((unsigned long )tf == (unsigned long )((struct target_fabric_configfs *)0)) { descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_core_register_fabric"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___0.format = "target_core_register_fabric() trying autoload for %s\n"; descriptor___0.lineno = 156U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "target_core_register_fabric() trying autoload for %s\n", name); } else { } tmp___6 = strncmp(name, "iscsi", 5UL); if (tmp___6 == 0) { ret = __request_module(1, "iscsi_target_mod"); if (ret < 0) { descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_core_register_fabric"; descriptor___1.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___1.format = "request_module() failed for iscsi_target_mod.ko: %d\n"; descriptor___1.lineno = 179U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "request_module() failed for iscsi_target_mod.ko: %d\n", ret); } else { } tmp___2 = ERR_PTR(-22L); return ((struct config_group *)tmp___2); } else { } } else { tmp___5 = strncmp(name, "loopback", 8UL); if (tmp___5 == 0) { ret = __request_module(1, "tcm_loop"); if (ret < 0) { descriptor___2.modname = "target_core_mod"; descriptor___2.function = "target_core_register_fabric"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___2.format = "request_module() failed for tcm_loop.ko: %d\n"; descriptor___2.lineno = 192U; descriptor___2.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___2, "request_module() failed for tcm_loop.ko: %d\n", ret); } else { } tmp___4 = ERR_PTR(-22L); return ((struct config_group *)tmp___4); } else { } } else { } } tf = target_core_get_fabric(name); } else { } if ((unsigned long )tf == (unsigned long )((struct target_fabric_configfs *)0)) { descriptor___3.modname = "target_core_mod"; descriptor___3.function = "target_core_register_fabric"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___3.format = "target_core_get_fabric() failed for %s\n"; descriptor___3.lineno = 202U; descriptor___3.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_pr_debug(& descriptor___3, "target_core_get_fabric() failed for %s\n", name); } else { } tmp___8 = ERR_PTR(-22L); return ((struct config_group *)tmp___8); } else { } descriptor___4.modname = "target_core_mod"; descriptor___4.function = "target_core_register_fabric"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___4.format = "Target_Core_ConfigFS: REGISTER -> Located fabric: %s\n"; descriptor___4.lineno = 206U; descriptor___4.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___9 != 0L) { __dynamic_pr_debug(& descriptor___4, "Target_Core_ConfigFS: REGISTER -> Located fabric: %s\n", (tf->tf_ops)->name); } else { } descriptor___5.modname = "target_core_mod"; descriptor___5.function = "target_core_register_fabric"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___5.format = "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n"; descriptor___5.lineno = 212U; 
descriptor___5.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___10 != 0L) { __dynamic_pr_debug(& descriptor___5, "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n", & tf->tf_wwn_cit); } else { } tf->tf_group.default_groups = (struct config_group **)(& tf->tf_default_groups); *(tf->tf_group.default_groups) = & tf->tf_disc_group; *(tf->tf_group.default_groups + 1UL) = (struct config_group *)0; config_group_init_type_name(& tf->tf_group, name, & tf->tf_wwn_cit); config_group_init_type_name(& tf->tf_disc_group, "discovery_auth", & tf->tf_discovery_cit); descriptor___6.modname = "target_core_mod"; descriptor___6.function = "target_core_register_fabric"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___6.format = "Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n"; descriptor___6.lineno = 223U; descriptor___6.flags = 0U; tmp___11 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___11 != 0L) { __dynamic_pr_debug(& descriptor___6, "Target_Core_ConfigFS: REGISTER -> Allocated Fabric: %s\n", tf->tf_group.cg_item.ci_name); } else { } return (& tf->tf_group); } } static void target_core_deregister_fabric(struct config_group *group , struct config_item *item ) { struct target_fabric_configfs *tf ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_group *tf_group ; struct config_item *df_item ; int i ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; long tmp___2 ; struct _ddebug descriptor___1 ; char *tmp___3 ; long tmp___4 ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; tf = (struct target_fabric_configfs *)__mptr + 0xffffffffffffffe8UL; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_deregister_fabric"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in tf list\n"; descriptor.lineno = 241U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = config_item_name(item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in tf list\n", tmp___0); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_core_deregister_fabric"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___0.format = "Target_Core_ConfigFS: DEREGISTER -> located fabric: %s\n"; descriptor___0.lineno = 244U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___0, "Target_Core_ConfigFS: DEREGISTER -> located fabric: %s\n", (tf->tf_ops)->name); } else { } atomic_dec(& tf->tf_access_cnt); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_core_deregister_fabric"; descriptor___1.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___1.format = "Target_Core_ConfigFS: DEREGISTER -> Releasing ci %s\n"; descriptor___1.lineno = 248U; descriptor___1.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = config_item_name(item); __dynamic_pr_debug(& descriptor___1, "Target_Core_ConfigFS: DEREGISTER -> Releasing ci %s\n", tmp___3); } else { } tf_group = & tf->tf_group; i = 0; goto ldv_62302; ldv_62301: df_item = & (*(tf_group->default_groups + (unsigned long )i))->cg_item; *(tf_group->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62302: ; if ((unsigned long )*(tf_group->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62301; } else { } config_item_put(item); return; } } static struct configfs_group_operations target_core_fabric_group_ops = {0, & target_core_register_fabric, 0, 0, & target_core_deregister_fabric}; static struct configfs_attribute *target_core_fabric_item_attrs[2U] = { & target_core_item_attr_version, (struct configfs_attribute *)0}; static struct config_item_type target_core_fabrics_item = {& __this_module, & target_core_fabric_item_ops, & target_core_fabric_group_ops, (struct configfs_attribute **)(& target_core_fabric_item_attrs)}; static struct configfs_subsystem target_core_fabrics = {{{0, {'t', 'a', 'r', 'g', 'e', 't', '\000'}, {{0}}, {0, 0}, 0, 0, & target_core_fabrics_item, 0}, {0, 0}, 0, 0}, {{0}, {{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}, 0, 0, {0, {0, 0}, 0, 0, 0UL}}}; int target_depend_item(struct config_item *item ) { int tmp ; { tmp = configfs_depend_item(& target_core_fabrics, item); return (tmp); } } static char const __kstrtab_target_depend_item[19U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'd', 'e', 'p', 'e', 'n', 'd', '_', 'i', 't', 'e', 'm', '\000'}; struct kernel_symbol const __ksymtab_target_depend_item ; struct kernel_symbol const __ksymtab_target_depend_item = {(unsigned long )(& target_depend_item), (char const *)(& __kstrtab_target_depend_item)}; void target_undepend_item(struct config_item *item ) { { return; } } static char const __kstrtab_target_undepend_item[21U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'u', 'n', 'd', 'e', 'p', 'e', 'n', 'd', '_', 'i', 't', 'e', 'm', '\000'}; struct kernel_symbol const __ksymtab_target_undepend_item ; struct kernel_symbol const __ksymtab_target_undepend_item = {(unsigned long )(& target_undepend_item), (char const *)(& __kstrtab_target_undepend_item)}; static int target_fabric_tf_ops_check(struct target_core_fabric_ops const *tfo ) { size_t tmp ; { if ((unsigned long )tfo->name == (unsigned long )((char const */* const */)0)) { printk("\vMissing tfo->name\n"); return (-22); } else { } tmp = strlen(tfo->name); if (tmp > 31UL) { printk("\vPassed name: %s exceeds TARGET_FABRIC_NAME_SIZE\n", tfo->name); return (-22); } else { } if ((unsigned long )tfo->get_fabric_name == (unsigned long )((char *(*/* const */)(void))0)) { printk("\vMissing tfo->get_fabric_name()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_get_wwn == (unsigned long )((char *(*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_get_wwn()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_get_tag == (unsigned long )((u16 (*/* const */)(struct 
se_portal_group * ))0)) { printk("\vMissing tfo->tpg_get_tag()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_check_demo_mode == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_check_demo_mode()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_check_demo_mode_cache == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_check_demo_mode_cache()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_check_demo_mode_write_protect == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_check_demo_mode_write_protect()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_check_prod_mode_write_protect == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_check_prod_mode_write_protect()\n"); return (-22); } else { } if ((unsigned long )tfo->tpg_get_inst_index == (unsigned long )((u32 (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->tpg_get_inst_index()\n"); return (-22); } else { } if ((unsigned long )tfo->release_cmd == (unsigned long )((void (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->release_cmd()\n"); return (-22); } else { } if ((unsigned long )tfo->shutdown_session == (unsigned long )((int (*/* const */)(struct se_session * ))0)) { printk("\vMissing tfo->shutdown_session()\n"); return (-22); } else { } if ((unsigned long )tfo->close_session == (unsigned long )((void (*/* const */)(struct se_session * ))0)) { printk("\vMissing tfo->close_session()\n"); return (-22); } else { } if ((unsigned long )tfo->sess_get_index == (unsigned long )((u32 (*/* const */)(struct se_session * ))0)) { printk("\vMissing tfo->sess_get_index()\n"); return (-22); } else { } if ((unsigned long )tfo->write_pending == (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->write_pending()\n"); return (-22); } else { } if ((unsigned long )tfo->write_pending_status == (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->write_pending_status()\n"); return (-22); } else { } if ((unsigned long )tfo->set_default_node_attributes == (unsigned long )((void (*/* const */)(struct se_node_acl * ))0)) { printk("\vMissing tfo->set_default_node_attributes()\n"); return (-22); } else { } if ((unsigned long )tfo->get_cmd_state == (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->get_cmd_state()\n"); return (-22); } else { } if ((unsigned long )tfo->queue_data_in == (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->queue_data_in()\n"); return (-22); } else { } if ((unsigned long )tfo->queue_status == (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->queue_status()\n"); return (-22); } else { } if ((unsigned long )tfo->queue_tm_rsp == (unsigned long )((void (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->queue_tm_rsp()\n"); return (-22); } else { } if ((unsigned long )tfo->aborted_task == (unsigned long )((void (*/* const */)(struct se_cmd * ))0)) { printk("\vMissing tfo->aborted_task()\n"); return (-22); } else { } if ((unsigned long )tfo->fabric_make_wwn == (unsigned long )((struct se_wwn *(*/* const */)(struct target_fabric_configfs * , struct config_group * , char const * ))0)) { printk("\vMissing tfo->fabric_make_wwn()\n"); return (-22); } else { } if ((unsigned long )tfo->fabric_drop_wwn == (unsigned long 
)((void (*/* const */)(struct se_wwn * ))0)) { printk("\vMissing tfo->fabric_drop_wwn()\n"); return (-22); } else { } if ((unsigned long )tfo->fabric_make_tpg == (unsigned long )((struct se_portal_group *(*/* const */)(struct se_wwn * , struct config_group * , char const * ))0)) { printk("\vMissing tfo->fabric_make_tpg()\n"); return (-22); } else { } if ((unsigned long )tfo->fabric_drop_tpg == (unsigned long )((void (*/* const */)(struct se_portal_group * ))0)) { printk("\vMissing tfo->fabric_drop_tpg()\n"); return (-22); } else { } return (0); } } int target_register_template(struct target_core_fabric_ops const *fo ) { struct target_fabric_configfs *tf ; int ret ; void *tmp ; { ret = target_fabric_tf_ops_check(fo); if (ret != 0) { return (ret); } else { } tmp = kzalloc(944UL, 208U); tf = (struct target_fabric_configfs *)tmp; if ((unsigned long )tf == (unsigned long )((struct target_fabric_configfs *)0)) { printk("\v%s: could not allocate memory!\n", "target_register_template"); return (-12); } else { } INIT_LIST_HEAD(& tf->tf_list); atomic_set(& tf->tf_access_cnt, 0); tf->tf_ops = fo; target_fabric_setup_cits(tf); ldv_mutex_lock_20(& g_tf_lock); list_add_tail(& tf->tf_list, & g_tf_list); ldv_mutex_unlock_21(& g_tf_lock); return (0); } } static char const __kstrtab_target_register_template[25U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 't', 'e', 'm', 'p', 'l', 'a', 't', 'e', '\000'}; struct kernel_symbol const __ksymtab_target_register_template ; struct kernel_symbol const __ksymtab_target_register_template = {(unsigned long )(& target_register_template), (char const *)(& __kstrtab_target_register_template)}; void target_unregister_template(struct target_core_fabric_ops const *fo ) { struct target_fabric_configfs *t ; struct list_head const *__mptr ; int tmp ; long tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; { ldv_mutex_lock_22(& g_tf_lock); __mptr = (struct list_head const *)g_tf_list.next; t = (struct target_fabric_configfs *)__mptr + 0xfffffffffffffff8UL; goto ldv_62355; ldv_62354: tmp___1 = strcmp((t->tf_ops)->name, fo->name); if (tmp___1 == 0) { tmp = atomic_read((atomic_t const *)(& t->tf_access_cnt)); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"), "i" (458), "i" (12UL)); ldv_62352: ; goto ldv_62352; } else { } list_del(& t->tf_list); kfree((void const *)t); goto ldv_62353; } else { } __mptr___0 = (struct list_head const *)t->tf_list.next; t = (struct target_fabric_configfs *)__mptr___0 + 0xfffffffffffffff8UL; ldv_62355: ; if ((unsigned long )(& t->tf_list) != (unsigned long )(& g_tf_list)) { goto ldv_62354; } else { } ldv_62353: ldv_mutex_unlock_23(& g_tf_lock); return; } } static char const __kstrtab_target_unregister_template[27U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'u', 'n', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 't', 'e', 'm', 'p', 'l', 'a', 't', 'e', '\000'}; struct kernel_symbol const __ksymtab_target_unregister_template ; struct kernel_symbol const __ksymtab_target_unregister_template = {(unsigned long )(& target_unregister_template), (char const *)(& __kstrtab_target_unregister_template)}; static ssize_t 
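/* The show_*()/store_*() functions that follow are the se_dev_attrib configfs
   attribute accessors: every show_*() snprintf()s the current field into the 4 KiB
   page buffer, and every store_*() parses the page with kstrtou32() or strtobool()
   before updating the field and returning count (the deprecated emulate_dpo and
   emulate_fua_read stores only warn once and ignore the value). */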
show_emulate_model_alias(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_model_alias); return ((ssize_t )tmp); } } static ssize_t show_emulate_dpo(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_dpo); return ((ssize_t )tmp); } } static ssize_t show_emulate_fua_write(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_fua_write); return ((ssize_t )tmp); } } static ssize_t show_emulate_fua_read(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_fua_read); return ((ssize_t )tmp); } } static ssize_t show_emulate_write_cache(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_write_cache); return ((ssize_t )tmp); } } static ssize_t show_emulate_ua_intlck_ctrl(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_ua_intlck_ctrl); return ((ssize_t )tmp); } } static ssize_t show_emulate_tas(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_tas); return ((ssize_t )tmp); } } static ssize_t show_emulate_tpu(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_tpu); return ((ssize_t )tmp); } } static ssize_t show_emulate_tpws(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_tpws); return ((ssize_t )tmp); } } static ssize_t show_emulate_caw(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_caw); return ((ssize_t )tmp); } } static ssize_t show_emulate_3pc(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_3pc); return ((ssize_t )tmp); } } static ssize_t show_pi_prot_type(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", (unsigned int )da->pi_prot_type); return ((ssize_t )tmp); } } static ssize_t show_hw_pi_prot_type(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", (unsigned int )da->hw_pi_prot_type); return ((ssize_t )tmp); } } static ssize_t show_pi_prot_format(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->pi_prot_format); return ((ssize_t )tmp); } } static ssize_t show_enforce_pr_isids(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->enforce_pr_isids); return ((ssize_t )tmp); } } static ssize_t show_is_nonrot(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->is_nonrot); return ((ssize_t )tmp); } } static ssize_t show_emulate_rest_reord(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->emulate_rest_reord); return ((ssize_t )tmp); } } static ssize_t show_force_pr_aptpl(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->force_pr_aptpl); return ((ssize_t )tmp); } } static ssize_t show_hw_block_size(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->hw_block_size); return ((ssize_t )tmp); } } static ssize_t show_block_size(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->block_size); return ((ssize_t )tmp); } } static ssize_t 
show_hw_max_sectors(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->hw_max_sectors); return ((ssize_t )tmp); } } static ssize_t show_optimal_sectors(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->optimal_sectors); return ((ssize_t )tmp); } } static ssize_t show_hw_queue_depth(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->hw_queue_depth); return ((ssize_t )tmp); } } static ssize_t show_queue_depth(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->queue_depth); return ((ssize_t )tmp); } } static ssize_t show_max_unmap_lba_count(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->max_unmap_lba_count); return ((ssize_t )tmp); } } static ssize_t show_max_unmap_block_desc_count(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->max_unmap_block_desc_count); return ((ssize_t )tmp); } } static ssize_t show_unmap_granularity(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->unmap_granularity); return ((ssize_t )tmp); } } static ssize_t show_unmap_granularity_alignment(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->unmap_granularity_alignment); return ((ssize_t )tmp); } } static ssize_t show_max_write_same_len(struct se_dev_attrib *da , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", da->max_write_same_len); return ((ssize_t )tmp); } } static ssize_t store_max_unmap_lba_count(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } da->max_unmap_lba_count = val; return ((ssize_t )count); } } static ssize_t store_max_unmap_block_desc_count(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } da->max_unmap_block_desc_count = val; return ((ssize_t )count); } } static ssize_t store_unmap_granularity(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } da->unmap_granularity = val; return ((ssize_t )count); } } static ssize_t store_unmap_granularity_alignment(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } da->unmap_granularity_alignment = val; return ((ssize_t )count); } } static ssize_t store_max_write_same_len(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } da->max_write_same_len = val; return ((ssize_t )count); } } static ssize_t store_emulate_fua_write(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } da->emulate_fua_write = (int )flag; return ((ssize_t )count); } } static ssize_t store_emulate_caw(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } da->emulate_caw = (int )flag; return ((ssize_t )count); } } static 
ssize_t store_emulate_3pc(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } da->emulate_3pc = (int )flag; return ((ssize_t )count); } } static ssize_t store_enforce_pr_isids(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } da->enforce_pr_isids = (int )flag; return ((ssize_t )count); } } static ssize_t store_is_nonrot(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } da->is_nonrot = (int )flag; return ((ssize_t )count); } } static ssize_t store_emulate_dpo(struct se_dev_attrib *da , char const *page , size_t count ) { bool __print_once ; { if (! __print_once) { __print_once = 1; printk("\fignoring deprecated ##_name## attribute\n"); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_fua_read(struct se_dev_attrib *da , char const *page , size_t count ) { bool __print_once ; { if (! __print_once) { __print_once = 1; printk("\fignoring deprecated ##_name## attribute\n"); } else { } return ((ssize_t )count); } } static void dev_set_t10_wwn_model_alias(struct se_device *dev ) { char const *configname ; char *tmp ; size_t tmp___0 ; { tmp = config_item_name(& dev->dev_group.cg_item); configname = (char const *)tmp; tmp___0 = strlen(configname); if (tmp___0 > 15UL) { printk("\fdev[%p]: Backstore name \'%s\' is too long for INQUIRY_MODEL, truncating to 16 bytes\n", dev, configname); } else { } snprintf((char *)(& dev->t10_wwn.model), 16UL, "%s", configname); return; } } static ssize_t store_emulate_model_alias(struct se_dev_attrib *da , char const *page , size_t count ) { struct se_device *dev ; bool flag ; int ret ; { dev = da->da_dev; if (dev->export_count != 0U) { printk("\vdev[%p]: Unable to change model alias while export_count is %d\n", dev, dev->export_count); return (-22L); } else { } ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((int )flag) { dev_set_t10_wwn_model_alias(dev); } else { strncpy((char *)(& dev->t10_wwn.model), (char const *)(& (dev->transport)->inquiry_prod), 16UL); } da->emulate_model_alias = (int )flag; return ((ssize_t )count); } } static ssize_t store_emulate_write_cache(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((int )flag && (unsigned long )((da->da_dev)->transport)->get_write_cache != (unsigned long )((bool (*/* const */)(struct se_device * ))0)) { printk("\vemulate_write_cache not supported for this device\n"); return (-22L); } else { } da->emulate_write_cache = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_write_cache"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n"; descriptor.lineno = 619U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n", da->da_dev, 
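/* The on-stack struct _ddebug descriptor plus the ldv__builtin_expect() test on
   descriptor.flags before __dynamic_pr_debug() appears to be the expanded form of a
   dynamic-debug pr_debug() call; the same pattern recurs in most store_* handlers below. */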
(int )flag); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_ua_intlck_ctrl(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } if ((val != 0U && val != 1U) && val != 2U) { printk("\vIllegal value %d\n", val); return (-22L); } else { } if ((da->da_dev)->export_count != 0U) { printk("\vdev[%p]: Unable to change SE Device UA_INTRLCK_CTRL while export_count is %d\n", da->da_dev, (da->da_dev)->export_count); return (-22L); } else { } da->emulate_ua_intlck_ctrl = (int )val; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_ua_intlck_ctrl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n"; descriptor.lineno = 646U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n", da->da_dev, val); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_tas(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((da->da_dev)->export_count != 0U) { printk("\vdev[%p]: Unable to change SE Device TAS while export_count is %d\n", da->da_dev, (da->da_dev)->export_count); return (-22L); } else { } da->emulate_tas = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_tas"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device TASK_ABORTED status bit: %s\n"; descriptor.lineno = 668U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device TASK_ABORTED status bit: %s\n", da->da_dev, (int )flag ? 
(char *)"Enabled" : (char *)"Disabled"); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_tpu(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((int )flag && da->max_unmap_block_desc_count == 0U) { printk("\vGeneric Block Discard not supported\n"); return (-38L); } else { } da->emulate_tpu = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_tpu"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n"; descriptor.lineno = 694U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n", da->da_dev, (int )flag); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_tpws(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((int )flag && da->max_unmap_block_desc_count == 0U) { printk("\vGeneric Block Discard not supported\n"); return (-38L); } else { } da->emulate_tpws = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_tpws"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n"; descriptor.lineno = 719U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n", da->da_dev, (int )flag); } else { } return ((ssize_t )count); } } static ssize_t store_pi_prot_type(struct se_dev_attrib *da , char const *page , size_t count ) { int old_prot ; int ret ; struct se_device *dev ; u32 flag ; struct _ddebug descriptor ; long tmp ; { old_prot = (int )da->pi_prot_type; dev = da->da_dev; ret = kstrtou32(page, 0U, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if (((flag != 0U && flag != 1U) && flag != 2U) && flag != 3U) { printk("\vIllegal value %d for pi_prot_type\n", flag); return (-22L); } else { } if (flag == 2U) { printk("\vDIF TYPE2 protection currently not supported\n"); return (-38L); } else { } if ((unsigned int )da->hw_pi_prot_type != 0U) { printk("\fDIF protection enabled on underlying hardware, ignoring\n"); return ((ssize_t )count); } else { } if ((unsigned long )(dev->transport)->init_prot == (unsigned long )((int (*/* const */)(struct se_device * ))0) || (unsigned long )(dev->transport)->free_prot == (unsigned long )((void (*/* const */)(struct se_device * ))0)) { if (flag == 0U) { return (0L); } else { } printk("\vDIF protection not supported by backend: %s\n", (char const *)(& (dev->transport)->name)); return (-38L); } else { } if ((dev->dev_flags & 1U) == 0U) { printk("\vDIF protection requires device to be configured\n"); return (-19L); } else { } if (dev->export_count != 0U) { 
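/* Switching the protection type requires a backend that implements init_prot/free_prot,
   a configured device (dev_flags & 1U), and no active fabric exports; when the hardware
   already provides DIF (hw_pi_prot_type != 0) the write is accepted but ignored. */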
printk("\vdev[%p]: Unable to change SE Device PROT type while export_count is %d\n", dev, dev->export_count); return (-22L); } else { } da->pi_prot_type = (enum target_prot_type )flag; if (flag != 0U && old_prot == 0) { ret = (*((dev->transport)->init_prot))(dev); if (ret != 0) { da->pi_prot_type = (enum target_prot_type )old_prot; return ((ssize_t )ret); } else { } } else if (flag == 0U && old_prot != 0) { (*((dev->transport)->free_prot))(dev); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "store_pi_prot_type"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device Protection Type: %d\n"; descriptor.lineno = 779U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device Protection Type: %d\n", dev, flag); } else { } return ((ssize_t )count); } } static ssize_t store_pi_prot_format(struct se_dev_attrib *da , char const *page , size_t count ) { struct se_device *dev ; bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { dev = da->da_dev; ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if (! flag) { return ((ssize_t )count); } else { } if ((unsigned long )(dev->transport)->format_prot == (unsigned long )((int (*/* const */)(struct se_device * ))0)) { printk("\vDIF protection format not supported by backend %s\n", (char const *)(& (dev->transport)->name)); return (-38L); } else { } if ((dev->dev_flags & 1U) == 0U) { printk("\vDIF protection format requires device to be configured\n"); return (-19L); } else { } if (dev->export_count != 0U) { printk("\vdev[%p]: Unable to format SE Device PROT type while export_count is %d\n", dev, dev->export_count); return (-22L); } else { } ret = (*((dev->transport)->format_prot))(dev); if (ret != 0) { return ((ssize_t )ret); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "store_pi_prot_format"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device Protection Format complete\n"; descriptor.lineno = 816U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device Protection Format complete\n", dev); } else { } return ((ssize_t )count); } } static ssize_t store_force_pr_aptpl(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((da->da_dev)->export_count != 0U) { printk("\vdev[%p]: Unable to set force_pr_aptpl while export_count is %d\n", da->da_dev, (da->da_dev)->export_count); return (-22L); } else { } da->force_pr_aptpl = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_force_pr_aptpl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = 
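/* The negative literals returned as errors throughout this file are negated errno values:
   -22 = -EINVAL, -38 = -ENOSYS, -19 = -ENODEV, -12 = -ENOMEM, -95 = -EOPNOTSUPP,
   -75 = -EOVERFLOW. */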
"dev[%p]: SE Device force_pr_aptpl: %d\n"; descriptor.lineno = 837U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device force_pr_aptpl: %d\n", da->da_dev, (int )flag); } else { } return ((ssize_t )count); } } static ssize_t store_emulate_rest_reord(struct se_dev_attrib *da , char const *page , size_t count ) { bool flag ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = strtobool(page, & flag); if (ret < 0) { return ((ssize_t )ret); } else { } if ((int )flag) { printk("\vdev[%p]: SE Device emulation of restricted reordering not implemented\n", da->da_dev); return (-38L); } else { } da->emulate_rest_reord = (int )flag; descriptor.modname = "target_core_mod"; descriptor.function = "store_emulate_rest_reord"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device emulate_rest_reord: %d\n"; descriptor.lineno = 858U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device emulate_rest_reord: %d\n", da->da_dev, (int )flag); } else { } return ((ssize_t )count); } } static ssize_t store_queue_depth(struct se_dev_attrib *da , char const *page , size_t count ) { struct se_device *dev ; u32 val ; int ret ; u32 tmp ; struct _ddebug descriptor ; long tmp___0 ; { dev = da->da_dev; ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } if (dev->export_count != 0U) { printk("\vdev[%p]: Unable to change SE Device TCQ while export_count is %d\n", dev, dev->export_count); return (-22L); } else { } if (val == 0U) { printk("\vdev[%p]: Illegal ZERO value for queue_depth\n", dev); return (-22L); } else { } if (dev->dev_attrib.queue_depth < val) { if (dev->dev_attrib.hw_queue_depth < val) { printk("\vdev[%p]: Passed queue_depth: %u exceeds TCM/SE_Device MAX TCQ: %u\n", dev, val, dev->dev_attrib.hw_queue_depth); return (-22L); } else { } } else { } tmp = val; dev->queue_depth = tmp; da->queue_depth = tmp; descriptor.modname = "target_core_mod"; descriptor.function = "store_queue_depth"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device TCQ Depth changed to: %u\n"; descriptor.lineno = 897U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device TCQ Depth changed to: %u\n", dev, val); } else { } return ((ssize_t )count); } } static ssize_t store_optimal_sectors(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } if ((da->da_dev)->export_count != 0U) { printk("\vdev[%p]: Unable to change SE Device optimal_sectors while export_count is %d\n", da->da_dev, (da->da_dev)->export_count); return (-22L); } else { } if (da->hw_max_sectors < val) { printk("\vdev[%p]: Passed optimal_sectors %u cannot be greater than hw_max_sectors: %u\n", da->da_dev, val, da->hw_max_sectors); return 
(-22L); } else { } da->optimal_sectors = val; descriptor.modname = "target_core_mod"; descriptor.function = "store_optimal_sectors"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device optimal_sectors changed to %u\n"; descriptor.lineno = 926U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device optimal_sectors changed to %u\n", da->da_dev, val); } else { } return ((ssize_t )count); } } static ssize_t store_block_size(struct se_dev_attrib *da , char const *page , size_t count ) { u32 val ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = kstrtou32(page, 0U, & val); if (ret < 0) { return ((ssize_t )ret); } else { } if ((da->da_dev)->export_count != 0U) { printk("\vdev[%p]: Unable to change SE Device block_size while export_count is %d\n", da->da_dev, (da->da_dev)->export_count); return (-22L); } else { } if (((val != 512U && val != 1024U) && val != 2048U) && val != 4096U) { printk("\vdev[%p]: Illegal value for block_device: %u for SE device, must be 512, 1024, 2048 or 4096\n", da->da_dev, val); return (-22L); } else { } da->block_size = val; if (da->max_bytes_per_io != 0U) { da->hw_max_sectors = da->max_bytes_per_io / val; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "store_block_size"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "dev[%p]: SE Device block_size changed to %u\n"; descriptor.lineno = 959U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "dev[%p]: SE Device block_size changed to %u\n", da->da_dev, val); } else { } return ((ssize_t )count); } } static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_model_alias = {{"emulate_model_alias", & __this_module, 420U}, & show_emulate_model_alias, & store_emulate_model_alias}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_dpo = {{"emulate_dpo", & __this_module, 420U}, & show_emulate_dpo, & store_emulate_dpo}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_fua_write = {{"emulate_fua_write", & __this_module, 420U}, & show_emulate_fua_write, & store_emulate_fua_write}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_fua_read = {{"emulate_fua_read", & __this_module, 420U}, & show_emulate_fua_read, & store_emulate_fua_read}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_write_cache = {{"emulate_write_cache", & __this_module, 420U}, & show_emulate_write_cache, & store_emulate_write_cache}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_ua_intlck_ctrl = {{"emulate_ua_intlck_ctrl", & __this_module, 420U}, & show_emulate_ua_intlck_ctrl, & store_emulate_ua_intlck_ctrl}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_tas = {{"emulate_tas", & __this_module, 420U}, & show_emulate_tas, & store_emulate_tas}; static struct target_backend_dev_attrib_attribute 
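/* The static target_backend_dev_attrib_attribute definitions below bind each attribute name
   to its show/store pair. The third member of the embedded configfs_attribute is the mode:
   420U is 0644 (read-write) and 292U is 0444 (read-only); the read-only hw_* attributes
   also carry a null store pointer. */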
target_core_dev_attrib_emulate_tpu = {{"emulate_tpu", & __this_module, 420U}, & show_emulate_tpu, & store_emulate_tpu}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_tpws = {{"emulate_tpws", & __this_module, 420U}, & show_emulate_tpws, & store_emulate_tpws}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_caw = {{"emulate_caw", & __this_module, 420U}, & show_emulate_caw, & store_emulate_caw}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_3pc = {{"emulate_3pc", & __this_module, 420U}, & show_emulate_3pc, & store_emulate_3pc}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_pi_prot_type = {{"pi_prot_type", & __this_module, 420U}, & show_pi_prot_type, & store_pi_prot_type}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_hw_pi_prot_type = {{"hw_pi_prot_type", & __this_module, 292U}, & show_hw_pi_prot_type, 0}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_pi_prot_format = {{"pi_prot_format", & __this_module, 420U}, & show_pi_prot_format, & store_pi_prot_format}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_enforce_pr_isids = {{"enforce_pr_isids", & __this_module, 420U}, & show_enforce_pr_isids, & store_enforce_pr_isids}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_is_nonrot = {{"is_nonrot", & __this_module, 420U}, & show_is_nonrot, & store_is_nonrot}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_emulate_rest_reord = {{"emulate_rest_reord", & __this_module, 420U}, & show_emulate_rest_reord, & store_emulate_rest_reord}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_force_pr_aptpl = {{"force_pr_aptpl", & __this_module, 420U}, & show_force_pr_aptpl, & store_force_pr_aptpl}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_hw_block_size = {{"hw_block_size", & __this_module, 292U}, & show_hw_block_size, 0}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_block_size = {{"block_size", & __this_module, 420U}, & show_block_size, & store_block_size}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_hw_max_sectors = {{"hw_max_sectors", & __this_module, 292U}, & show_hw_max_sectors, 0}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_optimal_sectors = {{"optimal_sectors", & __this_module, 420U}, & show_optimal_sectors, & store_optimal_sectors}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_hw_queue_depth = {{"hw_queue_depth", & __this_module, 292U}, & show_hw_queue_depth, 0}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_queue_depth = {{"queue_depth", & __this_module, 420U}, & show_queue_depth, & store_queue_depth}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_max_unmap_lba_count = {{"max_unmap_lba_count", & __this_module, 420U}, & show_max_unmap_lba_count, & store_max_unmap_lba_count}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_max_unmap_block_desc_count = {{"max_unmap_block_desc_count", & __this_module, 420U}, & show_max_unmap_block_desc_count, & store_max_unmap_block_desc_count}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_unmap_granularity = {{"unmap_granularity", & __this_module, 420U}, & show_unmap_granularity, & store_unmap_granularity}; static struct 
target_backend_dev_attrib_attribute target_core_dev_attrib_unmap_granularity_alignment = {{"unmap_granularity_alignment", & __this_module, 420U}, & show_unmap_granularity_alignment, & store_unmap_granularity_alignment}; static struct target_backend_dev_attrib_attribute target_core_dev_attrib_max_write_same_len = {{"max_write_same_len", & __this_module, 420U}, & show_max_write_same_len, & store_max_write_same_len}; static struct se_dev_attrib *to_target_core_dev_attrib(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_dev_attrib *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_dev_attrib *)__mptr + 0xffffffffffffff80UL; } else { tmp___0 = (struct se_dev_attrib *)0; } return (tmp___0); } } static ssize_t target_core_dev_attrib_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_dev_attrib *se_dev_attrib ; struct se_dev_attrib *tmp ; struct target_core_dev_attrib_attribute *target_core_dev_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_attrib(item); se_dev_attrib = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_attrib_attr = (struct target_core_dev_attrib_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_dev_attrib_attr->show != (unsigned long )((ssize_t (*)(struct se_dev_attrib * , char * ))0)) { ret = (*(target_core_dev_attrib_attr->show))(se_dev_attrib, page); } else { } return (ret); } } static ssize_t target_core_dev_attrib_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_dev_attrib *se_dev_attrib ; struct se_dev_attrib *tmp ; struct target_core_dev_attrib_attribute *target_core_dev_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_attrib(item); se_dev_attrib = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_attrib_attr = (struct target_core_dev_attrib_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_dev_attrib_attr->store != (unsigned long )((ssize_t (*)(struct se_dev_attrib * , char const * , size_t ))0)) { ret = (*(target_core_dev_attrib_attr->store))(se_dev_attrib, page, count); } else { } return (ret); } } struct configfs_attribute *sbc_attrib_attrs[30U] = { & target_core_dev_attrib_emulate_model_alias.attr, & target_core_dev_attrib_emulate_dpo.attr, & target_core_dev_attrib_emulate_fua_write.attr, & target_core_dev_attrib_emulate_fua_read.attr, & target_core_dev_attrib_emulate_write_cache.attr, & target_core_dev_attrib_emulate_ua_intlck_ctrl.attr, & target_core_dev_attrib_emulate_tas.attr, & target_core_dev_attrib_emulate_tpu.attr, & target_core_dev_attrib_emulate_tpws.attr, & target_core_dev_attrib_emulate_caw.attr, & target_core_dev_attrib_emulate_3pc.attr, & target_core_dev_attrib_pi_prot_type.attr, & target_core_dev_attrib_hw_pi_prot_type.attr, & target_core_dev_attrib_pi_prot_format.attr, & target_core_dev_attrib_enforce_pr_isids.attr, & target_core_dev_attrib_is_nonrot.attr, & target_core_dev_attrib_emulate_rest_reord.attr, & target_core_dev_attrib_force_pr_aptpl.attr, & target_core_dev_attrib_hw_block_size.attr, & target_core_dev_attrib_block_size.attr, & target_core_dev_attrib_hw_max_sectors.attr, & target_core_dev_attrib_optimal_sectors.attr, & target_core_dev_attrib_hw_queue_depth.attr, & 
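/* sbc_attrib_attrs is a NULL-terminated configfs_attribute table covering all 29 attributes
   defined above. The __kstrtab_/__ksymtab_ pairs that follow look like the expanded form of
   EXPORT_SYMBOL(), and passthrough_attrib_attrs exposes only the four read-only hw_*
   attributes for passthrough backends. */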
target_core_dev_attrib_queue_depth.attr, & target_core_dev_attrib_max_unmap_lba_count.attr, & target_core_dev_attrib_max_unmap_block_desc_count.attr, & target_core_dev_attrib_unmap_granularity.attr, & target_core_dev_attrib_unmap_granularity_alignment.attr, & target_core_dev_attrib_max_write_same_len.attr, (struct configfs_attribute *)0}; static char const __kstrtab_sbc_attrib_attrs[17U] = { 's', 'b', 'c', '_', 'a', 't', 't', 'r', 'i', 'b', '_', 'a', 't', 't', 'r', 's', '\000'}; struct kernel_symbol const __ksymtab_sbc_attrib_attrs ; struct kernel_symbol const __ksymtab_sbc_attrib_attrs = {(unsigned long )(& sbc_attrib_attrs), (char const *)(& __kstrtab_sbc_attrib_attrs)}; static struct target_backend_dev_attrib_attribute target_pt_dev_attrib_hw_pi_prot_type = {{"hw_pi_prot_type", & __this_module, 292U}, & show_hw_pi_prot_type, 0}; static struct target_backend_dev_attrib_attribute target_pt_dev_attrib_hw_block_size = {{"hw_block_size", & __this_module, 292U}, & show_hw_block_size, 0}; static struct target_backend_dev_attrib_attribute target_pt_dev_attrib_hw_max_sectors = {{"hw_max_sectors", & __this_module, 292U}, & show_hw_max_sectors, 0}; static struct target_backend_dev_attrib_attribute target_pt_dev_attrib_hw_queue_depth = {{"hw_queue_depth", & __this_module, 292U}, & show_hw_queue_depth, 0}; struct configfs_attribute *passthrough_attrib_attrs[5U] = { & target_pt_dev_attrib_hw_pi_prot_type.attr, & target_pt_dev_attrib_hw_block_size.attr, & target_pt_dev_attrib_hw_max_sectors.attr, & target_pt_dev_attrib_hw_queue_depth.attr, (struct configfs_attribute *)0}; static char const __kstrtab_passthrough_attrib_attrs[25U] = { 'p', 'a', 's', 's', 't', 'h', 'r', 'o', 'u', 'g', 'h', '_', 'a', 't', 't', 'r', 'i', 'b', '_', 'a', 't', 't', 'r', 's', '\000'}; struct kernel_symbol const __ksymtab_passthrough_attrib_attrs ; struct kernel_symbol const __ksymtab_passthrough_attrib_attrs = {(unsigned long )(& passthrough_attrib_attrs), (char const *)(& __kstrtab_passthrough_attrib_attrs)}; static struct configfs_item_operations target_core_dev_attrib_ops = {0, & target_core_dev_attrib_attr_show, & target_core_dev_attrib_attr_store, 0, 0}; static void target_core_setup_dev_attrib_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_attrib_cit; cit->ct_item_ops = & target_core_dev_attrib_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (tb->ops)->tb_dev_attrib_attrs; cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_attrib_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1071U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev_attrib"); } else { } return; } } static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(struct t10_wwn *t10_wwn , char *page ) { int tmp ; { tmp = sprintf(page, "T10 VPD Unit Serial Number: %s\n", (char *)(& t10_wwn->unit_serial)); return ((ssize_t )tmp); } } static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(struct t10_wwn *t10_wwn , char const *page , size_t count ) { struct se_device *dev ; unsigned char buf[254U] ; size_t 
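/* t10_wwn (vpd_*) attribute handlers: vpd_unit_serial is writable only when the backend did
   not already report a firmware-provided serial (dev_flags & 2U) and the device has no
   active exports; the 254-byte buffers correspond to the INQUIRY_VPD_SERIAL_LEN limit
   quoted in the error message. */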
tmp ; char *tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { dev = t10_wwn->t10_dev; if ((dev->dev_flags & 2U) != 0U) { printk("\vUnderlying SCSI device firmware provided VPD Unit Serial, ignoring request\n"); return (-95L); } else { } tmp = strlen(page); if (tmp > 253UL) { printk("\vEmulated VPD Unit Serial exceeds INQUIRY_VPD_SERIAL_LEN: %d\n", 254); return (-75L); } else { } if (dev->export_count != 0U) { printk("\vUnable to set VPD Unit Serial while active %d $FABRIC_MOD exports exist\n", dev->export_count); return (-22L); } else { } memset((void *)(& buf), 0, 254UL); snprintf((char *)(& buf), 254UL, "%s", page); tmp___0 = strstrip((char *)(& buf)); snprintf((char *)(& dev->t10_wwn.unit_serial), 254UL, "%s", tmp___0); dev->dev_flags = dev->dev_flags | 4U; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_dev_wwn_store_attr_vpd_unit_serial"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Set emulated VPD Unit Serial: %s\n"; descriptor.lineno = 1158U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Set emulated VPD Unit Serial: %s\n", (char *)(& dev->t10_wwn.unit_serial)); } else { } return ((ssize_t )count); } } static struct target_core_dev_wwn_attribute target_core_dev_wwn_vpd_unit_serial = {{"vpd_unit_serial", & __this_module, 420U}, & target_core_dev_wwn_show_attr_vpd_unit_serial, & target_core_dev_wwn_store_attr_vpd_unit_serial}; static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(struct t10_wwn *t10_wwn , char *page ) { struct t10_vpd *vpd ; unsigned char buf[254U] ; ssize_t len ; struct list_head const *__mptr ; size_t tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { len = 0L; memset((void *)(& buf), 0, 254UL); spin_lock(& t10_wwn->t10_vpd_lock); __mptr = (struct list_head const *)t10_wwn->t10_vpd_list.next; vpd = (struct t10_vpd *)__mptr + 0xfffffffffffffee8UL; goto ldv_62820; ldv_62819: ; if (vpd->protocol_identifier_set == 0) { goto ldv_62817; } else { } transport_dump_vpd_proto_id(vpd, (unsigned char *)(& buf), 254); tmp = strlen((char const *)(& buf)); if (tmp + (unsigned long )len > 4095UL) { goto ldv_62818; } else { } tmp___0 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___0 + len; ldv_62817: __mptr___0 = (struct list_head const *)vpd->vpd_list.next; vpd = (struct t10_vpd *)__mptr___0 + 0xfffffffffffffee8UL; ldv_62820: ; if ((unsigned long )(& vpd->vpd_list) != (unsigned long )(& t10_wwn->t10_vpd_list)) { goto ldv_62819; } else { } ldv_62818: spin_unlock(& t10_wwn->t10_vpd_lock); return (len); } } static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(struct t10_wwn *t10_wwn , char const *page , size_t count ) { { return (-38L); } } static struct target_core_dev_wwn_attribute target_core_dev_wwn_vpd_protocol_identifier = {{"vpd_protocol_identifier", & __this_module, 420U}, & target_core_dev_wwn_show_attr_vpd_protocol_identifier, & target_core_dev_wwn_store_attr_vpd_protocol_identifier}; static ssize_t target_core_dev_wwn_show_attr_vpd_assoc_logical_unit(struct t10_wwn *t10_wwn , char *page ) { struct t10_vpd *vpd ; unsigned char buf[254U] ; ssize_t len ; struct list_head const *__mptr ; size_t tmp ; int 
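/* The three vpd_assoc_* show handlers walk t10_wwn->t10_vpd_list under t10_vpd_lock,
   filter on vpd->association (0x00 logical unit, 0x10 target port, 0x20 SCSI target
   device), and stop as soon as appending another line would exceed the 4 KiB page. */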
tmp___0 ; size_t tmp___1 ; int tmp___2 ; size_t tmp___3 ; int tmp___4 ; struct list_head const *__mptr___0 ; { len = 0L; spin_lock(& t10_wwn->t10_vpd_lock); __mptr = (struct list_head const *)t10_wwn->t10_vpd_list.next; vpd = (struct t10_vpd *)__mptr + 0xfffffffffffffee8UL; goto ldv_62841; ldv_62840: ; if (vpd->association != 0U) { goto ldv_62838; } else { } memset((void *)(& buf), 0, 254UL); transport_dump_vpd_assoc(vpd, (unsigned char *)(& buf), 254); tmp = strlen((char const *)(& buf)); if (tmp + (unsigned long )len > 4095UL) { goto ldv_62839; } else { } tmp___0 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___0 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident_type(vpd, (unsigned char *)(& buf), 254); tmp___1 = strlen((char const *)(& buf)); if (tmp___1 + (unsigned long )len > 4095UL) { goto ldv_62839; } else { } tmp___2 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___2 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident(vpd, (unsigned char *)(& buf), 254); tmp___3 = strlen((char const *)(& buf)); if (tmp___3 + (unsigned long )len > 4095UL) { goto ldv_62839; } else { } tmp___4 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___4 + len; ldv_62838: __mptr___0 = (struct list_head const *)vpd->vpd_list.next; vpd = (struct t10_vpd *)__mptr___0 + 0xfffffffffffffee8UL; ldv_62841: ; if ((unsigned long )(& vpd->vpd_list) != (unsigned long )(& t10_wwn->t10_vpd_list)) { goto ldv_62840; } else { } ldv_62839: spin_unlock(& t10_wwn->t10_vpd_lock); return (len); } } static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(struct t10_wwn *t10_wwn , char const *page , size_t count ) { { return (-38L); } } static struct target_core_dev_wwn_attribute target_core_dev_wwn_vpd_assoc_logical_unit = {{"vpd_assoc_logical_unit", & __this_module, 420U}, & target_core_dev_wwn_show_attr_vpd_assoc_logical_unit, & target_core_dev_wwn_store_attr_vpd_assoc_logical_unit}; static ssize_t target_core_dev_wwn_show_attr_vpd_assoc_target_port(struct t10_wwn *t10_wwn , char *page ) { struct t10_vpd *vpd ; unsigned char buf[254U] ; ssize_t len ; struct list_head const *__mptr ; size_t tmp ; int tmp___0 ; size_t tmp___1 ; int tmp___2 ; size_t tmp___3 ; int tmp___4 ; struct list_head const *__mptr___0 ; { len = 0L; spin_lock(& t10_wwn->t10_vpd_lock); __mptr = (struct list_head const *)t10_wwn->t10_vpd_list.next; vpd = (struct t10_vpd *)__mptr + 0xfffffffffffffee8UL; goto ldv_62862; ldv_62861: ; if (vpd->association != 16U) { goto ldv_62859; } else { } memset((void *)(& buf), 0, 254UL); transport_dump_vpd_assoc(vpd, (unsigned char *)(& buf), 254); tmp = strlen((char const *)(& buf)); if (tmp + (unsigned long )len > 4095UL) { goto ldv_62860; } else { } tmp___0 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___0 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident_type(vpd, (unsigned char *)(& buf), 254); tmp___1 = strlen((char const *)(& buf)); if (tmp___1 + (unsigned long )len > 4095UL) { goto ldv_62860; } else { } tmp___2 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___2 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident(vpd, (unsigned char *)(& buf), 254); tmp___3 = strlen((char const *)(& buf)); if (tmp___3 + (unsigned long )len > 4095UL) { goto ldv_62860; } else { } tmp___4 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& 
buf)); len = (ssize_t )tmp___4 + len; ldv_62859: __mptr___0 = (struct list_head const *)vpd->vpd_list.next; vpd = (struct t10_vpd *)__mptr___0 + 0xfffffffffffffee8UL; ldv_62862: ; if ((unsigned long )(& vpd->vpd_list) != (unsigned long )(& t10_wwn->t10_vpd_list)) { goto ldv_62861; } else { } ldv_62860: spin_unlock(& t10_wwn->t10_vpd_lock); return (len); } } static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(struct t10_wwn *t10_wwn , char const *page , size_t count ) { { return (-38L); } } static struct target_core_dev_wwn_attribute target_core_dev_wwn_vpd_assoc_target_port = {{"vpd_assoc_target_port", & __this_module, 420U}, & target_core_dev_wwn_show_attr_vpd_assoc_target_port, & target_core_dev_wwn_store_attr_vpd_assoc_target_port}; static ssize_t target_core_dev_wwn_show_attr_vpd_assoc_scsi_target_device(struct t10_wwn *t10_wwn , char *page ) { struct t10_vpd *vpd ; unsigned char buf[254U] ; ssize_t len ; struct list_head const *__mptr ; size_t tmp ; int tmp___0 ; size_t tmp___1 ; int tmp___2 ; size_t tmp___3 ; int tmp___4 ; struct list_head const *__mptr___0 ; { len = 0L; spin_lock(& t10_wwn->t10_vpd_lock); __mptr = (struct list_head const *)t10_wwn->t10_vpd_list.next; vpd = (struct t10_vpd *)__mptr + 0xfffffffffffffee8UL; goto ldv_62883; ldv_62882: ; if (vpd->association != 32U) { goto ldv_62880; } else { } memset((void *)(& buf), 0, 254UL); transport_dump_vpd_assoc(vpd, (unsigned char *)(& buf), 254); tmp = strlen((char const *)(& buf)); if (tmp + (unsigned long )len > 4095UL) { goto ldv_62881; } else { } tmp___0 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___0 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident_type(vpd, (unsigned char *)(& buf), 254); tmp___1 = strlen((char const *)(& buf)); if (tmp___1 + (unsigned long )len > 4095UL) { goto ldv_62881; } else { } tmp___2 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___2 + len; memset((void *)(& buf), 0, 254UL); transport_dump_vpd_ident(vpd, (unsigned char *)(& buf), 254); tmp___3 = strlen((char const *)(& buf)); if (tmp___3 + (unsigned long )len > 4095UL) { goto ldv_62881; } else { } tmp___4 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___4 + len; ldv_62880: __mptr___0 = (struct list_head const *)vpd->vpd_list.next; vpd = (struct t10_vpd *)__mptr___0 + 0xfffffffffffffee8UL; ldv_62883: ; if ((unsigned long )(& vpd->vpd_list) != (unsigned long )(& t10_wwn->t10_vpd_list)) { goto ldv_62882; } else { } ldv_62881: spin_unlock(& t10_wwn->t10_vpd_lock); return (len); } } static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(struct t10_wwn *t10_wwn , char const *page , size_t count ) { { return (-38L); } } static struct target_core_dev_wwn_attribute target_core_dev_wwn_vpd_assoc_scsi_target_device = {{"vpd_assoc_scsi_target_device", & __this_module, 420U}, & target_core_dev_wwn_show_attr_vpd_assoc_scsi_target_device, & target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device}; static struct t10_wwn *to_target_core_dev_wwn(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct t10_wwn *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct t10_wwn *)__mptr + 0xfffffffffffffe90UL; } else { tmp___0 = (struct t10_wwn *)0; } return (tmp___0); } } static ssize_t target_core_dev_wwn_attr_show(struct 
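/* to_target_core_dev_wwn() above and the dispatchers below recover the containing object
   from a config_item; the large unsigned constants added to __mptr (e.g.
   0xfffffffffffffe90UL) are two's-complement negative offsets, apparently CIL's rendering
   of container_of(). */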
config_item *item , struct configfs_attribute *attr , char *page ) { struct t10_wwn *t10_wwn ; struct t10_wwn *tmp ; struct target_core_dev_wwn_attribute *target_core_dev_wwn_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_wwn(item); t10_wwn = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_wwn_attr = (struct target_core_dev_wwn_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_dev_wwn_attr->show != (unsigned long )((ssize_t (*)(struct t10_wwn * , char * ))0)) { ret = (*(target_core_dev_wwn_attr->show))(t10_wwn, page); } else { } return (ret); } } static ssize_t target_core_dev_wwn_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct t10_wwn *t10_wwn ; struct t10_wwn *tmp ; struct target_core_dev_wwn_attribute *target_core_dev_wwn_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_wwn(item); t10_wwn = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_wwn_attr = (struct target_core_dev_wwn_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_dev_wwn_attr->store != (unsigned long )((ssize_t (*)(struct t10_wwn * , char const * , size_t ))0)) { ret = (*(target_core_dev_wwn_attr->store))(t10_wwn, page, count); } else { } return (ret); } } static struct configfs_attribute *target_core_dev_wwn_attrs[6U] = { & target_core_dev_wwn_vpd_unit_serial.attr, & target_core_dev_wwn_vpd_protocol_identifier.attr, & target_core_dev_wwn_vpd_assoc_logical_unit.attr, & target_core_dev_wwn_vpd_assoc_target_port.attr, & target_core_dev_wwn_vpd_assoc_scsi_target_device.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_core_dev_wwn_ops = {0, & target_core_dev_wwn_attr_show, & target_core_dev_wwn_attr_store, 0, 0}; static void target_core_setup_dev_wwn_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_wwn_cit; cit->ct_item_ops = & target_core_dev_wwn_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct configfs_attribute **)(& target_core_dev_wwn_attrs); cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_wwn_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1306U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev_wwn"); } else { } return; } } static ssize_t target_core_dev_pr_show_spc3_res(struct se_device *dev , char *page ) { struct se_node_acl *se_nacl ; struct t10_pr_registration *pr_reg ; char i_buf[21U] ; int tmp ; char *tmp___0 ; int tmp___1 ; { memset((void *)(& i_buf), 0, 21UL); pr_reg = dev->dev_pr_res_holder; if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { tmp = sprintf(page, "No SPC-3 Reservation holder\n"); return ((ssize_t )tmp); } else { } se_nacl = pr_reg->pr_reg_nacl; core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); tmp___0 = (*(((se_nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); tmp___1 = sprintf(page, "SPC-3 Reservation: %s Initiator: %s%s\n", tmp___0, (char *)(& 
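/* The persistent-reservation (pr) group is read-only except for res_aptpl_metadata: its
   show handlers dump SPC-2 or SPC-3 reservation state under dev_reservation_lock, and
   passthrough transports (transport_flags & 1) short-circuit to "Passthrough" or an empty
   page. */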
se_nacl->initiatorname), (char *)(& i_buf)); return ((ssize_t )tmp___1); } } static ssize_t target_core_dev_pr_show_spc2_res(struct se_device *dev , char *page ) { struct se_node_acl *se_nacl ; ssize_t len ; char *tmp ; int tmp___0 ; int tmp___1 ; { se_nacl = dev->dev_reserved_node_acl; if ((unsigned long )se_nacl != (unsigned long )((struct se_node_acl *)0)) { tmp = (*(((se_nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); tmp___0 = sprintf(page, "SPC-2 Reservation: %s Initiator: %s\n", tmp, (char *)(& se_nacl->initiatorname)); len = (ssize_t )tmp___0; } else { tmp___1 = sprintf(page, "No SPC-2 Reservation holder\n"); len = (ssize_t )tmp___1; } return (len); } } static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev , char *page ) { int ret ; int tmp ; ssize_t tmp___0 ; ssize_t tmp___1 ; { if ((int )(dev->transport)->transport_flags & 1) { tmp = sprintf(page, "Passthrough\n"); return ((ssize_t )tmp); } else { } spin_lock(& dev->dev_reservation_lock); if ((int )dev->dev_reservation_flags & 1) { tmp___0 = target_core_dev_pr_show_spc2_res(dev, page); ret = (int )tmp___0; } else { tmp___1 = target_core_dev_pr_show_spc3_res(dev, page); ret = (int )tmp___1; } spin_unlock(& dev->dev_reservation_lock); return ((ssize_t )ret); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_holder = {{"res_holder", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_holder, 0}; static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(struct se_device *dev , char *page ) { ssize_t len ; int tmp ; int tmp___0 ; int tmp___1 ; { len = 0L; spin_lock(& dev->dev_reservation_lock); if ((unsigned long )dev->dev_pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { tmp = sprintf(page, "No SPC-3 Reservation holder\n"); len = (ssize_t )tmp; } else if ((dev->dev_pr_res_holder)->pr_reg_all_tg_pt != 0) { tmp___0 = sprintf(page, "SPC-3 Reservation: All Target Ports registration\n"); len = (ssize_t )tmp___0; } else { tmp___1 = sprintf(page, "SPC-3 Reservation: Single Target Port registration\n"); len = (ssize_t )tmp___1; } spin_unlock(& dev->dev_reservation_lock); return (len); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_pr_all_tgt_pts = {{"res_pr_all_tgt_pts", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_pr_all_tgt_pts, 0}; static ssize_t target_core_dev_pr_show_attr_res_pr_generation(struct se_device *dev , char *page ) { int tmp ; { tmp = sprintf(page, "0x%08x\n", dev->t10_pr.pr_generation); return ((ssize_t )tmp); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_pr_generation = {{"res_pr_generation", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_pr_generation, 0}; static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(struct se_device *dev , char *page ) { struct se_node_acl *se_nacl ; struct se_portal_group *se_tpg ; struct t10_pr_registration *pr_reg ; struct target_core_fabric_ops const *tfo ; ssize_t len ; int tmp ; char *tmp___0 ; char *tmp___1 ; int tmp___2 ; char *tmp___3 ; u16 tmp___4 ; char *tmp___5 ; int tmp___6 ; { len = 0L; spin_lock(& dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { tmp = sprintf(page, "No SPC-3 Reservation holder\n"); len = (ssize_t )tmp; goto out_unlock; } else { } se_nacl = pr_reg->pr_reg_nacl; se_tpg = se_nacl->se_tpg; tfo = se_tpg->se_tpg_tfo; tmp___0 = (*(tfo->tpg_get_wwn))(se_tpg); tmp___1 = (*(tfo->get_fabric_name))(); tmp___2 = sprintf(page 
+ (unsigned long )len, "SPC-3 Reservation: %s Target Node Endpoint: %s\n", tmp___1, tmp___0); len = (ssize_t )tmp___2 + len; tmp___3 = (*(tfo->get_fabric_name))(); tmp___4 = (*(tfo->tpg_get_tag))(se_tpg); tmp___5 = (*(tfo->get_fabric_name))(); tmp___6 = sprintf(page + (unsigned long )len, "SPC-3 Reservation: Relative Port Identifier Tag: %hu %s Portal Group Tag: %hu %s Logical Unit: %llu\n", (int )pr_reg->tg_pt_sep_rtpi, tmp___5, (int )tmp___4, tmp___3, pr_reg->pr_aptpl_target_lun); len = (ssize_t )tmp___6 + len; out_unlock: spin_unlock(& dev->dev_reservation_lock); return (len); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_pr_holder_tg_port = {{"res_pr_holder_tg_port", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_pr_holder_tg_port, 0}; static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(struct se_device *dev , char *page ) { struct target_core_fabric_ops const *tfo ; struct t10_pr_registration *pr_reg ; unsigned char buf[384U] ; char i_buf[21U] ; ssize_t len ; int reg_count ; int tmp ; struct list_head const *__mptr ; char *tmp___0 ; size_t tmp___1 ; int tmp___2 ; struct list_head const *__mptr___0 ; int tmp___3 ; { len = 0L; reg_count = 0; tmp = sprintf(page + (unsigned long )len, "SPC-3 PR Registrations:\n"); len = (ssize_t )tmp + len; spin_lock(& dev->t10_pr.registration_lock); __mptr = (struct list_head const *)dev->t10_pr.registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; goto ldv_62990; ldv_62989: memset((void *)(& buf), 0, 384UL); memset((void *)(& i_buf), 0, 21UL); tfo = ((pr_reg->pr_reg_nacl)->se_tpg)->se_tpg_tfo; core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); tmp___0 = (*(tfo->get_fabric_name))(); sprintf((char *)(& buf), "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n", tmp___0, (char *)(& (pr_reg->pr_reg_nacl)->initiatorname), (char *)(& i_buf), pr_reg->pr_res_key, pr_reg->pr_res_generation); tmp___1 = strlen((char const *)(& buf)); if (tmp___1 + (unsigned long )len > 4095UL) { goto ldv_62988; } else { } tmp___2 = sprintf(page + (unsigned long )len, "%s", (unsigned char *)(& buf)); len = (ssize_t )tmp___2 + len; reg_count = reg_count + 1; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; ldv_62990: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& dev->t10_pr.registration_list)) { goto ldv_62989; } else { } ldv_62988: spin_unlock(& dev->t10_pr.registration_lock); if (reg_count == 0) { tmp___3 = sprintf(page + (unsigned long )len, "None\n"); len = (ssize_t )tmp___3 + len; } else { } return (len); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_pr_registered_i_pts = {{"res_pr_registered_i_pts", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_pr_registered_i_pts, 0}; static ssize_t target_core_dev_pr_show_attr_res_pr_type(struct se_device *dev , char *page ) { struct t10_pr_registration *pr_reg ; ssize_t len ; unsigned char *tmp ; int tmp___0 ; int tmp___1 ; { len = 0L; spin_lock(& dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; if ((unsigned long )pr_reg != (unsigned long )((struct t10_pr_registration *)0)) { tmp = core_scsi3_pr_dump_type(pr_reg->pr_res_type); tmp___0 = sprintf(page, "SPC-3 Reservation Type: %s\n", tmp); len = (ssize_t )tmp___0; } else { tmp___1 = sprintf(page, "No SPC-3 Reservation holder\n"); len = (ssize_t )tmp___1; } spin_unlock(& dev->dev_reservation_lock); return (len); } } static struct 
target_core_dev_pr_attribute target_core_dev_pr_res_pr_type = {{"res_pr_type", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_pr_type, 0}; static ssize_t target_core_dev_pr_show_attr_res_type(struct se_device *dev , char *page ) { int tmp ; int tmp___0 ; int tmp___1 ; { if ((int )(dev->transport)->transport_flags & 1) { tmp = sprintf(page, "SPC_PASSTHROUGH\n"); return ((ssize_t )tmp); } else if ((int )dev->dev_reservation_flags & 1) { tmp___0 = sprintf(page, "SPC2_RESERVATIONS\n"); return ((ssize_t )tmp___0); } else { tmp___1 = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n"); return ((ssize_t )tmp___1); } } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_type = {{"res_type", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_type, 0}; static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(struct se_device *dev , char *page ) { int tmp ; { if ((int )(dev->transport)->transport_flags & 1) { return (0L); } else { } tmp = sprintf(page, "APTPL Bit Status: %s\n", dev->t10_pr.pr_aptpl_active != 0 ? (char *)"Activated" : (char *)"Disabled"); return ((ssize_t )tmp); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_aptpl_active = {{"res_aptpl_active", & __this_module, 292U}, & target_core_dev_pr_show_attr_res_aptpl_active, 0}; static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(struct se_device *dev , char *page ) { int tmp ; { if ((int )(dev->transport)->transport_flags & 1) { return (0L); } else { } tmp = sprintf(page, "Ready to process PR APTPL metadata..\n"); return ((ssize_t )tmp); } } static struct match_token tokens[15U] = { {0, "initiator_fabric=%s"}, {1, "initiator_node=%s"}, {2, "initiator_sid=%s"}, {3, "sa_res_key=%s"}, {4, "res_holder=%d"}, {5, "res_type=%d"}, {6, "res_scope=%d"}, {7, "res_all_tg_pt=%d"}, {8, "mapped_lun=%lld"}, {9, "target_fabric=%s"}, {10, "target_node=%s"}, {11, "tpgt=%d"}, {12, "port_rtpi=%d"}, {13, "target_lun=%lld"}, {14, (char const *)0}}; static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(struct se_device *dev , char const *page , size_t count ) { unsigned char *i_fabric ; unsigned char *i_port ; unsigned char *isid ; unsigned char *t_fabric ; unsigned char *t_port ; char *orig ; char *ptr ; char *opts ; substring_t args[3U] ; unsigned long long tmp_ll ; u64 sa_res_key ; u64 mapped_lun ; u64 target_lun ; int ret ; int res_holder ; int all_tg_pt ; int arg ; int token ; u16 tpgt ; u8 type ; struct _ddebug descriptor ; long tmp ; char *tmp___0 ; char *tmp___1 ; size_t tmp___2 ; char *tmp___3 ; size_t tmp___4 ; char *tmp___5 ; char *tmp___6 ; size_t tmp___7 ; { i_fabric = (unsigned char *)0U; i_port = (unsigned char *)0U; isid = (unsigned char *)0U; t_fabric = (unsigned char *)0U; t_port = (unsigned char *)0U; sa_res_key = 0ULL; mapped_lun = 0ULL; target_lun = 0ULL; ret = -1; res_holder = 0; all_tg_pt = 0; tpgt = 0U; type = 0U; if ((int )(dev->transport)->transport_flags & 1) { return (0L); } else { } if ((int )dev->dev_reservation_flags & 1) { return (0L); } else { } if (dev->export_count != 0U) { descriptor.modname = "target_core_mod"; descriptor.function = "target_core_dev_pr_store_attr_res_aptpl_metadata"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Unable to process APTPL metadata while active fabric exports exist\n"; descriptor.lineno = 1599U; 
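/* store_res_aptpl_metadata parses the page as key=value pairs: it kstrdup()s the input,
   splits it on ",\n" with strsep(), matches each token against the tokens[] table above via
   match_token()/match_strdup()/match_int(), and finally hands the collected values to
   core_scsi3_alloc_aptpl_registration(). */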
descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Unable to process APTPL metadata while active fabric exports exist\n"); } else { } return (-22L); } else { } opts = kstrdup(page, 208U); if ((unsigned long )opts == (unsigned long )((char *)0)) { return (-12L); } else { } orig = opts; goto ldv_63057; ldv_63075: ; if ((int )((signed char )*ptr) == 0) { goto ldv_63057; } else { } token = match_token(ptr, (struct match_token const *)(& tokens), (substring_t *)(& args)); switch (token) { case 0: tmp___0 = match_strdup((substring_t const *)(& args)); i_fabric = (unsigned char *)tmp___0; if ((unsigned long )i_fabric == (unsigned long )((unsigned char *)0U)) { ret = -12; goto out; } else { } goto ldv_63060; case 1: tmp___1 = match_strdup((substring_t const *)(& args)); i_port = (unsigned char *)tmp___1; if ((unsigned long )i_port == (unsigned long )((unsigned char *)0U)) { ret = -12; goto out; } else { } tmp___2 = strlen((char const *)i_port); if (tmp___2 > 255UL) { printk("\vAPTPL metadata initiator_node= exceeds PR_APTPL_MAX_IPORT_LEN: %d\n", 256); ret = -22; goto ldv_63060; } else { } goto ldv_63060; case 2: tmp___3 = match_strdup((substring_t const *)(& args)); isid = (unsigned char *)tmp___3; if ((unsigned long )isid == (unsigned long )((unsigned char *)0U)) { ret = -12; goto out; } else { } tmp___4 = strlen((char const *)isid); if (tmp___4 > 15UL) { printk("\vAPTPL metadata initiator_isid= exceeds PR_REG_ISID_LEN: %d\n", 16); ret = -22; goto ldv_63060; } else { } goto ldv_63060; case 3: ret = kstrtoull((char const *)((substring_t *)(& args))->from, 0U, & tmp_ll); if (ret < 0) { printk("\vkstrtoull() failed for sa_res_key=\n"); goto out; } else { } sa_res_key = tmp_ll; goto ldv_63060; case 4: match_int((substring_t *)(& args), & arg); res_holder = arg; goto ldv_63060; case 5: match_int((substring_t *)(& args), & arg); type = (unsigned char )arg; goto ldv_63060; case 6: match_int((substring_t *)(& args), & arg); goto ldv_63060; case 7: match_int((substring_t *)(& args), & arg); all_tg_pt = arg; goto ldv_63060; case 8: match_int((substring_t *)(& args), & arg); mapped_lun = (unsigned long long )arg; goto ldv_63060; case 9: tmp___5 = match_strdup((substring_t const *)(& args)); t_fabric = (unsigned char *)tmp___5; if ((unsigned long )t_fabric == (unsigned long )((unsigned char *)0U)) { ret = -12; goto out; } else { } goto ldv_63060; case 10: tmp___6 = match_strdup((substring_t const *)(& args)); t_port = (unsigned char *)tmp___6; if ((unsigned long )t_port == (unsigned long )((unsigned char *)0U)) { ret = -12; goto out; } else { } tmp___7 = strlen((char const *)t_port); if (tmp___7 > 255UL) { printk("\vAPTPL metadata target_node= exceeds PR_APTPL_MAX_TPORT_LEN: %d\n", 256); ret = -22; goto ldv_63060; } else { } goto ldv_63060; case 11: match_int((substring_t *)(& args), & arg); tpgt = (unsigned short )arg; goto ldv_63060; case 12: match_int((substring_t *)(& args), & arg); goto ldv_63060; case 13: match_int((substring_t *)(& args), & arg); target_lun = (unsigned long long )arg; goto ldv_63060; default: ; goto ldv_63060; } ldv_63060: ; ldv_63057: ptr = strsep(& opts, ",\n"); if ((unsigned long )ptr != (unsigned long )((char *)0)) { goto ldv_63075; } else { } if (((unsigned long )i_port == (unsigned long )((unsigned char *)0U) || (unsigned long )t_port == (unsigned long )((unsigned char *)0U)) || sa_res_key == 0ULL) { printk("\vIllegal parameters for APTPL registration\n"); ret = -22; goto out; } else { } if 
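/* Final validation: initiator_node=, target_node= and a non-zero sa_res_key= are mandatory,
   and a reservation holder must also supply a non-zero type. The out: label kfree()s every
   match_strdup()ed string plus the kstrdup()ed copy, and the handler returns the error or
   the full count on success. */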
(res_holder != 0 && (unsigned int )type == 0U) { printk("\vIllegal PR type: 0x%02x for reservation holder\n", (int )type); ret = -22; goto out; } else { } ret = core_scsi3_alloc_aptpl_registration(& dev->t10_pr, sa_res_key, i_port, isid, mapped_lun, t_port, (int )tpgt, target_lun, res_holder, all_tg_pt, (int )type); out: kfree((void const *)i_fabric); kfree((void const *)i_port); kfree((void const *)isid); kfree((void const *)t_fabric); kfree((void const *)t_port); kfree((void const *)orig); return ((ssize_t )(ret != 0 ? (size_t )ret : count)); } } static struct target_core_dev_pr_attribute target_core_dev_pr_res_aptpl_metadata = {{"res_aptpl_metadata", & __this_module, 420U}, & target_core_dev_pr_show_attr_res_aptpl_metadata, & target_core_dev_pr_store_attr_res_aptpl_metadata}; static struct se_device *to_target_core_dev_pr(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_device *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_device *)__mptr + 0xfffffffffffff750UL; } else { tmp___0 = (struct se_device *)0; } return (tmp___0); } } static ssize_t target_core_dev_pr_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_device *se_device ; struct se_device *tmp ; struct target_core_dev_pr_attribute *target_core_dev_pr_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_pr(item); se_device = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_pr_attr = (struct target_core_dev_pr_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_dev_pr_attr->show != (unsigned long )((ssize_t (*)(struct se_device * , char * ))0)) { ret = (*(target_core_dev_pr_attr->show))(se_device, page); } else { } return (ret); } } static ssize_t target_core_dev_pr_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_device *se_device ; struct se_device *tmp ; struct target_core_dev_pr_attribute *target_core_dev_pr_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_dev_pr(item); se_device = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_dev_pr_attr = (struct target_core_dev_pr_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_dev_pr_attr->store != (unsigned long )((ssize_t (*)(struct se_device * , char const * , size_t ))0)) { ret = (*(target_core_dev_pr_attr->store))(se_device, page, count); } else { } return (ret); } } static struct configfs_attribute *target_core_dev_pr_attrs[10U] = { & target_core_dev_pr_res_holder.attr, & target_core_dev_pr_res_pr_all_tgt_pts.attr, & target_core_dev_pr_res_pr_generation.attr, & target_core_dev_pr_res_pr_holder_tg_port.attr, & target_core_dev_pr_res_pr_registered_i_pts.attr, & target_core_dev_pr_res_pr_type.attr, & target_core_dev_pr_res_type.attr, & target_core_dev_pr_res_aptpl_active.attr, & target_core_dev_pr_res_aptpl_metadata.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_core_dev_pr_ops = {0, & target_core_dev_pr_attr_show, & target_core_dev_pr_attr_store, 0, 0}; static void target_core_setup_dev_pr_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_pr_cit; cit->ct_item_ops = & target_core_dev_pr_ops; cit->ct_group_ops = (struct configfs_group_operations 
*)0; cit->ct_attrs = (struct configfs_attribute **)(& target_core_dev_pr_attrs); cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_pr_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1767U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev_pr"); } else { } return; } } static ssize_t target_core_show_dev_info(void *p , char *page ) { struct se_device *dev ; int bl ; ssize_t read_bytes ; ssize_t tmp ; { dev = (struct se_device *)p; bl = 0; read_bytes = 0L; transport_dump_dev_state(dev, page, & bl); read_bytes = (ssize_t )bl + read_bytes; tmp = (*((dev->transport)->show_configfs_dev_params))(dev, page + (unsigned long )read_bytes); read_bytes = tmp + read_bytes; return (read_bytes); } } static struct target_core_configfs_attribute target_core_attr_dev_info = {{"info", & __this_module, 292U}, & target_core_show_dev_info, (ssize_t (*)(void * , char const * , size_t ))0}; static ssize_t target_core_store_dev_control(void *p , char const *page , size_t count ) { struct se_device *dev ; ssize_t tmp ; { dev = (struct se_device *)p; tmp = (*((dev->transport)->set_configfs_dev_params))(dev, page, (ssize_t )count); return (tmp); } } static struct target_core_configfs_attribute target_core_attr_dev_control = {{"control", & __this_module, 128U}, (ssize_t (*)(void * , char * ))0, & target_core_store_dev_control}; static ssize_t target_core_show_dev_alias(void *p , char *page ) { struct se_device *dev ; int tmp ; { dev = (struct se_device *)p; if ((dev->dev_flags & 16U) == 0U) { return (0L); } else { } tmp = snprintf(page, 4096UL, "%s\n", (unsigned char *)(& dev->dev_alias)); return ((ssize_t )tmp); } } static ssize_t target_core_store_dev_alias(void *p , char const *page , size_t count ) { struct se_device *dev ; struct se_hba *hba ; ssize_t read_bytes ; int tmp ; struct _ddebug descriptor ; char *tmp___0 ; char *tmp___1 ; long tmp___2 ; { dev = (struct se_device *)p; hba = dev->se_hba; if (count > 511UL) { printk("\valias count: %d exceeds SE_DEV_ALIAS_LEN-1: %u\n", (int )count, 511); return (-22L); } else { } tmp = snprintf((char *)(& dev->dev_alias), 512UL, "%s", page); read_bytes = (ssize_t )tmp; if (read_bytes == 0L) { return (-22L); } else { } if ((unsigned int )dev->dev_alias[read_bytes + -1L] == 10U) { dev->dev_alias[read_bytes + -1L] = 0U; } else { } dev->dev_flags = dev->dev_flags | 16U; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_store_dev_alias"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: %s/%s set alias: %s\n"; descriptor.lineno = 1849U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___0 = config_item_name(& dev->dev_group.cg_item); tmp___1 = config_item_name(& hba->hba_group.cg_item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: %s/%s set alias: %s\n", tmp___1, tmp___0, (unsigned char *)(& dev->dev_alias)); } 
else { } return (read_bytes); } } static struct target_core_configfs_attribute target_core_attr_dev_alias = {{"alias", & __this_module, 420U}, & target_core_show_dev_alias, & target_core_store_dev_alias}; static ssize_t target_core_show_dev_udev_path(void *p , char *page ) { struct se_device *dev ; int tmp ; { dev = (struct se_device *)p; if ((dev->dev_flags & 8U) == 0U) { return (0L); } else { } tmp = snprintf(page, 4096UL, "%s\n", (unsigned char *)(& dev->udev_path)); return ((ssize_t )tmp); } } static ssize_t target_core_store_dev_udev_path(void *p , char const *page , size_t count ) { struct se_device *dev ; struct se_hba *hba ; ssize_t read_bytes ; int tmp ; struct _ddebug descriptor ; char *tmp___0 ; char *tmp___1 ; long tmp___2 ; { dev = (struct se_device *)p; hba = dev->se_hba; if (count > 511UL) { printk("\vudev_path count: %d exceeds SE_UDEV_PATH_LEN-1: %u\n", (int )count, 511); return (-22L); } else { } tmp = snprintf((char *)(& dev->udev_path), 512UL, "%s", page); read_bytes = (ssize_t )tmp; if (read_bytes == 0L) { return (-22L); } else { } if ((unsigned int )dev->udev_path[read_bytes + -1L] == 10U) { dev->udev_path[read_bytes + -1L] = 0U; } else { } dev->dev_flags = dev->dev_flags | 8U; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_store_dev_udev_path"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: %s/%s set udev_path: %s\n"; descriptor.lineno = 1900U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___0 = config_item_name(& dev->dev_group.cg_item); tmp___1 = config_item_name(& hba->hba_group.cg_item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: %s/%s set udev_path: %s\n", tmp___1, tmp___0, (unsigned char *)(& dev->udev_path)); } else { } return (read_bytes); } } static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {{"udev_path", & __this_module, 420U}, & target_core_show_dev_udev_path, & target_core_store_dev_udev_path}; static ssize_t target_core_show_dev_enable(void *p , char *page ) { struct se_device *dev ; int tmp ; { dev = (struct se_device *)p; tmp = snprintf(page, 4096UL, "%d\n", (int )dev->dev_flags & 1); return ((ssize_t )tmp); } } static ssize_t target_core_store_dev_enable(void *p , char const *page , size_t count ) { struct se_device *dev ; char *ptr ; int ret ; { dev = (struct se_device *)p; ptr = strstr(page, "1"); if ((unsigned long )ptr == (unsigned long )((char *)0)) { printk("\vFor dev_enable ops, only valid value is \"1\"\n"); return (-22L); } else { } ret = target_configure_device(dev); if (ret != 0) { return ((ssize_t )ret); } else { } return ((ssize_t )count); } } static struct target_core_configfs_attribute target_core_attr_dev_enable = {{"enable", & __this_module, 420U}, & target_core_show_dev_enable, & target_core_store_dev_enable}; static ssize_t target_core_show_alua_lu_gp(void *p , char *page ) { struct se_device *dev ; struct config_item *lu_ci ; struct t10_alua_lu_gp *lu_gp ; struct t10_alua_lu_gp_member *lu_gp_mem ; ssize_t len ; char *tmp ; int tmp___0 ; { dev = (struct se_device *)p; len = 0L; lu_gp_mem = dev->dev_alua_lu_gp_mem; if ((unsigned long )lu_gp_mem == (unsigned long )((struct t10_alua_lu_gp_member *)0)) { return (0L); } else { } spin_lock(& lu_gp_mem->lu_gp_mem_lock); 
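/*
 * Under lu_gp_mem_lock: if this device currently belongs to an ALUA Logical
 * Unit group, the lines below print its group alias and numeric lu_gp_id.
 */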
lu_gp = lu_gp_mem->lu_gp; if ((unsigned long )lu_gp != (unsigned long )((struct t10_alua_lu_gp *)0)) { lu_ci = & lu_gp->lu_gp_group.cg_item; tmp = config_item_name(lu_ci); tmp___0 = sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n", tmp, (int )lu_gp->lu_gp_id); len = (ssize_t )tmp___0 + len; } else { } spin_unlock(& lu_gp_mem->lu_gp_mem_lock); return (len); } } static ssize_t target_core_store_alua_lu_gp(void *p , char const *page , size_t count ) { struct se_device *dev ; struct se_hba *hba ; struct t10_alua_lu_gp *lu_gp ; struct t10_alua_lu_gp *lu_gp_new ; struct t10_alua_lu_gp_member *lu_gp_mem ; unsigned char buf[256U] ; int move ; char *tmp ; char *tmp___0 ; int tmp___1 ; struct _ddebug descriptor ; char *tmp___2 ; char *tmp___3 ; char *tmp___4 ; long tmp___5 ; struct _ddebug descriptor___0 ; char *tmp___6 ; char *tmp___7 ; char *tmp___8 ; long tmp___9 ; { dev = (struct se_device *)p; hba = dev->se_hba; lu_gp = (struct t10_alua_lu_gp *)0; lu_gp_new = (struct t10_alua_lu_gp *)0; move = 0; lu_gp_mem = dev->dev_alua_lu_gp_mem; if ((unsigned long )lu_gp_mem == (unsigned long )((struct t10_alua_lu_gp_member *)0)) { return (0L); } else { } if (count > 256UL) { printk("\vALUA LU Group Alias too large!\n"); return (-22L); } else { } memset((void *)(& buf), 0, 256UL); memcpy((void *)(& buf), (void const *)page, count); tmp___0 = strstrip((char *)(& buf)); tmp___1 = strcmp((char const *)tmp___0, "NULL"); if (tmp___1 != 0) { tmp = strstrip((char *)(& buf)); lu_gp_new = core_alua_get_lu_gp_by_name((char const *)tmp); if ((unsigned long )lu_gp_new == (unsigned long )((struct t10_alua_lu_gp *)0)) { return (-19L); } else { } } else { } spin_lock(& lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if ((unsigned long )lu_gp != (unsigned long )((struct t10_alua_lu_gp *)0)) { if ((unsigned long )lu_gp_new == (unsigned long )((struct t10_alua_lu_gp *)0)) { descriptor.modname = "target_core_mod"; descriptor.function = "target_core_store_alua_lu_gp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Releasing %s/%s from ALUA LU Group: core/alua/lu_gps/%s, ID: %hu\n"; descriptor.lineno = 2025U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___2 = config_item_name(& lu_gp->lu_gp_group.cg_item); tmp___3 = config_item_name(& dev->dev_group.cg_item); tmp___4 = config_item_name(& hba->hba_group.cg_item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Releasing %s/%s from ALUA LU Group: core/alua/lu_gps/%s, ID: %hu\n", tmp___4, tmp___3, tmp___2, (int )lu_gp->lu_gp_id); } else { } __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); spin_unlock(& lu_gp_mem->lu_gp_mem_lock); return ((ssize_t )count); } else { } __core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp); move = 1; } else { } __core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new); spin_unlock(& lu_gp_mem->lu_gp_mem_lock); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_core_store_alua_lu_gp"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___0.format = "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group: core/alua/lu_gps/%s, ID: 
%hu\n"; descriptor___0.lineno = 2050U; descriptor___0.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___6 = config_item_name(& lu_gp_new->lu_gp_group.cg_item); tmp___7 = config_item_name(& dev->dev_group.cg_item); tmp___8 = config_item_name(& hba->hba_group.cg_item); __dynamic_pr_debug(& descriptor___0, "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group: core/alua/lu_gps/%s, ID: %hu\n", move != 0 ? (char *)"Moving" : (char *)"Adding", tmp___8, tmp___7, tmp___6, (int )lu_gp_new->lu_gp_id); } else { } core_alua_put_lu_gp_from_name(lu_gp_new); return ((ssize_t )count); } } static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {{"alua_lu_gp", & __this_module, 420U}, & target_core_show_alua_lu_gp, & target_core_store_alua_lu_gp}; static ssize_t target_core_show_dev_lba_map(void *p , char *page ) { struct se_device *dev ; struct t10_alua_lba_map *map ; struct t10_alua_lba_map_member *mem ; char *b ; int bl ; char state ; int tmp ; int tmp___0 ; struct list_head const *__mptr ; int tmp___1 ; struct list_head const *__mptr___0 ; int tmp___2 ; struct list_head const *__mptr___1 ; int tmp___3 ; struct list_head const *__mptr___2 ; { dev = (struct se_device *)p; b = page; bl = 0; spin_lock(& dev->t10_alua.lba_map_lock); tmp___0 = list_empty((struct list_head const *)(& dev->t10_alua.lba_map_list)); if (tmp___0 == 0) { tmp = sprintf(b + (unsigned long )bl, "%u %u\n", dev->t10_alua.lba_map_segment_size, dev->t10_alua.lba_map_segment_multiplier); bl = tmp + bl; } else { } __mptr = (struct list_head const *)dev->t10_alua.lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr + 0xfffffffffffffff0UL; goto ldv_63226; ldv_63225: tmp___1 = sprintf(b + (unsigned long )bl, "%llu %llu", map->lba_map_first_lba, map->lba_map_last_lba); bl = tmp___1 + bl; __mptr___0 = (struct list_head const *)map->lba_map_mem_list.next; mem = (struct t10_alua_lba_map_member *)__mptr___0; goto ldv_63223; ldv_63222: ; switch (mem->lba_map_mem_alua_state) { case 0: state = 79; goto ldv_63217; case 1: state = 65; goto ldv_63217; case 2: state = 83; goto ldv_63217; case 3: state = 85; goto ldv_63217; default: state = 46; goto ldv_63217; } ldv_63217: tmp___2 = sprintf(b + (unsigned long )bl, " %d:%c", mem->lba_map_mem_alua_pg_id, (int )state); bl = tmp___2 + bl; __mptr___1 = (struct list_head const *)mem->lba_map_mem_list.next; mem = (struct t10_alua_lba_map_member *)__mptr___1; ldv_63223: ; if ((unsigned long )(& mem->lba_map_mem_list) != (unsigned long )(& map->lba_map_mem_list)) { goto ldv_63222; } else { } tmp___3 = sprintf(b + (unsigned long )bl, "\n"); bl = tmp___3 + bl; __mptr___2 = (struct list_head const *)map->lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr___2 + 0xfffffffffffffff0UL; ldv_63226: ; if ((unsigned long )(& map->lba_map_list) != (unsigned long )(& dev->t10_alua.lba_map_list)) { goto ldv_63225; } else { } spin_unlock(& dev->t10_alua.lba_map_lock); return ((ssize_t )bl); } } static ssize_t target_core_store_dev_lba_map(void *p , char const *page , size_t count ) { struct se_device *dev ; struct t10_alua_lba_map *lba_map ; struct list_head lba_list ; char *map_entries ; char *ptr ; char state ; int pg_num ; int pg ; int ret ; int num ; int pg_id ; int alua_state ; unsigned long start_lba ; unsigned long end_lba ; unsigned long segment_size ; unsigned long segment_mult ; int tmp ; int tmp___0 ; long tmp___1 ; bool tmp___2 ; int tmp___3 ; { dev = (struct se_device *)p; lba_map = (struct t10_alua_lba_map *)0; pg_num = -1; ret 
= 0; num = 0; start_lba = 0xffffffffffffffffUL; end_lba = 0xffffffffffffffffUL; segment_size = 0xffffffffffffffffUL; segment_mult = 0xffffffffffffffffUL; map_entries = kstrdup(page, 208U); if ((unsigned long )map_entries == (unsigned long )((char *)0)) { return (-12L); } else { } INIT_LIST_HEAD(& lba_list); goto ldv_63249; ldv_63261: ; if ((int )((signed char )*ptr) == 0) { goto ldv_63249; } else { } if (num == 0) { tmp = sscanf((char const *)ptr, "%lu %lu\n", & segment_size, & segment_mult); if (tmp != 2) { printk("\vInvalid line %d\n", num); ret = -22; goto ldv_63250; } else { } num = num + 1; goto ldv_63249; } else { } tmp___0 = sscanf((char const *)ptr, "%lu %lu", & start_lba, & end_lba); if (tmp___0 != 2) { printk("\vInvalid line %d\n", num); ret = -22; goto ldv_63250; } else { } ptr = strchr((char const *)ptr, 32); if ((unsigned long )ptr == (unsigned long )((char *)0)) { printk("\vInvalid line %d, missing end lba\n", num); ret = -22; goto ldv_63250; } else { } ptr = ptr + 1; ptr = strchr((char const *)ptr, 32); if ((unsigned long )ptr == (unsigned long )((char *)0)) { printk("\vInvalid line %d, missing state definitions\n", num); ret = -22; goto ldv_63250; } else { } ptr = ptr + 1; lba_map = core_alua_allocate_lba_map(& lba_list, (u64 )start_lba, (u64 )end_lba); tmp___2 = IS_ERR((void const *)lba_map); if ((int )tmp___2) { tmp___1 = PTR_ERR((void const *)lba_map); ret = (int )tmp___1; goto ldv_63250; } else { } pg = 0; goto ldv_63260; ldv_63259: ; switch ((int )state) { case 79: alua_state = 0; goto ldv_63252; case 65: alua_state = 1; goto ldv_63252; case 83: alua_state = 2; goto ldv_63252; case 85: alua_state = 3; goto ldv_63252; default: printk("\vInvalid ALUA state \'%c\'\n", (int )state); ret = -22; goto out; } ldv_63252: ret = core_alua_allocate_lba_map_mem(lba_map, pg_id, alua_state); if (ret != 0) { printk("\vInvalid target descriptor %d:%c at line %d\n", pg_id, (int )state, num); goto ldv_63258; } else { } pg = pg + 1; ptr = strchr((char const *)ptr, 32); if ((unsigned long )ptr != (unsigned long )((char *)0)) { ptr = ptr + 1; } else { goto ldv_63258; } ldv_63260: tmp___3 = sscanf((char const *)ptr, "%d:%c", & pg_id, & state); if (tmp___3 == 2) { goto ldv_63259; } else { } ldv_63258: ; if (pg_num == -1) { pg_num = pg; } else if (pg != pg_num) { printk("\vOnly %d from %d port groups definitions at line %d\n", pg, pg_num, num); ret = -22; goto ldv_63250; } else { } num = num + 1; ldv_63249: ptr = strsep(& map_entries, "\n"); if ((unsigned long )ptr != (unsigned long )((char *)0)) { goto ldv_63261; } else { } ldv_63250: ; out: ; if (ret != 0) { core_alua_free_lba_map(& lba_list); count = (size_t )ret; } else { core_alua_set_lba_map(dev, & lba_list, (int )segment_size, (int )segment_mult); } kfree((void const *)map_entries); return ((ssize_t )count); } } static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {{"lba_map", & __this_module, 420U}, & target_core_show_dev_lba_map, & target_core_store_dev_lba_map}; static struct configfs_attribute *target_core_dev_attrs[8U] = { & target_core_attr_dev_info.attr, & target_core_attr_dev_control.attr, & target_core_attr_dev_alias.attr, & target_core_attr_dev_udev_path.attr, & target_core_attr_dev_enable.attr, & target_core_attr_dev_alua_lu_gp.attr, & target_core_attr_dev_lba_map.attr, (struct configfs_attribute *)0}; static void target_core_dev_release(struct config_item *item ) { struct config_group *dev_cg ; struct config_group *tmp ; struct se_device *dev ; struct config_group const *__mptr ; { tmp = 
to_config_group(item); dev_cg = tmp; __mptr = (struct config_group const *)dev_cg; dev = (struct se_device *)__mptr + 0xfffffffffffff7c0UL; kfree((void const *)dev_cg->default_groups); target_free_device(dev); return; } } static ssize_t target_core_dev_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct config_group *dev_cg ; struct config_group *tmp ; struct se_device *dev ; struct config_group const *__mptr ; struct target_core_configfs_attribute *tc_attr ; struct configfs_attribute const *__mptr___0 ; ssize_t tmp___0 ; { tmp = to_config_group(item); dev_cg = tmp; __mptr = (struct config_group const *)dev_cg; dev = (struct se_device *)__mptr + 0xfffffffffffff7c0UL; __mptr___0 = (struct configfs_attribute const *)attr; tc_attr = (struct target_core_configfs_attribute *)__mptr___0; if ((unsigned long )tc_attr->show == (unsigned long )((ssize_t (*)(void * , char * ))0)) { return (-22L); } else { } tmp___0 = (*(tc_attr->show))((void *)dev, page); return (tmp___0); } } static ssize_t target_core_dev_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct config_group *dev_cg ; struct config_group *tmp ; struct se_device *dev ; struct config_group const *__mptr ; struct target_core_configfs_attribute *tc_attr ; struct configfs_attribute const *__mptr___0 ; ssize_t tmp___0 ; { tmp = to_config_group(item); dev_cg = tmp; __mptr = (struct config_group const *)dev_cg; dev = (struct se_device *)__mptr + 0xfffffffffffff7c0UL; __mptr___0 = (struct configfs_attribute const *)attr; tc_attr = (struct target_core_configfs_attribute *)__mptr___0; if ((unsigned long )tc_attr->store == (unsigned long )((ssize_t (*)(void * , char const * , size_t ))0)) { return (-22L); } else { } tmp___0 = (*(tc_attr->store))((void *)dev, page, count); return (tmp___0); } } static struct configfs_item_operations target_core_dev_item_ops = {& target_core_dev_release, & target_core_dev_show, & target_core_dev_store, 0, 0}; static void target_core_setup_dev_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_cit; cit->ct_item_ops = & target_core_dev_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct configfs_attribute **)(& target_core_dev_attrs); cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 2293U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev"); } else { } return; } } static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(struct t10_alua_lu_gp *lu_gp , char *page ) { int tmp ; { if (lu_gp->lu_gp_valid_id == 0) { return (0L); } else { } tmp = sprintf(page, "%hu\n", (int )lu_gp->lu_gp_id); return ((ssize_t )tmp); } } static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(struct t10_alua_lu_gp *lu_gp , char const *page , size_t count ) { struct config_group *alua_lu_gp_cg ; unsigned long lu_gp_id ; int ret ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { alua_lu_gp_cg = & lu_gp->lu_gp_group; ret = kstrtoul(page, 0U, & 
lu_gp_id); if (ret < 0) { printk("\vkstrtoul() returned %d for lu_gp_id\n", ret); return ((ssize_t )ret); } else { } if (lu_gp_id > 65535UL) { printk("\vALUA lu_gp_id: %lu exceeds maximum: 0x0000ffff\n", lu_gp_id); return (-22L); } else { } ret = core_alua_set_lu_gp_id(lu_gp, (int )((unsigned short )lu_gp_id)); if (ret < 0) { return (-22L); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_lu_gp_store_attr_lu_gp_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Set ALUA Logical Unit Group: core/alua/lu_gps/%s to ID: %hu\n"; descriptor.lineno = 2354U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = config_item_name(& alua_lu_gp_cg->cg_item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Set ALUA Logical Unit Group: core/alua/lu_gps/%s to ID: %hu\n", tmp, (int )lu_gp->lu_gp_id); } else { } return ((ssize_t )count); } } static struct target_core_alua_lu_gp_attribute target_core_alua_lu_gp_lu_gp_id = {{"lu_gp_id", & __this_module, 420U}, & target_core_alua_lu_gp_show_attr_lu_gp_id, & target_core_alua_lu_gp_store_attr_lu_gp_id}; static ssize_t target_core_alua_lu_gp_show_attr_members(struct t10_alua_lu_gp *lu_gp , char *page ) { struct se_device *dev ; struct se_hba *hba ; struct t10_alua_lu_gp_member *lu_gp_mem ; ssize_t len ; ssize_t cur_len ; unsigned char buf[256U] ; struct list_head const *__mptr ; char *tmp ; char *tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; { len = 0L; memset((void *)(& buf), 0, 256UL); spin_lock(& lu_gp->lu_gp_lock); __mptr = (struct list_head const *)lu_gp->lu_gp_mem_list.next; lu_gp_mem = (struct t10_alua_lu_gp_member *)__mptr + 0xffffffffffffffa0UL; goto ldv_63343; ldv_63342: dev = lu_gp_mem->lu_gp_mem_dev; hba = dev->se_hba; tmp = config_item_name(& dev->dev_group.cg_item); tmp___0 = config_item_name(& hba->hba_group.cg_item); tmp___1 = snprintf((char *)(& buf), 256UL, "%s/%s\n", tmp___0, tmp); cur_len = (ssize_t )tmp___1; cur_len = cur_len + 1L; if ((unsigned long )(cur_len + len) > 4096UL) { printk("\fRan out of lu_gp_show_attr_members buffer\n"); goto ldv_63341; } else { } memcpy((void *)page + (unsigned long )len, (void const *)(& buf), (size_t )cur_len); len = len + cur_len; __mptr___0 = (struct list_head const *)lu_gp_mem->lu_gp_mem_list.next; lu_gp_mem = (struct t10_alua_lu_gp_member *)__mptr___0 + 0xffffffffffffffa0UL; ldv_63343: ; if ((unsigned long )(& lu_gp_mem->lu_gp_mem_list) != (unsigned long )(& lu_gp->lu_gp_mem_list)) { goto ldv_63342; } else { } ldv_63341: spin_unlock(& lu_gp->lu_gp_lock); return (len); } } static struct target_core_alua_lu_gp_attribute target_core_alua_lu_gp_members = {{"members", & __this_module, 292U}, & target_core_alua_lu_gp_show_attr_members, 0}; static struct t10_alua_lu_gp *to_target_core_alua_lu_gp(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct t10_alua_lu_gp *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct t10_alua_lu_gp *)__mptr + 0xffffffffffffffa8UL; } else { tmp___0 = (struct t10_alua_lu_gp *)0; } return (tmp___0); } } static ssize_t target_core_alua_lu_gp_attr_show(struct 
config_item *item , struct configfs_attribute *attr , char *page ) { struct t10_alua_lu_gp *t10_alua_lu_gp ; struct t10_alua_lu_gp *tmp ; struct target_core_alua_lu_gp_attribute *target_core_alua_lu_gp_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_alua_lu_gp(item); t10_alua_lu_gp = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_alua_lu_gp_attr = (struct target_core_alua_lu_gp_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_alua_lu_gp_attr->show != (unsigned long )((ssize_t (*)(struct t10_alua_lu_gp * , char * ))0)) { ret = (*(target_core_alua_lu_gp_attr->show))(t10_alua_lu_gp, page); } else { } return (ret); } } static ssize_t target_core_alua_lu_gp_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct t10_alua_lu_gp *t10_alua_lu_gp ; struct t10_alua_lu_gp *tmp ; struct target_core_alua_lu_gp_attribute *target_core_alua_lu_gp_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_alua_lu_gp(item); t10_alua_lu_gp = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_alua_lu_gp_attr = (struct target_core_alua_lu_gp_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_alua_lu_gp_attr->store != (unsigned long )((ssize_t (*)(struct t10_alua_lu_gp * , char const * , size_t ))0)) { ret = (*(target_core_alua_lu_gp_attr->store))(t10_alua_lu_gp, page, count); } else { } return (ret); } } static struct configfs_attribute *target_core_alua_lu_gp_attrs[3U] = { & target_core_alua_lu_gp_lu_gp_id.attr, & target_core_alua_lu_gp_members.attr, (struct configfs_attribute *)0}; static void target_core_alua_lu_gp_release(struct config_item *item ) { struct t10_alua_lu_gp *lu_gp ; struct config_group const *__mptr ; struct config_group *tmp ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lu_gp = (struct t10_alua_lu_gp *)__mptr + 0xffffffffffffffa8UL; core_alua_free_lu_gp(lu_gp); return; } } static struct configfs_item_operations target_core_alua_lu_gp_ops = {& target_core_alua_lu_gp_release, & target_core_alua_lu_gp_attr_show, & target_core_alua_lu_gp_attr_store, 0, 0}; static struct config_item_type target_core_alua_lu_gp_cit = {& __this_module, & target_core_alua_lu_gp_ops, 0, (struct configfs_attribute **)(& target_core_alua_lu_gp_attrs)}; static struct config_group *target_core_alua_create_lu_gp(struct config_group *group , char const *name ) { struct t10_alua_lu_gp *lu_gp ; struct config_group *alua_lu_gp_cg ; struct config_item *alua_lu_gp_ci ; bool tmp ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { alua_lu_gp_cg = (struct config_group *)0; alua_lu_gp_ci = (struct config_item *)0; lu_gp = core_alua_allocate_lu_gp(name, 0); tmp = IS_ERR((void const *)lu_gp); if ((int )tmp) { return ((struct config_group *)0); } else { } alua_lu_gp_cg = & lu_gp->lu_gp_group; alua_lu_gp_ci = & alua_lu_gp_cg->cg_item; config_group_init_type_name(alua_lu_gp_cg, name, & target_core_alua_lu_gp_cit); descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_create_lu_gp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Allocated ALUA Logical Unit Group: core/alua/lu_gps/%s\n"; descriptor.lineno = 2453U; descriptor.flags = 0U; tmp___1 = 
ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = config_item_name(alua_lu_gp_ci); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Allocated ALUA Logical Unit Group: core/alua/lu_gps/%s\n", tmp___0); } else { } return (alua_lu_gp_cg); } } static void target_core_alua_drop_lu_gp(struct config_group *group , struct config_item *item ) { struct t10_alua_lu_gp *lu_gp ; struct config_group const *__mptr ; struct config_group *tmp ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lu_gp = (struct t10_alua_lu_gp *)__mptr + 0xffffffffffffffa8UL; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_drop_lu_gp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Releasing ALUA Logical Unit Group: core/alua/lu_gps/%s, ID: %hu\n"; descriptor.lineno = 2468U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = config_item_name(item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Releasing ALUA Logical Unit Group: core/alua/lu_gps/%s, ID: %hu\n", tmp___0, (int )lu_gp->lu_gp_id); } else { } config_item_put(item); return; } } static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {0, & target_core_alua_create_lu_gp, 0, 0, & target_core_alua_drop_lu_gp}; static struct config_item_type target_core_alua_lu_gps_cit = {& __this_module, (struct configfs_item_operations *)0, & target_core_alua_lu_gps_group_ops, 0}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; int tmp___0 ; { tmp = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); tmp___0 = sprintf(page, "%d\n", tmp); return ((ssize_t )tmp___0); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { struct se_device *dev ; unsigned long tmp ; int new_state ; int ret ; { dev = tg_pt_gp->tg_pt_gp_dev; if (tg_pt_gp->tg_pt_gp_valid_id == 0) { printk("\vUnable to do implicit ALUA on non valid tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return (-22L); } else { } if ((dev->dev_flags & 1U) == 0U) { printk("\vUnable to set alua_access_state while device is not configured\n"); return (-19L); } else { } ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract new ALUA access state from %s\n", page); return ((ssize_t )ret); } else { } new_state = (int )tmp; if ((tg_pt_gp->tg_pt_gp_alua_access_type & 16) == 0) { printk("\vUnable to process implicit configfs ALUA transition while TPGS_IMPLICIT_ALUA is disabled\n"); return (-22L); } else { } if ((tg_pt_gp->tg_pt_gp_alua_access_type & 32) != 0 && new_state == 4) { printk("\vUnable to process implicit configfs ALUA transition while explicit ALUA management is enabled\n"); return (-22L); } else { } ret = core_alua_do_port_transition(tg_pt_gp, dev, (struct se_lun *)0, (struct se_node_acl *)0, new_state, 0); return (ret == 0 ? 
(ssize_t )count : -22L); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_access_state = {{"alua_access_state", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_access_state, & target_core_alua_tg_pt_gp_store_attr_alua_access_state}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { char *tmp ; int tmp___0 ; { tmp = core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status); tmp___0 = sprintf(page, "%s\n", tmp); return ((ssize_t )tmp___0); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int new_status ; int ret ; { if (tg_pt_gp->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ALUA access status on non valid tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract new ALUA access status from %s\n", page); return ((ssize_t )ret); } else { } new_status = (int )tmp; if ((new_status != 0 && new_status != 1) && new_status != 2) { printk("\vIllegal ALUA access status: 0x%02x\n", new_status); return (-22L); } else { } tg_pt_gp->tg_pt_gp_alua_access_status = new_status; return ((ssize_t )count); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_access_status = {{"alua_access_status", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_access_status, & target_core_alua_tg_pt_gp_store_attr_alua_access_status}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { ssize_t tmp ; { tmp = core_alua_show_access_type(tg_pt_gp, page); return (tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { ssize_t tmp ; { tmp = core_alua_store_access_type(tg_pt_gp, page, count); return (tmp); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_access_type = {{"alua_access_type", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_access_type, & target_core_alua_tg_pt_gp_store_attr_alua_access_type}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_transitioning(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 128) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_transitioning(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 128; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -129; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_transitioning = {{"alua_support_transitioning", & __this_module, 420U}, & 
target_core_alua_tg_pt_gp_show_attr_alua_support_transitioning, & target_core_alua_tg_pt_gp_store_attr_alua_support_transitioning}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_offline(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 64) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_offline(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 64; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -65; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_offline = {{"alua_support_offline", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_offline, & target_core_alua_tg_pt_gp_store_attr_alua_support_offline}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_lba_dependent(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 16) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_lba_dependent(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 16; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -17; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_lba_dependent = {{"alua_support_lba_dependent", & __this_module, 292U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_lba_dependent, & target_core_alua_tg_pt_gp_store_attr_alua_support_lba_dependent}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_unavailable(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 8) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_unavailable(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { 
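/*
 * tg_pt_gp_alua_supported_states bit layout as used by the handlers in this
 * section: 0x80 transitioning, 0x40 offline, 0x10 lba_dependent,
 * 0x08 unavailable, 0x04 standby, 0x02 active/non-optimized,
 * 0x01 active/optimized.  Writing "1" sets a bit and "0" clears it; the
 * statement that follows sets the unavailable bit for this port group.
 */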
t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 8; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -9; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_unavailable = {{"alua_support_unavailable", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_unavailable, & target_core_alua_tg_pt_gp_store_attr_alua_support_unavailable}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_standby(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 4) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_standby(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 4; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -5; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_standby = {{"alua_support_standby", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_standby, & target_core_alua_tg_pt_gp_store_attr_alua_support_standby}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_active_optimized(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", t->tg_pt_gp_alua_supported_states & 1); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_active_optimized(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 1; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -2; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_active_optimized = {{"alua_support_active_optimized", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_active_optimized, & target_core_alua_tg_pt_gp_store_attr_alua_support_active_optimized}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_support_active_nonoptimized(struct t10_alua_tg_pt_gp *t , char *p ) { int tmp ; { tmp = sprintf(p, "%d\n", (t->tg_pt_gp_alua_supported_states & 2) != 0); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_support_active_nonoptimized(struct t10_alua_tg_pt_gp *t , char const *p , size_t c ) { unsigned long tmp ; int ret ; { if (t->tg_pt_gp_valid_id == 0) { printk("\vUnable to do set ##_name ALUA 
state on non valid tg_pt_gp ID: %hu\n", t->tg_pt_gp_valid_id); return (-22L); } else { } ret = kstrtoul(p, 0U, & tmp); if (ret < 0) { printk("\vInvalid value \'%s\', must be \'0\' or \'1\'\n", p); return (-22L); } else { } if (tmp > 1UL) { printk("\vInvalid value \'%ld\', must be \'0\' or \'1\'\n", tmp); return (-22L); } else { } if (tmp != 0UL) { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states | 2; } else { t->tg_pt_gp_alua_supported_states = t->tg_pt_gp_alua_supported_states & -3; } return ((ssize_t )c); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_support_active_nonoptimized = {{"alua_support_active_nonoptimized", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_support_active_nonoptimized, & target_core_alua_tg_pt_gp_store_attr_alua_support_active_nonoptimized}; static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract alua_write_metadata\n"); return ((ssize_t )ret); } else { } if (tmp != 0UL && tmp != 1UL) { printk("\vIllegal value for alua_write_metadata: %lu\n", tmp); return (-22L); } else { } tg_pt_gp->tg_pt_gp_write_metadata = (int )tmp; return ((ssize_t )count); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_alua_write_metadata = {{"alua_write_metadata", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_alua_write_metadata, & target_core_alua_tg_pt_gp_store_attr_alua_write_metadata}; static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { ssize_t tmp ; { tmp = core_alua_show_nonop_delay_msecs(tg_pt_gp, page); return (tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { ssize_t tmp ; { tmp = core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count); return (tmp); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_nonop_delay_msecs = {{"nonop_delay_msecs", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs, & target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs}; static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { ssize_t tmp ; { tmp = core_alua_show_trans_delay_msecs(tg_pt_gp, page); return (tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { ssize_t tmp ; { tmp = core_alua_store_trans_delay_msecs(tg_pt_gp, page, count); return (tmp); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_trans_delay_msecs = {{"trans_delay_msecs", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs, & target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs}; static ssize_t target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { ssize_t tmp ; { tmp = core_alua_show_implicit_trans_secs(tg_pt_gp, page); return (tmp); } } static ssize_t 
target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { ssize_t tmp ; { tmp = core_alua_store_implicit_trans_secs(tg_pt_gp, page, count); return (tmp); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_implicit_trans_secs = {{"implicit_trans_secs", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs, & target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs}; static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { ssize_t tmp ; { tmp = core_alua_show_preferred_bit(tg_pt_gp, page); return (tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { ssize_t tmp ; { tmp = core_alua_store_preferred_bit(tg_pt_gp, page, count); return (tmp); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_preferred = {{"preferred", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_preferred, & target_core_alua_tg_pt_gp_store_attr_preferred}; static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { if (tg_pt_gp->tg_pt_gp_valid_id == 0) { return (0L); } else { } tmp = sprintf(page, "%hu\n", (int )tg_pt_gp->tg_pt_gp_id); return ((ssize_t )tmp); } } static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { struct config_group *alua_tg_pt_gp_cg ; unsigned long tg_pt_gp_id ; int ret ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { alua_tg_pt_gp_cg = & tg_pt_gp->tg_pt_gp_group; ret = kstrtoul(page, 0U, & tg_pt_gp_id); if (ret < 0) { printk("\vkstrtoul() returned %d for tg_pt_gp_id\n", ret); return ((ssize_t )ret); } else { } if (tg_pt_gp_id > 65535UL) { printk("\vALUA tg_pt_gp_id: %lu exceeds maximum: 0x0000ffff\n", tg_pt_gp_id); return (-22L); } else { } ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (int )((unsigned short )tg_pt_gp_id)); if (ret < 0) { return (-22L); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Set ALUA Target Port Group: core/alua/tg_pt_gps/%s to ID: %hu\n"; descriptor.lineno = 2877U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = config_item_name(& alua_tg_pt_gp_cg->cg_item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Set ALUA Target Port Group: core/alua/tg_pt_gps/%s to ID: %hu\n", tmp, (int )tg_pt_gp->tg_pt_gp_id); } else { } return ((ssize_t )count); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_tg_pt_gp_id = {{"tg_pt_gp_id", & __this_module, 420U}, & target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id, & target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id}; static ssize_t target_core_alua_tg_pt_gp_show_attr_members(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { struct se_lun *lun ; ssize_t len ; ssize_t cur_len ; unsigned char buf[256U] ; struct list_head const *__mptr ; struct se_portal_group *tpg ; char *tmp ; u16 tmp___0 ; char *tmp___1 ; char *tmp___2 ; int 
tmp___3 ; struct list_head const *__mptr___0 ; { len = 0L; memset((void *)(& buf), 0, 256UL); spin_lock(& tg_pt_gp->tg_pt_gp_lock); __mptr = (struct list_head const *)tg_pt_gp->tg_pt_gp_lun_list.next; lun = (struct se_lun *)__mptr + 0xfffffffffffffed0UL; goto ldv_63612; ldv_63611: tpg = lun->lun_tpg; tmp = config_item_name(& lun->lun_group.cg_item); tmp___0 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___1 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___2 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___3 = snprintf((char *)(& buf), 256UL, "%s/%s/tpgt_%hu/%s\n", tmp___2, tmp___1, (int )tmp___0, tmp); cur_len = (ssize_t )tmp___3; cur_len = cur_len + 1L; if ((unsigned long )(cur_len + len) > 4096UL) { printk("\fRan out of lu_gp_show_attr_members buffer\n"); goto ldv_63610; } else { } memcpy((void *)page + (unsigned long )len, (void const *)(& buf), (size_t )cur_len); len = len + cur_len; __mptr___0 = (struct list_head const *)lun->lun_tg_pt_gp_link.next; lun = (struct se_lun *)__mptr___0 + 0xfffffffffffffed0UL; ldv_63612: ; if ((unsigned long )(& lun->lun_tg_pt_gp_link) != (unsigned long )(& tg_pt_gp->tg_pt_gp_lun_list)) { goto ldv_63611; } else { } ldv_63610: spin_unlock(& tg_pt_gp->tg_pt_gp_lock); return (len); } } static struct target_core_alua_tg_pt_gp_attribute target_core_alua_tg_pt_gp_members = {{"members", & __this_module, 292U}, & target_core_alua_tg_pt_gp_show_attr_members, 0}; static struct t10_alua_tg_pt_gp *to_target_core_alua_tg_pt_gp(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct t10_alua_tg_pt_gp *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffed0UL; } else { tmp___0 = (struct t10_alua_tg_pt_gp *)0; } return (tmp___0); } } static ssize_t target_core_alua_tg_pt_gp_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct t10_alua_tg_pt_gp *t10_alua_tg_pt_gp ; struct t10_alua_tg_pt_gp *tmp ; struct target_core_alua_tg_pt_gp_attribute *target_core_alua_tg_pt_gp_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_alua_tg_pt_gp(item); t10_alua_tg_pt_gp = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_alua_tg_pt_gp_attr = (struct target_core_alua_tg_pt_gp_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_alua_tg_pt_gp_attr->show != (unsigned long )((ssize_t (*)(struct t10_alua_tg_pt_gp * , char * ))0)) { ret = (*(target_core_alua_tg_pt_gp_attr->show))(t10_alua_tg_pt_gp, page); } else { } return (ret); } } static ssize_t target_core_alua_tg_pt_gp_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct t10_alua_tg_pt_gp *t10_alua_tg_pt_gp ; struct t10_alua_tg_pt_gp *tmp ; struct target_core_alua_tg_pt_gp_attribute *target_core_alua_tg_pt_gp_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_alua_tg_pt_gp(item); t10_alua_tg_pt_gp = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_alua_tg_pt_gp_attr = (struct target_core_alua_tg_pt_gp_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_alua_tg_pt_gp_attr->store != (unsigned long )((ssize_t (*)(struct t10_alua_tg_pt_gp * , char const * , size_t ))0)) { ret = (*(target_core_alua_tg_pt_gp_attr->store))(t10_alua_tg_pt_gp, page, count); } else { } return (ret); } } static struct configfs_attribute 
*target_core_alua_tg_pt_gp_attrs[18U] = { & target_core_alua_tg_pt_gp_alua_access_state.attr, & target_core_alua_tg_pt_gp_alua_access_status.attr, & target_core_alua_tg_pt_gp_alua_access_type.attr, & target_core_alua_tg_pt_gp_alua_support_transitioning.attr, & target_core_alua_tg_pt_gp_alua_support_offline.attr, & target_core_alua_tg_pt_gp_alua_support_lba_dependent.attr, & target_core_alua_tg_pt_gp_alua_support_unavailable.attr, & target_core_alua_tg_pt_gp_alua_support_standby.attr, & target_core_alua_tg_pt_gp_alua_support_active_nonoptimized.attr, & target_core_alua_tg_pt_gp_alua_support_active_optimized.attr, & target_core_alua_tg_pt_gp_alua_write_metadata.attr, & target_core_alua_tg_pt_gp_nonop_delay_msecs.attr, & target_core_alua_tg_pt_gp_trans_delay_msecs.attr, & target_core_alua_tg_pt_gp_implicit_trans_secs.attr, & target_core_alua_tg_pt_gp_preferred.attr, & target_core_alua_tg_pt_gp_tg_pt_gp_id.attr, & target_core_alua_tg_pt_gp_members.attr, (struct configfs_attribute *)0}; static void target_core_alua_tg_pt_gp_release(struct config_item *item ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; struct config_group const *__mptr ; struct config_group *tmp ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffed0UL; core_alua_free_tg_pt_gp(tg_pt_gp); return; } } static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {& target_core_alua_tg_pt_gp_release, & target_core_alua_tg_pt_gp_attr_show, & target_core_alua_tg_pt_gp_attr_store, 0, 0}; static struct config_item_type target_core_alua_tg_pt_gp_cit = {& __this_module, & target_core_alua_tg_pt_gp_ops, 0, (struct configfs_attribute **)(& target_core_alua_tg_pt_gp_attrs)}; static struct config_group *target_core_alua_create_tg_pt_gp(struct config_group *group , char const *name ) { struct t10_alua *alua ; struct config_group const *__mptr ; struct t10_alua_tg_pt_gp *tg_pt_gp ; struct config_group *alua_tg_pt_gp_cg ; struct config_item *alua_tg_pt_gp_ci ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { __mptr = (struct config_group const *)group; alua = (struct t10_alua *)__mptr + 0xffffffffffffff40UL; alua_tg_pt_gp_cg = (struct config_group *)0; alua_tg_pt_gp_ci = (struct config_item *)0; tg_pt_gp = core_alua_allocate_tg_pt_gp(alua->t10_dev, name, 0); if ((unsigned long )tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { return ((struct config_group *)0); } else { } alua_tg_pt_gp_cg = & tg_pt_gp->tg_pt_gp_group; alua_tg_pt_gp_ci = & alua_tg_pt_gp_cg->cg_item; config_group_init_type_name(alua_tg_pt_gp_cg, name, & target_core_alua_tg_pt_gp_cit); descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_create_tg_pt_gp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Allocated ALUA Target Port Group: alua/tg_pt_gps/%s\n"; descriptor.lineno = 2994U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = config_item_name(alua_tg_pt_gp_ci); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Allocated ALUA Target Port Group: alua/tg_pt_gps/%s\n", tmp); } else { } return (alua_tg_pt_gp_cg); } } static void target_core_alua_drop_tg_pt_gp(struct config_group *group , struct config_item *item ) { struct t10_alua_tg_pt_gp 
*tg_pt_gp ; struct config_group const *__mptr ; struct config_group *tmp ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffed0UL; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_alua_drop_tg_pt_gp"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Target_Core_ConfigFS: Releasing ALUA Target Port Group: alua/tg_pt_gps/%s, ID: %hu\n"; descriptor.lineno = 3008U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = config_item_name(item); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Releasing ALUA Target Port Group: alua/tg_pt_gps/%s, ID: %hu\n", tmp___0, (int )tg_pt_gp->tg_pt_gp_id); } else { } config_item_put(item); return; } } static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {0, & target_core_alua_create_tg_pt_gp, 0, 0, & target_core_alua_drop_tg_pt_gp}; static void target_core_setup_dev_alua_tg_pt_gps_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_alua_tg_pt_gps_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_core_alua_tg_pt_gps_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_alua_tg_pt_gps_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 3021U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev_alua_tg_pt_gps"); } else { } return; } } static struct config_item_type target_core_alua_cit = {& __this_module, (struct configfs_item_operations *)0, 0, (struct configfs_attribute **)0}; static struct config_group *target_core_stat_mkdir(struct config_group *group , char const *name ) { void *tmp ; { tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } } static void target_core_stat_rmdir(struct config_group *group , struct config_item *item ) { { return; } } static struct configfs_group_operations target_core_stat_group_ops = {0, & target_core_stat_mkdir, 0, 0, & target_core_stat_rmdir}; static void target_core_setup_dev_stat_cit(struct target_backend *tb ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tb->tb_dev_stat_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_core_stat_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tb->ops)->owner; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_setup_dev_stat_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; 
descriptor.format = "Setup generic %s\n"; descriptor.lineno = 3062U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"dev_stat"); } else { } return; } } static struct config_group *target_core_make_subdev(struct config_group *group , char const *name ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; struct config_item *hba_ci ; struct se_hba *hba ; struct se_hba *tmp ; struct target_backend *tb ; struct se_device *dev ; struct config_group *dev_cg ; struct config_group *tg_pt_gp_cg ; struct config_group *dev_stat_grp ; int errno ; int ret ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; { hba_ci = & group->cg_item; tmp = item_to_hba(hba_ci); hba = tmp; tb = hba->backend; dev_cg = (struct config_group *)0; tg_pt_gp_cg = (struct config_group *)0; dev_stat_grp = (struct config_group *)0; errno = -12; ret = ldv_mutex_lock_interruptible_24(& hba->hba_access_mutex); if (ret != 0) { tmp___0 = ERR_PTR((long )ret); return ((struct config_group *)tmp___0); } else { } dev = target_alloc_device(hba, name); if ((unsigned long )dev == (unsigned long )((struct se_device *)0)) { goto out_unlock; } else { } dev_cg = & dev->dev_group; tmp___1 = kmalloc(48UL, 208U); dev_cg->default_groups = (struct config_group **)tmp___1; if ((unsigned long )dev_cg->default_groups == (unsigned long )((struct config_group **)0)) { goto out_free_device; } else { } config_group_init_type_name(dev_cg, name, & tb->tb_dev_cit); config_group_init_type_name(& dev->dev_attrib.da_group, "attrib", & tb->tb_dev_attrib_cit); config_group_init_type_name(& dev->dev_pr_group, "pr", & tb->tb_dev_pr_cit); config_group_init_type_name(& dev->t10_wwn.t10_wwn_group, "wwn", & tb->tb_dev_wwn_cit); config_group_init_type_name(& dev->t10_alua.alua_tg_pt_gps_group, "alua", & tb->tb_dev_alua_tg_pt_gps_cit); config_group_init_type_name(& dev->dev_stat_grps.stat_group, "statistics", & tb->tb_dev_stat_cit); *(dev_cg->default_groups) = & dev->dev_attrib.da_group; *(dev_cg->default_groups + 1UL) = & dev->dev_pr_group; *(dev_cg->default_groups + 2UL) = & dev->t10_wwn.t10_wwn_group; *(dev_cg->default_groups + 3UL) = & dev->t10_alua.alua_tg_pt_gps_group; *(dev_cg->default_groups + 4UL) = & dev->dev_stat_grps.stat_group; *(dev_cg->default_groups + 5UL) = (struct config_group *)0; tg_pt_gp = core_alua_allocate_tg_pt_gp(dev, "default_tg_pt_gp", 1); if ((unsigned long )tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { goto out_free_dev_cg_default_groups; } else { } dev->t10_alua.default_tg_pt_gp = tg_pt_gp; tg_pt_gp_cg = & dev->t10_alua.alua_tg_pt_gps_group; tmp___2 = kmalloc(16UL, 208U); tg_pt_gp_cg->default_groups = (struct config_group **)tmp___2; if ((unsigned long )tg_pt_gp_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate tg_pt_gp_cg->default_groups\n"); goto out_free_tg_pt_gp; } else { } config_group_init_type_name(& tg_pt_gp->tg_pt_gp_group, "default_tg_pt_gp", & target_core_alua_tg_pt_gp_cit); *(tg_pt_gp_cg->default_groups) = & tg_pt_gp->tg_pt_gp_group; *(tg_pt_gp_cg->default_groups + 1UL) = (struct config_group *)0; dev_stat_grp = & dev->dev_stat_grps.stat_group; tmp___3 = kmalloc(32UL, 208U); dev_stat_grp->default_groups = (struct config_group **)tmp___3; if ((unsigned long )dev_stat_grp->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate dev_stat_grp->default_groups\n"); goto out_free_tg_pt_gp_cg_default_groups; 
} else { } target_stat_setup_dev_default_groups(dev); ldv_mutex_unlock_25(& hba->hba_access_mutex); return (dev_cg); out_free_tg_pt_gp_cg_default_groups: kfree((void const *)tg_pt_gp_cg->default_groups); out_free_tg_pt_gp: core_alua_free_tg_pt_gp(tg_pt_gp); out_free_dev_cg_default_groups: kfree((void const *)dev_cg->default_groups); out_free_device: target_free_device(dev); out_unlock: ldv_mutex_unlock_26(& hba->hba_access_mutex); tmp___4 = ERR_PTR((long )errno); return ((struct config_group *)tmp___4); } } static void target_core_drop_subdev(struct config_group *group , struct config_item *item ) { struct config_group *dev_cg ; struct config_group *tmp ; struct se_device *dev ; struct config_group const *__mptr ; struct se_hba *hba ; struct config_item *df_item ; struct config_group *tg_pt_gp_cg ; struct config_group *dev_stat_grp ; int i ; { tmp = to_config_group(item); dev_cg = tmp; __mptr = (struct config_group const *)dev_cg; dev = (struct se_device *)__mptr + 0xfffffffffffff7c0UL; hba = item_to_hba(& (dev->se_hba)->hba_group.cg_item); ldv_mutex_lock_27(& hba->hba_access_mutex); dev_stat_grp = & dev->dev_stat_grps.stat_group; i = 0; goto ldv_63726; ldv_63725: df_item = & (*(dev_stat_grp->default_groups + (unsigned long )i))->cg_item; *(dev_stat_grp->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_63726: ; if ((unsigned long )*(dev_stat_grp->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63725; } else { } kfree((void const *)dev_stat_grp->default_groups); tg_pt_gp_cg = & dev->t10_alua.alua_tg_pt_gps_group; i = 0; goto ldv_63729; ldv_63728: df_item = & (*(tg_pt_gp_cg->default_groups + (unsigned long )i))->cg_item; *(tg_pt_gp_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_63729: ; if ((unsigned long )*(tg_pt_gp_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63728; } else { } kfree((void const *)tg_pt_gp_cg->default_groups); dev->t10_alua.default_tg_pt_gp = (struct t10_alua_tg_pt_gp *)0; i = 0; goto ldv_63732; ldv_63731: df_item = & (*(dev_cg->default_groups + (unsigned long )i))->cg_item; *(dev_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_63732: ; if ((unsigned long )*(dev_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63731; } else { } config_item_put(item); ldv_mutex_unlock_28(& hba->hba_access_mutex); return; } } static struct configfs_group_operations target_core_hba_group_ops = {0, & target_core_make_subdev, 0, 0, & target_core_drop_subdev}; static ssize_t target_core_hba_show_attr_hba_info(struct se_hba *hba , char *page ) { int tmp ; { tmp = sprintf(page, "HBA Index: %d plugin: %s version: %s\n", hba->hba_id, (char const *)(& ((hba->backend)->ops)->name), (char *)"v5.0"); return ((ssize_t )tmp); } } static struct target_core_hba_attribute target_core_hba_hba_info = {{"hba_info", & __this_module, 292U}, & target_core_hba_show_attr_hba_info, 0}; static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba , char *page ) { int hba_mode ; int tmp ; { hba_mode = 0; if ((hba->hba_flags & 2U) != 0U) { hba_mode = 1; } else { } tmp = sprintf(page, "%d\n", hba_mode); return ((ssize_t )tmp); } } static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba , char const *page , size_t count ) { unsigned long mode_flag ; int ret ; { if ((unsigned long 
)((hba->backend)->ops)->pmode_enable_hba == (unsigned long )((int (*/* const */)(struct se_hba * , unsigned long ))0)) { return (-22L); } else { } ret = kstrtoul(page, 0U, & mode_flag); if (ret < 0) { printk("\vUnable to extract hba mode flag: %d\n", ret); return ((ssize_t )ret); } else { } if (hba->dev_count != 0U) { printk("\vUnable to set hba_mode with active devices\n"); return (-22L); } else { } ret = (*(((hba->backend)->ops)->pmode_enable_hba))(hba, mode_flag); if (ret < 0) { return (-22L); } else { } if (ret > 0) { hba->hba_flags = hba->hba_flags | 2U; } else if (ret == 0) { hba->hba_flags = hba->hba_flags & 4294967293U; } else { } return ((ssize_t )count); } } static struct target_core_hba_attribute target_core_hba_hba_mode = {{"hba_mode", & __this_module, 420U}, & target_core_hba_show_attr_hba_mode, & target_core_hba_store_attr_hba_mode}; static struct se_hba *to_target_core_hba(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_hba *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_hba *)__mptr + 0xffffffffffffff88UL; } else { tmp___0 = (struct se_hba *)0; } return (tmp___0); } } static ssize_t target_core_hba_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_hba *se_hba ; struct se_hba *tmp ; struct target_core_hba_attribute *target_core_hba_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_hba(item); se_hba = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_hba_attr = (struct target_core_hba_attribute *)__mptr; ret = 0L; if ((unsigned long )target_core_hba_attr->show != (unsigned long )((ssize_t (*)(struct se_hba * , char * ))0)) { ret = (*(target_core_hba_attr->show))(se_hba, page); } else { } return (ret); } } static ssize_t target_core_hba_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_hba *se_hba ; struct se_hba *tmp ; struct target_core_hba_attribute *target_core_hba_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_core_hba(item); se_hba = tmp; __mptr = (struct configfs_attribute const *)attr; target_core_hba_attr = (struct target_core_hba_attribute *)__mptr; ret = -22L; if ((unsigned long )target_core_hba_attr->store != (unsigned long )((ssize_t (*)(struct se_hba * , char const * , size_t ))0)) { ret = (*(target_core_hba_attr->store))(se_hba, page, count); } else { } return (ret); } } static void target_core_hba_release(struct config_item *item ) { struct se_hba *hba ; struct config_group const *__mptr ; struct config_group *tmp ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; hba = (struct se_hba *)__mptr + 0xffffffffffffff88UL; core_delete_hba(hba); return; } } static struct configfs_attribute *target_core_hba_attrs[3U] = { & target_core_hba_hba_info.attr, & target_core_hba_hba_mode.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_core_hba_item_ops = {& target_core_hba_release, & target_core_hba_attr_show, & target_core_hba_attr_store, 0, 0}; static struct config_item_type target_core_hba_cit = {& __this_module, & target_core_hba_item_ops, & target_core_hba_group_ops, (struct configfs_attribute **)(& target_core_hba_attrs)}; static struct config_group *target_core_call_addhbatotarget(struct config_group *group , char const *name ) { char 
*se_plugin_str ; char *str ; char *str2 ; struct se_hba *hba ; char buf[64U] ; unsigned long plugin_dep_id ; int ret ; size_t tmp ; void *tmp___0 ; size_t tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; bool tmp___5 ; { plugin_dep_id = 0UL; memset((void *)(& buf), 0, 64UL); tmp___1 = strlen(name); if (tmp___1 > 63UL) { tmp = strlen(name); printk("\vPassed *name strlen(): %d exceeds TARGET_CORE_NAME_MAX_LEN: %d\n", (int )tmp, 64); tmp___0 = ERR_PTR(-36L); return ((struct config_group *)tmp___0); } else { } snprintf((char *)(& buf), 64UL, "%s", name); str = strstr((char const *)(& buf), "_"); if ((unsigned long )str == (unsigned long )((char *)0)) { printk("\vUnable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n"); tmp___2 = ERR_PTR(-22L); return ((struct config_group *)tmp___2); } else { } se_plugin_str = (char *)(& buf); str2 = strstr((char const *)str + 1U, "_"); if ((unsigned long )str2 != (unsigned long )((char *)0)) { *str2 = 0; str2 = str2 + 1; str = str2; } else { *str = 0; str = str + 1; } ret = kstrtoul((char const *)str, 0U, & plugin_dep_id); if (ret < 0) { printk("\vkstrtoul() returned %d for plugin_dep_id\n", ret); tmp___3 = ERR_PTR((long )ret); return ((struct config_group *)tmp___3); } else { } transport_subsystem_check_init(); hba = core_alloc_hba((char const *)se_plugin_str, (u32 )plugin_dep_id, 0U); tmp___5 = IS_ERR((void const *)hba); if ((int )tmp___5) { tmp___4 = ERR_CAST((void const *)hba); return ((struct config_group *)tmp___4); } else { } config_group_init_type_name(& hba->hba_group, name, & target_core_hba_cit); return (& hba->hba_group); } } static void target_core_call_delhbafromtarget(struct config_group *group , struct config_item *item ) { { config_item_put(item); return; } } static struct configfs_group_operations target_core_group_ops = {0, & target_core_call_addhbatotarget, 0, 0, & target_core_call_delhbafromtarget}; static struct config_item_type target_core_cit = {& __this_module, (struct configfs_item_operations *)0, & target_core_group_ops, (struct configfs_attribute **)0}; void target_setup_backend_cits(struct target_backend *tb ) { { target_core_setup_dev_cit(tb); target_core_setup_dev_attrib_cit(tb); target_core_setup_dev_pr_cit(tb); target_core_setup_dev_wwn_cit(tb); target_core_setup_dev_alua_tg_pt_gps_cit(tb); target_core_setup_dev_stat_cit(tb); return; } } static int target_core_init_configfs(void) { struct config_group *target_cg ; struct config_group *hba_cg ; struct config_group *alua_cg ; struct config_group *lu_gp_cg ; struct configfs_subsystem *subsys ; struct t10_alua_lu_gp *lu_gp ; int ret ; struct _ddebug descriptor ; struct new_utsname *tmp ; struct new_utsname *tmp___0 ; long tmp___1 ; struct lock_class_key __key ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; bool tmp___5 ; void *tmp___6 ; struct _ddebug descriptor___0 ; struct new_utsname *tmp___7 ; struct new_utsname *tmp___8 ; long tmp___9 ; { hba_cg = (struct config_group *)0; alua_cg = (struct config_group *)0; lu_gp_cg = (struct config_group *)0; subsys = & target_core_fabrics; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_init_configfs"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "TARGET_CORE[0]: Loading Generic Kernel Storage Engine: %s on %s/%s on 4.2.0-rc1\n"; descriptor.lineno = 3419U; descriptor.flags = 0U; tmp___1 = 
ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp = utsname(); tmp___0 = utsname(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[0]: Loading Generic Kernel Storage Engine: %s on %s/%s on 4.2.0-rc1\n", (char *)"v5.0", (char *)(& tmp___0->sysname), (char *)(& tmp->machine)); } else { } config_group_init(& subsys->su_group); __mutex_init(& subsys->su_mutex, "&subsys->su_mutex", & __key); ret = init_se_kmem_caches(); if (ret < 0) { return (ret); } else { } target_cg = & subsys->su_group; tmp___2 = kmalloc(16UL, 208U); target_cg->default_groups = (struct config_group **)tmp___2; if ((unsigned long )target_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate target_cg->default_groups\n"); ret = -12; goto out_global; } else { } config_group_init_type_name(& target_core_hbagroup, "core", & target_core_cit); *(target_cg->default_groups) = & target_core_hbagroup; *(target_cg->default_groups + 1UL) = (struct config_group *)0; hba_cg = & target_core_hbagroup; tmp___3 = kmalloc(16UL, 208U); hba_cg->default_groups = (struct config_group **)tmp___3; if ((unsigned long )hba_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate hba_cg->default_groups\n"); ret = -12; goto out_global; } else { } config_group_init_type_name(& alua_group, "alua", & target_core_alua_cit); *(hba_cg->default_groups) = & alua_group; *(hba_cg->default_groups + 1UL) = (struct config_group *)0; alua_cg = & alua_group; tmp___4 = kmalloc(16UL, 208U); alua_cg->default_groups = (struct config_group **)tmp___4; if ((unsigned long )alua_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate alua_cg->default_groups\n"); ret = -12; goto out_global; } else { } config_group_init_type_name(& alua_lu_gps_group, "lu_gps", & target_core_alua_lu_gps_cit); *(alua_cg->default_groups) = & alua_lu_gps_group; *(alua_cg->default_groups + 1UL) = (struct config_group *)0; lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1); tmp___5 = IS_ERR((void const *)lu_gp); if ((int )tmp___5) { ret = -12; goto out_global; } else { } lu_gp_cg = & alua_lu_gps_group; tmp___6 = kmalloc(16UL, 208U); lu_gp_cg->default_groups = (struct config_group **)tmp___6; if ((unsigned long )lu_gp_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate lu_gp_cg->default_groups\n"); ret = -12; goto out_global; } else { } config_group_init_type_name(& lu_gp->lu_gp_group, "default_lu_gp", & target_core_alua_lu_gp_cit); *(lu_gp_cg->default_groups) = & lu_gp->lu_gp_group; *(lu_gp_cg->default_groups + 1UL) = (struct config_group *)0; default_lu_gp = lu_gp; ret = configfs_register_subsystem(subsys); if (ret < 0) { printk("\vError %d while registering subsystem %s\n", ret, (char *)(& subsys->su_group.cg_item.ci_namebuf)); goto out_global; } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_core_init_configfs"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor___0.format = "TARGET_CORE[0]: Initialized ConfigFS Fabric Infrastructure: v5.0 on %s/%s on 4.2.0-rc1\n"; descriptor___0.lineno = 3510U; descriptor___0.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___7 = utsname(); tmp___8 = utsname(); 
__dynamic_pr_debug(& descriptor___0, "TARGET_CORE[0]: Initialized ConfigFS Fabric Infrastructure: v5.0 on %s/%s on 4.2.0-rc1\n", (char *)(& tmp___8->sysname), (char *)(& tmp___7->machine)); } else { } ret = rd_module_init(); if (ret < 0) { goto out; } else { } ret = core_dev_setup_virtual_lun0(); if (ret < 0) { goto out; } else { } ret = target_xcopy_setup_pt(); if (ret < 0) { goto out; } else { } return (0); out: configfs_unregister_subsystem(subsys); core_dev_release_virtual_lun0(); rd_module_exit(); out_global: ; if ((unsigned long )default_lu_gp != (unsigned long )((struct t10_alua_lu_gp *)0)) { core_alua_free_lu_gp(default_lu_gp); default_lu_gp = (struct t10_alua_lu_gp *)0; } else { } if ((unsigned long )lu_gp_cg != (unsigned long )((struct config_group *)0)) { kfree((void const *)lu_gp_cg->default_groups); } else { } if ((unsigned long )alua_cg != (unsigned long )((struct config_group *)0)) { kfree((void const *)alua_cg->default_groups); } else { } if ((unsigned long )hba_cg != (unsigned long )((struct config_group *)0)) { kfree((void const *)hba_cg->default_groups); } else { } kfree((void const *)target_cg->default_groups); release_se_kmem_caches(); return (ret); } } static void target_core_exit_configfs(void) { struct config_group *hba_cg ; struct config_group *alua_cg ; struct config_group *lu_gp_cg ; struct config_item *item ; int i ; struct _ddebug descriptor ; long tmp ; { lu_gp_cg = & alua_lu_gps_group; i = 0; goto ldv_63842; ldv_63841: item = & (*(lu_gp_cg->default_groups + (unsigned long )i))->cg_item; *(lu_gp_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(item); i = i + 1; ldv_63842: ; if ((unsigned long )*(lu_gp_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63841; } else { } kfree((void const *)lu_gp_cg->default_groups); lu_gp_cg->default_groups = (struct config_group **)0; alua_cg = & alua_group; i = 0; goto ldv_63845; ldv_63844: item = & (*(alua_cg->default_groups + (unsigned long )i))->cg_item; *(alua_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(item); i = i + 1; ldv_63845: ; if ((unsigned long )*(alua_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63844; } else { } kfree((void const *)alua_cg->default_groups); alua_cg->default_groups = (struct config_group **)0; hba_cg = & target_core_hbagroup; i = 0; goto ldv_63848; ldv_63847: item = & (*(hba_cg->default_groups + (unsigned long )i))->cg_item; *(hba_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(item); i = i + 1; ldv_63848: ; if ((unsigned long )*(hba_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_63847; } else { } kfree((void const *)hba_cg->default_groups); hba_cg->default_groups = (struct config_group **)0; configfs_unregister_subsystem(& target_core_fabrics); kfree((void const *)target_core_fabrics.su_group.default_groups); core_alua_free_lu_gp(default_lu_gp); default_lu_gp = (struct t10_alua_lu_gp *)0; descriptor.modname = "target_core_mod"; descriptor.function = "target_core_exit_configfs"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_configfs.c"; descriptor.format = "TARGET_CORE[0]: Released ConfigFS Fabric Infrastructure\n"; descriptor.lineno = 3591U; descriptor.flags = 
0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "TARGET_CORE[0]: Released ConfigFS Fabric Infrastructure\n"); } else { } core_dev_release_virtual_lun0(); rd_module_exit(); target_xcopy_release_pt(); release_se_kmem_caches(); return; } } extern int ldv_probe_117(void) ; int ldv_retval_0 ; extern int ldv_probe_142(void) ; extern void ldv_initialize(void) ; void ldv_check_final_state(void) ; extern int ldv_probe_145(void) ; extern int ldv_probe_123(void) ; void ldv_initialize_target_core_dev_pr_attribute_154(void) { void *tmp ; { tmp = ldv_init_zalloc(5048UL); target_core_dev_pr_res_aptpl_metadata_group0 = (struct se_device *)tmp; return; } } void ldv_initialize_target_core_alua_lu_gp_attribute_144(void) { void *tmp ; { tmp = ldv_init_zalloc(232UL); target_core_alua_lu_gp_lu_gp_id_group0 = (struct t10_alua_lu_gp *)tmp; return; } } void ldv_initialize_configfs_item_operations_163(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_dev_wwn_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_dev_wwn_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_169(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_dev_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_dev_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_187(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_is_nonrot_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_group_operations_203(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_fabric_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_193(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_caw_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_139(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_access_status_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_138(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_access_type_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_134(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_unavailable_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_configfs_item_operations_145(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_dev_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_dev_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_133(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_standby_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_185(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_force_pr_aptpl_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_177(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); 
target_core_dev_attrib_max_unmap_block_desc_count_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_192(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_3pc_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_dev_wwn_attribute_168(void) { void *tmp ; { tmp = ldv_init_zalloc(496UL); target_core_dev_wwn_vpd_unit_serial_group0 = (struct t10_wwn *)tmp; return; } } void ldv_initialize_target_core_dev_wwn_attribute_164(void) { void *tmp ; { tmp = ldv_init_zalloc(496UL); target_core_dev_wwn_vpd_assoc_scsi_target_device_group0 = (struct t10_wwn *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_195(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_tpu_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_item_operations_142(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_alua_lu_gp_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_alua_lu_gp_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_136(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_offline_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_189(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_pi_prot_format_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_group_operations_116(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_178(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_max_unmap_lba_count_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_group_operations_120(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_hba_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_200(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_fua_write_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_137(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_transitioning_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_hba_attribute_118(void) { void *tmp ; { tmp = ldv_init_zalloc(400UL); target_core_hba_hba_mode_group0 = (struct se_hba *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_188(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_enforce_pr_isids_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_186(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_rest_reord_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_135(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_lba_dependent_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_194(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_tpws_group0 = (struct se_dev_attrib *)tmp; 
return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_127(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_implicit_trans_secs_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_dev_wwn_attribute_166(void) { void *tmp ; { tmp = ldv_init_zalloc(496UL); target_core_dev_wwn_vpd_assoc_logical_unit_group0 = (struct t10_wwn *)tmp; return; } } void ldv_initialize_configfs_group_operations_141(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_alua_lu_gps_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_181(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_optimal_sectors_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_175(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_unmap_granularity_alignment_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_197(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_ua_intlck_ctrl_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_group_operations_121(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_stat_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_core_dev_wwn_attribute_165(void) { void *tmp ; { tmp = ldv_init_zalloc(496UL); target_core_dev_wwn_vpd_assoc_target_port_group0 = (struct t10_wwn *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_128(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_trans_delay_msecs_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_199(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_fua_read_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_126(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_preferred_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_140(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_access_state_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_130(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_write_metadata_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_configfs_item_operations_123(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_alua_tg_pt_gp_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_alua_tg_pt_gp_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_group_operations_122(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_core_alua_tg_pt_gps_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_176(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_unmap_granularity_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_196(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_tas_group0 = (struct se_dev_attrib *)tmp; return; } } void 
ldv_initialize_target_backend_dev_attrib_attribute_202(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_model_alias_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_129(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_nonop_delay_msecs_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_174(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_max_write_same_len_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_131(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_active_nonoptimized_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_configfs_item_operations_153(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_dev_pr_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_dev_pr_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_201(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_dpo_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_configfs_item_operations_117(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_core_hba_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_core_hba_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_191(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_pi_prot_type_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_125(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_tg_pt_gp_id_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_core_dev_wwn_attribute_167(void) { void *tmp ; { tmp = ldv_init_zalloc(496UL); target_core_dev_wwn_vpd_protocol_identifier_group0 = (struct t10_wwn *)tmp; return; } } void ldv_initialize_target_core_alua_tg_pt_gp_attribute_132(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_core_alua_tg_pt_gp_alua_support_active_optimized_group0 = (struct t10_alua_tg_pt_gp *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_179(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_queue_depth_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_183(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_block_size_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_initialize_target_backend_dev_attrib_attribute_198(void) { void *tmp ; { tmp = ldv_init_zalloc(240UL); target_core_dev_attrib_emulate_write_cache_group0 = (struct se_dev_attrib *)tmp; return; } } void ldv_main_exported_84(void) ; void ldv_main_exported_83(void) ; void ldv_main_exported_33(void) ; void ldv_main_exported_32(void) ; void ldv_main_exported_63(void) ; void ldv_main_exported_21(void) ; void ldv_main_exported_71(void) ; void ldv_main_exported_80(void) ; void ldv_main_exported_26(void) ; void ldv_main_exported_18(void) ; void ldv_main_exported_72(void) ; void ldv_main_exported_16(void) ; void ldv_main_exported_44(void) ; void ldv_main_exported_55(void) ; void ldv_main_exported_74(void) ; void 
ldv_main_exported_27(void) ; void ldv_main_exported_57(void) ; void ldv_main_exported_61(void) ; void ldv_main_exported_20(void) ; void ldv_main_exported_10(void) ; void ldv_main_exported_31(void) ; void ldv_main_exported_35(void) ; void ldv_main_exported_11(void) ; void ldv_main_exported_78(void) ; void ldv_main_exported_48(void) ; void ldv_main_exported_77(void) ; void ldv_main_exported_65(void) ; void ldv_main_exported_29(void) ; void ldv_main_exported_50(void) ; void ldv_main_exported_39(void) ; void ldv_main_exported_64(void) ; void ldv_main_exported_58(void) ; void ldv_main_exported_41(void) ; void ldv_main_exported_12(void) ; void ldv_main_exported_15(void) ; void ldv_main_exported_81(void) ; void ldv_main_exported_52(void) ; void ldv_main_exported_60(void) ; void ldv_main_exported_56(void) ; void ldv_main_exported_73(void) ; void ldv_main_exported_66(void) ; void ldv_main_exported_45(void) ; void ldv_main_exported_76(void) ; void ldv_main_exported_19(void) ; void ldv_main_exported_62(void) ; void ldv_main_exported_54(void) ; void ldv_main_exported_67(void) ; void ldv_main_exported_70(void) ; void ldv_main_exported_68(void) ; void ldv_main_exported_17(void) ; void ldv_main_exported_30(void) ; void ldv_main_exported_82(void) ; void ldv_main_exported_25(void) ; void ldv_main_exported_28(void) ; void ldv_main_exported_75(void) ; void ldv_main_exported_40(void) ; void ldv_main_exported_14(void) ; void ldv_main_exported_59(void) ; void ldv_main_exported_69(void) ; void ldv_main_exported_49(void) ; void ldv_main_exported_24(void) ; void ldv_main_exported_53(void) ; void ldv_main_exported_79(void) ; void ldv_main_exported_22(void) ; void ldv_main_exported_42(void) ; void ldv_main_exported_46(void) ; void ldv_main_exported_23(void) ; void ldv_main_exported_13(void) ; void ldv_main_exported_36(void) ; void ldv_main_exported_9(void) ; void ldv_main_exported_51(void) ; void ldv_main_exported_47(void) ; void ldv_main_exported_38(void) ; void ldv_main_exported_34(void) ; void ldv_main_exported_37(void) ; void ldv_main_exported_43(void) ; void ldv_main_exported_104(void) ; void ldv_main_exported_90(void) ; void ldv_main_exported_102(void) ; void ldv_main_exported_91(void) ; void ldv_main_exported_107(void) ; void ldv_main_exported_99(void) ; void ldv_main_exported_93(void) ; void ldv_main_exported_106(void) ; void ldv_main_exported_105(void) ; void ldv_main_exported_100(void) ; void ldv_main_exported_96(void) ; void ldv_main_exported_110(void) ; void ldv_main_exported_95(void) ; void ldv_main_exported_94(void) ; void ldv_main_exported_97(void) ; void ldv_main_exported_114(void) ; void ldv_main_exported_111(void) ; void ldv_main_exported_108(void) ; void ldv_main_exported_115(void) ; void ldv_main_exported_112(void) ; void ldv_main_exported_109(void) ; void ldv_main_exported_92(void) ; void ldv_main_exported_98(void) ; void ldv_main_exported_103(void) ; void ldv_main_exported_89(void) ; void ldv_main_exported_101(void) ; void ldv_main_exported_113(void) ; void ldv_main_exported_87(void) ; void ldv_main_exported_88(void) ; void ldv_main_exported_85(void) ; void ldv_main_exported_86(void) ; void ldv_main_exported_8(void) ; int main(void) { size_t ldvarg1 ; char *ldvarg0 ; void *tmp ; char *ldvarg2 ; void *tmp___0 ; char *ldvarg8 ; void *tmp___1 ; char *ldvarg10 ; void *tmp___2 ; size_t ldvarg9 ; size_t ldvarg18 ; char *ldvarg17 ; void *tmp___3 ; char *ldvarg19 ; void *tmp___4 ; char *ldvarg24 ; void *tmp___5 ; size_t ldvarg23 ; char *ldvarg22 ; void *tmp___6 ; char *ldvarg39 ; void *tmp___7 ; struct 
se_dev_attrib *ldvarg38 ; void *tmp___8 ; char *ldvarg41 ; void *tmp___9 ; struct se_device *ldvarg40 ; void *tmp___10 ; size_t ldvarg43 ; char *ldvarg42 ; void *tmp___11 ; char *ldvarg44 ; void *tmp___12 ; char *ldvarg54 ; void *tmp___13 ; char *ldvarg53 ; void *tmp___14 ; size_t ldvarg52 ; void *ldvarg58 ; void *tmp___15 ; char *ldvarg60 ; void *tmp___16 ; size_t ldvarg59 ; size_t ldvarg65 ; char *ldvarg66 ; void *tmp___17 ; char *ldvarg64 ; void *tmp___18 ; size_t ldvarg70 ; void *ldvarg67 ; void *tmp___19 ; char *ldvarg71 ; void *tmp___20 ; char *ldvarg68 ; void *tmp___21 ; void *ldvarg69 ; void *tmp___22 ; char *ldvarg87 ; void *tmp___23 ; struct se_device *ldvarg86 ; void *tmp___24 ; size_t ldvarg93 ; char *ldvarg94 ; void *tmp___25 ; char *ldvarg92 ; void *tmp___26 ; char *ldvarg95 ; void *tmp___27 ; struct config_item *ldvarg96 ; void *tmp___28 ; size_t ldvarg98 ; char *ldvarg97 ; void *tmp___29 ; char *ldvarg99 ; void *tmp___30 ; char *ldvarg102 ; void *tmp___31 ; size_t ldvarg101 ; char *ldvarg100 ; void *tmp___32 ; char *ldvarg110 ; void *tmp___33 ; size_t ldvarg108 ; char *ldvarg109 ; void *tmp___34 ; char *ldvarg113 ; void *tmp___35 ; size_t ldvarg114 ; char *ldvarg115 ; void *tmp___36 ; char *ldvarg134 ; void *tmp___37 ; struct configfs_attribute *ldvarg135 ; void *tmp___38 ; struct config_item *ldvarg133 ; void *tmp___39 ; char *ldvarg136 ; void *tmp___40 ; char *ldvarg138 ; void *tmp___41 ; size_t ldvarg137 ; size_t ldvarg140 ; char *ldvarg141 ; void *tmp___42 ; char *ldvarg139 ; void *tmp___43 ; size_t ldvarg143 ; char *ldvarg144 ; void *tmp___44 ; char *ldvarg142 ; void *tmp___45 ; char *ldvarg147 ; void *tmp___46 ; char *ldvarg145 ; void *tmp___47 ; size_t ldvarg146 ; char *ldvarg150 ; void *tmp___48 ; size_t ldvarg151 ; char *ldvarg152 ; void *tmp___49 ; char *ldvarg159 ; void *tmp___50 ; char *ldvarg162 ; void *tmp___51 ; size_t ldvarg161 ; void *ldvarg160 ; void *tmp___52 ; void *ldvarg158 ; void *tmp___53 ; size_t ldvarg164 ; char *ldvarg165 ; void *tmp___54 ; char *ldvarg163 ; void *tmp___55 ; size_t ldvarg167 ; char *ldvarg168 ; void *tmp___56 ; char *ldvarg166 ; void *tmp___57 ; char *ldvarg170 ; void *tmp___58 ; struct se_dev_attrib *ldvarg169 ; void *tmp___59 ; char *ldvarg171 ; void *tmp___60 ; char *ldvarg173 ; void *tmp___61 ; size_t ldvarg172 ; char *ldvarg183 ; void *tmp___62 ; char *ldvarg181 ; void *tmp___63 ; size_t ldvarg182 ; char *ldvarg185 ; void *tmp___64 ; struct se_dev_attrib *ldvarg184 ; void *tmp___65 ; char *ldvarg187 ; void *tmp___66 ; char *ldvarg188 ; void *tmp___67 ; size_t ldvarg186 ; char *ldvarg191 ; void *tmp___68 ; char *ldvarg193 ; void *tmp___69 ; size_t ldvarg192 ; size_t ldvarg197 ; char *ldvarg198 ; void *tmp___70 ; char *ldvarg196 ; void *tmp___71 ; size_t ldvarg200 ; char *ldvarg199 ; void *tmp___72 ; char *ldvarg201 ; void *tmp___73 ; char *ldvarg203 ; void *tmp___74 ; struct t10_alua_tg_pt_gp *ldvarg202 ; void *tmp___75 ; char *ldvarg209 ; void *tmp___76 ; size_t ldvarg208 ; char *ldvarg207 ; void *tmp___77 ; size_t ldvarg211 ; char *ldvarg212 ; void *tmp___78 ; char *ldvarg210 ; void *tmp___79 ; char *ldvarg213 ; void *tmp___80 ; struct config_item *ldvarg214 ; void *tmp___81 ; size_t ldvarg218 ; char *ldvarg217 ; void *tmp___82 ; char *ldvarg219 ; void *tmp___83 ; char *ldvarg225 ; void *tmp___84 ; char *ldvarg227 ; void *tmp___85 ; size_t ldvarg226 ; char *ldvarg229 ; void *tmp___86 ; struct se_device *ldvarg228 ; void *tmp___87 ; struct se_device *ldvarg230 ; void *tmp___88 ; char *ldvarg231 ; void *tmp___89 ; char 
*ldvarg234 ; void *tmp___90 ; size_t ldvarg233 ; char *ldvarg232 ; void *tmp___91 ; char *ldvarg242 ; void *tmp___92 ; size_t ldvarg241 ; char *ldvarg243 ; void *tmp___93 ; char *ldvarg246 ; void *tmp___94 ; size_t ldvarg247 ; char *ldvarg248 ; void *tmp___95 ; char *ldvarg250 ; void *tmp___96 ; struct se_dev_attrib *ldvarg249 ; void *tmp___97 ; char *ldvarg263 ; void *tmp___98 ; size_t ldvarg262 ; char *ldvarg261 ; void *tmp___99 ; struct se_hba *ldvarg264 ; void *tmp___100 ; char *ldvarg265 ; void *tmp___101 ; struct se_dev_attrib *ldvarg266 ; void *tmp___102 ; char *ldvarg267 ; void *tmp___103 ; char *ldvarg272 ; void *tmp___104 ; size_t ldvarg271 ; char *ldvarg270 ; void *tmp___105 ; struct se_device *ldvarg273 ; void *tmp___106 ; char *ldvarg274 ; void *tmp___107 ; char *ldvarg280 ; void *tmp___108 ; struct se_dev_attrib *ldvarg279 ; void *tmp___109 ; char *ldvarg293 ; void *tmp___110 ; char *ldvarg295 ; void *tmp___111 ; size_t ldvarg294 ; void *ldvarg300 ; void *tmp___112 ; char *ldvarg301 ; void *tmp___113 ; size_t ldvarg303 ; char *ldvarg302 ; void *tmp___114 ; char *ldvarg304 ; void *tmp___115 ; char *ldvarg307 ; void *tmp___116 ; size_t ldvarg305 ; char *ldvarg306 ; void *tmp___117 ; char *ldvarg310 ; void *tmp___118 ; char *ldvarg312 ; void *tmp___119 ; size_t ldvarg311 ; char *ldvarg322 ; void *tmp___120 ; char *ldvarg320 ; void *tmp___121 ; size_t ldvarg321 ; char *ldvarg325 ; void *tmp___122 ; char *ldvarg327 ; void *tmp___123 ; size_t ldvarg326 ; size_t ldvarg331 ; char *ldvarg329 ; void *tmp___124 ; void *ldvarg330 ; void *tmp___125 ; void *ldvarg328 ; void *tmp___126 ; char *ldvarg332 ; void *tmp___127 ; char *ldvarg334 ; void *tmp___128 ; size_t ldvarg333 ; char *ldvarg335 ; void *tmp___129 ; struct se_dev_attrib *ldvarg352 ; void *tmp___130 ; char *ldvarg353 ; void *tmp___131 ; char *ldvarg366 ; void *tmp___132 ; char *ldvarg368 ; void *tmp___133 ; size_t ldvarg367 ; char *ldvarg371 ; void *tmp___134 ; size_t ldvarg372 ; char *ldvarg373 ; void *tmp___135 ; char *ldvarg376 ; void *tmp___136 ; size_t ldvarg375 ; char *ldvarg374 ; void *tmp___137 ; char *ldvarg380 ; void *tmp___138 ; struct config_item *ldvarg381 ; void *tmp___139 ; char *ldvarg382 ; void *tmp___140 ; char *ldvarg384 ; void *tmp___141 ; size_t ldvarg383 ; char *ldvarg385 ; void *tmp___142 ; struct config_item *ldvarg386 ; void *tmp___143 ; size_t ldvarg395 ; char *ldvarg396 ; void *tmp___144 ; char *ldvarg394 ; void *tmp___145 ; char *ldvarg399 ; void *tmp___146 ; struct config_item *ldvarg400 ; void *tmp___147 ; struct se_device *ldvarg401 ; void *tmp___148 ; char *ldvarg402 ; void *tmp___149 ; char *ldvarg403 ; void *tmp___150 ; size_t ldvarg404 ; char *ldvarg405 ; void *tmp___151 ; char *ldvarg418 ; void *tmp___152 ; char *ldvarg416 ; void *tmp___153 ; size_t ldvarg417 ; size_t ldvarg422 ; char *ldvarg421 ; void *tmp___154 ; char *ldvarg423 ; void *tmp___155 ; void *ldvarg426 ; void *tmp___156 ; void *ldvarg424 ; void *tmp___157 ; char *ldvarg428 ; void *tmp___158 ; char *ldvarg425 ; void *tmp___159 ; size_t ldvarg427 ; struct se_device *ldvarg429 ; void *tmp___160 ; char *ldvarg430 ; void *tmp___161 ; char *ldvarg433 ; void *tmp___162 ; char *ldvarg431 ; void *tmp___163 ; size_t ldvarg432 ; char *ldvarg436 ; void *tmp___164 ; struct config_item *ldvarg437 ; void *tmp___165 ; char *ldvarg439 ; void *tmp___166 ; struct t10_alua_lu_gp *ldvarg438 ; void *tmp___167 ; char *ldvarg441 ; void *tmp___168 ; struct se_device *ldvarg440 ; void *tmp___169 ; size_t ldvarg456 ; char *ldvarg457 ; void *tmp___170 ; 
char *ldvarg455 ; void *tmp___171 ; char *ldvarg463 ; void *tmp___172 ; char *ldvarg461 ; void *tmp___173 ; size_t ldvarg462 ; void *ldvarg469 ; void *tmp___174 ; void *ldvarg467 ; void *tmp___175 ; char *ldvarg471 ; void *tmp___176 ; size_t ldvarg470 ; char *ldvarg468 ; void *tmp___177 ; char *ldvarg485 ; void *tmp___178 ; char *ldvarg486 ; void *tmp___179 ; size_t ldvarg484 ; size_t ldvarg488 ; char *ldvarg489 ; void *tmp___180 ; char *ldvarg487 ; void *tmp___181 ; size_t ldvarg491 ; char *ldvarg492 ; void *tmp___182 ; char *ldvarg490 ; void *tmp___183 ; char *ldvarg495 ; void *tmp___184 ; char *ldvarg493 ; void *tmp___185 ; size_t ldvarg494 ; char *ldvarg497 ; void *tmp___186 ; struct se_dev_attrib *ldvarg496 ; void *tmp___187 ; int tmp___188 ; int tmp___189 ; int tmp___190 ; int tmp___191 ; int tmp___192 ; int tmp___193 ; int tmp___194 ; int tmp___195 ; int tmp___196 ; int tmp___197 ; int tmp___198 ; int tmp___199 ; int tmp___200 ; int tmp___201 ; int tmp___202 ; int tmp___203 ; int tmp___204 ; int tmp___205 ; int tmp___206 ; int tmp___207 ; int tmp___208 ; int tmp___209 ; int tmp___210 ; int tmp___211 ; int tmp___212 ; int tmp___213 ; int tmp___214 ; int tmp___215 ; int tmp___216 ; int tmp___217 ; int tmp___218 ; int tmp___219 ; int tmp___220 ; int tmp___221 ; int tmp___222 ; int tmp___223 ; int tmp___224 ; int tmp___225 ; int tmp___226 ; int tmp___227 ; int tmp___228 ; int tmp___229 ; int tmp___230 ; int tmp___231 ; int tmp___232 ; int tmp___233 ; int tmp___234 ; int tmp___235 ; int tmp___236 ; int tmp___237 ; int tmp___238 ; int tmp___239 ; int tmp___240 ; int tmp___241 ; int tmp___242 ; int tmp___243 ; int tmp___244 ; int tmp___245 ; int tmp___246 ; int tmp___247 ; int tmp___248 ; int tmp___249 ; int tmp___250 ; int tmp___251 ; int tmp___252 ; int tmp___253 ; int tmp___254 ; int tmp___255 ; int tmp___256 ; int tmp___257 ; int tmp___258 ; int tmp___259 ; int tmp___260 ; int tmp___261 ; int tmp___262 ; int tmp___263 ; int tmp___264 ; int tmp___265 ; int tmp___266 ; int tmp___267 ; int tmp___268 ; int tmp___269 ; int tmp___270 ; int tmp___271 ; int tmp___272 ; int tmp___273 ; int tmp___274 ; int tmp___275 ; int tmp___276 ; int tmp___277 ; int tmp___278 ; { tmp = ldv_init_zalloc(1UL); ldvarg0 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg2 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg8 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg10 = (char *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg17 = (char *)tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg19 = (char *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg24 = (char *)tmp___5; tmp___6 = ldv_init_zalloc(1UL); ldvarg22 = (char *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg39 = (char *)tmp___7; tmp___8 = ldv_init_zalloc(240UL); ldvarg38 = (struct se_dev_attrib *)tmp___8; tmp___9 = ldv_init_zalloc(1UL); ldvarg41 = (char *)tmp___9; tmp___10 = ldv_init_zalloc(5048UL); ldvarg40 = (struct se_device *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg42 = (char *)tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg44 = (char *)tmp___12; tmp___13 = ldv_init_zalloc(1UL); ldvarg54 = (char *)tmp___13; tmp___14 = ldv_init_zalloc(1UL); ldvarg53 = (char *)tmp___14; tmp___15 = ldv_init_zalloc(1UL); ldvarg58 = tmp___15; tmp___16 = ldv_init_zalloc(1UL); ldvarg60 = (char *)tmp___16; tmp___17 = ldv_init_zalloc(1UL); ldvarg66 = (char *)tmp___17; tmp___18 = ldv_init_zalloc(1UL); ldvarg64 = (char *)tmp___18; tmp___19 = ldv_init_zalloc(1UL); ldvarg67 = tmp___19; tmp___20 = ldv_init_zalloc(1UL); ldvarg71 = (char *)tmp___20; 
tmp___21 = ldv_init_zalloc(1UL); ldvarg68 = (char *)tmp___21; tmp___22 = ldv_init_zalloc(1UL); ldvarg69 = tmp___22; tmp___23 = ldv_init_zalloc(1UL); ldvarg87 = (char *)tmp___23; tmp___24 = ldv_init_zalloc(5048UL); ldvarg86 = (struct se_device *)tmp___24; tmp___25 = ldv_init_zalloc(1UL); ldvarg94 = (char *)tmp___25; tmp___26 = ldv_init_zalloc(1UL); ldvarg92 = (char *)tmp___26; tmp___27 = ldv_init_zalloc(1UL); ldvarg95 = (char *)tmp___27; tmp___28 = ldv_init_zalloc(80UL); ldvarg96 = (struct config_item *)tmp___28; tmp___29 = ldv_init_zalloc(1UL); ldvarg97 = (char *)tmp___29; tmp___30 = ldv_init_zalloc(1UL); ldvarg99 = (char *)tmp___30; tmp___31 = ldv_init_zalloc(1UL); ldvarg102 = (char *)tmp___31; tmp___32 = ldv_init_zalloc(1UL); ldvarg100 = (char *)tmp___32; tmp___33 = ldv_init_zalloc(1UL); ldvarg110 = (char *)tmp___33; tmp___34 = ldv_init_zalloc(1UL); ldvarg109 = (char *)tmp___34; tmp___35 = ldv_init_zalloc(1UL); ldvarg113 = (char *)tmp___35; tmp___36 = ldv_init_zalloc(1UL); ldvarg115 = (char *)tmp___36; tmp___37 = ldv_init_zalloc(1UL); ldvarg134 = (char *)tmp___37; tmp___38 = ldv_init_zalloc(24UL); ldvarg135 = (struct configfs_attribute *)tmp___38; tmp___39 = ldv_init_zalloc(80UL); ldvarg133 = (struct config_item *)tmp___39; tmp___40 = ldv_init_zalloc(1UL); ldvarg136 = (char *)tmp___40; tmp___41 = ldv_init_zalloc(1UL); ldvarg138 = (char *)tmp___41; tmp___42 = ldv_init_zalloc(1UL); ldvarg141 = (char *)tmp___42; tmp___43 = ldv_init_zalloc(1UL); ldvarg139 = (char *)tmp___43; tmp___44 = ldv_init_zalloc(1UL); ldvarg144 = (char *)tmp___44; tmp___45 = ldv_init_zalloc(1UL); ldvarg142 = (char *)tmp___45; tmp___46 = ldv_init_zalloc(1UL); ldvarg147 = (char *)tmp___46; tmp___47 = ldv_init_zalloc(1UL); ldvarg145 = (char *)tmp___47; tmp___48 = ldv_init_zalloc(1UL); ldvarg150 = (char *)tmp___48; tmp___49 = ldv_init_zalloc(1UL); ldvarg152 = (char *)tmp___49; tmp___50 = ldv_init_zalloc(1UL); ldvarg159 = (char *)tmp___50; tmp___51 = ldv_init_zalloc(1UL); ldvarg162 = (char *)tmp___51; tmp___52 = ldv_init_zalloc(1UL); ldvarg160 = tmp___52; tmp___53 = ldv_init_zalloc(1UL); ldvarg158 = tmp___53; tmp___54 = ldv_init_zalloc(1UL); ldvarg165 = (char *)tmp___54; tmp___55 = ldv_init_zalloc(1UL); ldvarg163 = (char *)tmp___55; tmp___56 = ldv_init_zalloc(1UL); ldvarg168 = (char *)tmp___56; tmp___57 = ldv_init_zalloc(1UL); ldvarg166 = (char *)tmp___57; tmp___58 = ldv_init_zalloc(1UL); ldvarg170 = (char *)tmp___58; tmp___59 = ldv_init_zalloc(240UL); ldvarg169 = (struct se_dev_attrib *)tmp___59; tmp___60 = ldv_init_zalloc(1UL); ldvarg171 = (char *)tmp___60; tmp___61 = ldv_init_zalloc(1UL); ldvarg173 = (char *)tmp___61; tmp___62 = ldv_init_zalloc(1UL); ldvarg183 = (char *)tmp___62; tmp___63 = ldv_init_zalloc(1UL); ldvarg181 = (char *)tmp___63; tmp___64 = ldv_init_zalloc(1UL); ldvarg185 = (char *)tmp___64; tmp___65 = ldv_init_zalloc(240UL); ldvarg184 = (struct se_dev_attrib *)tmp___65; tmp___66 = ldv_init_zalloc(1UL); ldvarg187 = (char *)tmp___66; tmp___67 = ldv_init_zalloc(1UL); ldvarg188 = (char *)tmp___67; tmp___68 = ldv_init_zalloc(1UL); ldvarg191 = (char *)tmp___68; tmp___69 = ldv_init_zalloc(1UL); ldvarg193 = (char *)tmp___69; tmp___70 = ldv_init_zalloc(1UL); ldvarg198 = (char *)tmp___70; tmp___71 = ldv_init_zalloc(1UL); ldvarg196 = (char *)tmp___71; tmp___72 = ldv_init_zalloc(1UL); ldvarg199 = (char *)tmp___72; tmp___73 = ldv_init_zalloc(1UL); ldvarg201 = (char *)tmp___73; tmp___74 = ldv_init_zalloc(1UL); ldvarg203 = (char *)tmp___74; tmp___75 = ldv_init_zalloc(696UL); ldvarg202 = (struct t10_alua_tg_pt_gp 
*)tmp___75; tmp___76 = ldv_init_zalloc(1UL); ldvarg209 = (char *)tmp___76; tmp___77 = ldv_init_zalloc(1UL); ldvarg207 = (char *)tmp___77; tmp___78 = ldv_init_zalloc(1UL); ldvarg212 = (char *)tmp___78; tmp___79 = ldv_init_zalloc(1UL); ldvarg210 = (char *)tmp___79; tmp___80 = ldv_init_zalloc(1UL); ldvarg213 = (char *)tmp___80; tmp___81 = ldv_init_zalloc(80UL); ldvarg214 = (struct config_item *)tmp___81; tmp___82 = ldv_init_zalloc(1UL); ldvarg217 = (char *)tmp___82; tmp___83 = ldv_init_zalloc(1UL); ldvarg219 = (char *)tmp___83; tmp___84 = ldv_init_zalloc(1UL); ldvarg225 = (char *)tmp___84; tmp___85 = ldv_init_zalloc(1UL); ldvarg227 = (char *)tmp___85; tmp___86 = ldv_init_zalloc(1UL); ldvarg229 = (char *)tmp___86; tmp___87 = ldv_init_zalloc(5048UL); ldvarg228 = (struct se_device *)tmp___87; tmp___88 = ldv_init_zalloc(5048UL); ldvarg230 = (struct se_device *)tmp___88; tmp___89 = ldv_init_zalloc(1UL); ldvarg231 = (char *)tmp___89; tmp___90 = ldv_init_zalloc(1UL); ldvarg234 = (char *)tmp___90; tmp___91 = ldv_init_zalloc(1UL); ldvarg232 = (char *)tmp___91; tmp___92 = ldv_init_zalloc(1UL); ldvarg242 = (char *)tmp___92; tmp___93 = ldv_init_zalloc(1UL); ldvarg243 = (char *)tmp___93; tmp___94 = ldv_init_zalloc(1UL); ldvarg246 = (char *)tmp___94; tmp___95 = ldv_init_zalloc(1UL); ldvarg248 = (char *)tmp___95; tmp___96 = ldv_init_zalloc(1UL); ldvarg250 = (char *)tmp___96; tmp___97 = ldv_init_zalloc(240UL); ldvarg249 = (struct se_dev_attrib *)tmp___97; tmp___98 = ldv_init_zalloc(1UL); ldvarg263 = (char *)tmp___98; tmp___99 = ldv_init_zalloc(1UL); ldvarg261 = (char *)tmp___99; tmp___100 = ldv_init_zalloc(400UL); ldvarg264 = (struct se_hba *)tmp___100; tmp___101 = ldv_init_zalloc(1UL); ldvarg265 = (char *)tmp___101; tmp___102 = ldv_init_zalloc(240UL); ldvarg266 = (struct se_dev_attrib *)tmp___102; tmp___103 = ldv_init_zalloc(1UL); ldvarg267 = (char *)tmp___103; tmp___104 = ldv_init_zalloc(1UL); ldvarg272 = (char *)tmp___104; tmp___105 = ldv_init_zalloc(1UL); ldvarg270 = (char *)tmp___105; tmp___106 = ldv_init_zalloc(5048UL); ldvarg273 = (struct se_device *)tmp___106; tmp___107 = ldv_init_zalloc(1UL); ldvarg274 = (char *)tmp___107; tmp___108 = ldv_init_zalloc(1UL); ldvarg280 = (char *)tmp___108; tmp___109 = ldv_init_zalloc(240UL); ldvarg279 = (struct se_dev_attrib *)tmp___109; tmp___110 = ldv_init_zalloc(1UL); ldvarg293 = (char *)tmp___110; tmp___111 = ldv_init_zalloc(1UL); ldvarg295 = (char *)tmp___111; tmp___112 = ldv_init_zalloc(1UL); ldvarg300 = tmp___112; tmp___113 = ldv_init_zalloc(1UL); ldvarg301 = (char *)tmp___113; tmp___114 = ldv_init_zalloc(1UL); ldvarg302 = (char *)tmp___114; tmp___115 = ldv_init_zalloc(1UL); ldvarg304 = (char *)tmp___115; tmp___116 = ldv_init_zalloc(1UL); ldvarg307 = (char *)tmp___116; tmp___117 = ldv_init_zalloc(1UL); ldvarg306 = (char *)tmp___117; tmp___118 = ldv_init_zalloc(1UL); ldvarg310 = (char *)tmp___118; tmp___119 = ldv_init_zalloc(1UL); ldvarg312 = (char *)tmp___119; tmp___120 = ldv_init_zalloc(1UL); ldvarg322 = (char *)tmp___120; tmp___121 = ldv_init_zalloc(1UL); ldvarg320 = (char *)tmp___121; tmp___122 = ldv_init_zalloc(1UL); ldvarg325 = (char *)tmp___122; tmp___123 = ldv_init_zalloc(1UL); ldvarg327 = (char *)tmp___123; tmp___124 = ldv_init_zalloc(1UL); ldvarg329 = (char *)tmp___124; tmp___125 = ldv_init_zalloc(1UL); ldvarg330 = tmp___125; tmp___126 = ldv_init_zalloc(1UL); ldvarg328 = tmp___126; tmp___127 = ldv_init_zalloc(1UL); ldvarg332 = (char *)tmp___127; tmp___128 = ldv_init_zalloc(1UL); ldvarg334 = (char *)tmp___128; tmp___129 = ldv_init_zalloc(1UL); ldvarg335 
= (char *)tmp___129; tmp___130 = ldv_init_zalloc(240UL); ldvarg352 = (struct se_dev_attrib *)tmp___130; tmp___131 = ldv_init_zalloc(1UL); ldvarg353 = (char *)tmp___131; tmp___132 = ldv_init_zalloc(1UL); ldvarg366 = (char *)tmp___132; tmp___133 = ldv_init_zalloc(1UL); ldvarg368 = (char *)tmp___133; tmp___134 = ldv_init_zalloc(1UL); ldvarg371 = (char *)tmp___134; tmp___135 = ldv_init_zalloc(1UL); ldvarg373 = (char *)tmp___135; tmp___136 = ldv_init_zalloc(1UL); ldvarg376 = (char *)tmp___136; tmp___137 = ldv_init_zalloc(1UL); ldvarg374 = (char *)tmp___137; tmp___138 = ldv_init_zalloc(1UL); ldvarg380 = (char *)tmp___138; tmp___139 = ldv_init_zalloc(80UL); ldvarg381 = (struct config_item *)tmp___139; tmp___140 = ldv_init_zalloc(1UL); ldvarg382 = (char *)tmp___140; tmp___141 = ldv_init_zalloc(1UL); ldvarg384 = (char *)tmp___141; tmp___142 = ldv_init_zalloc(1UL); ldvarg385 = (char *)tmp___142; tmp___143 = ldv_init_zalloc(80UL); ldvarg386 = (struct config_item *)tmp___143; tmp___144 = ldv_init_zalloc(1UL); ldvarg396 = (char *)tmp___144; tmp___145 = ldv_init_zalloc(1UL); ldvarg394 = (char *)tmp___145; tmp___146 = ldv_init_zalloc(1UL); ldvarg399 = (char *)tmp___146; tmp___147 = ldv_init_zalloc(80UL); ldvarg400 = (struct config_item *)tmp___147; tmp___148 = ldv_init_zalloc(5048UL); ldvarg401 = (struct se_device *)tmp___148; tmp___149 = ldv_init_zalloc(1UL); ldvarg402 = (char *)tmp___149; tmp___150 = ldv_init_zalloc(1UL); ldvarg403 = (char *)tmp___150; tmp___151 = ldv_init_zalloc(1UL); ldvarg405 = (char *)tmp___151; tmp___152 = ldv_init_zalloc(1UL); ldvarg418 = (char *)tmp___152; tmp___153 = ldv_init_zalloc(1UL); ldvarg416 = (char *)tmp___153; tmp___154 = ldv_init_zalloc(1UL); ldvarg421 = (char *)tmp___154; tmp___155 = ldv_init_zalloc(1UL); ldvarg423 = (char *)tmp___155; tmp___156 = ldv_init_zalloc(1UL); ldvarg426 = tmp___156; tmp___157 = ldv_init_zalloc(1UL); ldvarg424 = tmp___157; tmp___158 = ldv_init_zalloc(1UL); ldvarg428 = (char *)tmp___158; tmp___159 = ldv_init_zalloc(1UL); ldvarg425 = (char *)tmp___159; tmp___160 = ldv_init_zalloc(5048UL); ldvarg429 = (struct se_device *)tmp___160; tmp___161 = ldv_init_zalloc(1UL); ldvarg430 = (char *)tmp___161; tmp___162 = ldv_init_zalloc(1UL); ldvarg433 = (char *)tmp___162; tmp___163 = ldv_init_zalloc(1UL); ldvarg431 = (char *)tmp___163; tmp___164 = ldv_init_zalloc(1UL); ldvarg436 = (char *)tmp___164; tmp___165 = ldv_init_zalloc(80UL); ldvarg437 = (struct config_item *)tmp___165; tmp___166 = ldv_init_zalloc(1UL); ldvarg439 = (char *)tmp___166; tmp___167 = ldv_init_zalloc(232UL); ldvarg438 = (struct t10_alua_lu_gp *)tmp___167; tmp___168 = ldv_init_zalloc(1UL); ldvarg441 = (char *)tmp___168; tmp___169 = ldv_init_zalloc(5048UL); ldvarg440 = (struct se_device *)tmp___169; tmp___170 = ldv_init_zalloc(1UL); ldvarg457 = (char *)tmp___170; tmp___171 = ldv_init_zalloc(1UL); ldvarg455 = (char *)tmp___171; tmp___172 = ldv_init_zalloc(1UL); ldvarg463 = (char *)tmp___172; tmp___173 = ldv_init_zalloc(1UL); ldvarg461 = (char *)tmp___173; tmp___174 = ldv_init_zalloc(1UL); ldvarg469 = tmp___174; tmp___175 = ldv_init_zalloc(1UL); ldvarg467 = tmp___175; tmp___176 = ldv_init_zalloc(1UL); ldvarg471 = (char *)tmp___176; tmp___177 = ldv_init_zalloc(1UL); ldvarg468 = (char *)tmp___177; tmp___178 = ldv_init_zalloc(1UL); ldvarg485 = (char *)tmp___178; tmp___179 = ldv_init_zalloc(1UL); ldvarg486 = (char *)tmp___179; tmp___180 = ldv_init_zalloc(1UL); ldvarg489 = (char *)tmp___180; tmp___181 = ldv_init_zalloc(1UL); ldvarg487 = (char *)tmp___181; tmp___182 = ldv_init_zalloc(1UL); ldvarg492 
= (char *)tmp___182; tmp___183 = ldv_init_zalloc(1UL); ldvarg490 = (char *)tmp___183; tmp___184 = ldv_init_zalloc(1UL); ldvarg495 = (char *)tmp___184; tmp___185 = ldv_init_zalloc(1UL); ldvarg493 = (char *)tmp___185; tmp___186 = ldv_init_zalloc(1UL); ldvarg497 = (char *)tmp___186; tmp___187 = ldv_init_zalloc(240UL); ldvarg496 = (struct se_dev_attrib *)tmp___187; ldv_initialize(); ldv_memset((void *)(& ldvarg1), 0, 8UL); ldv_memset((void *)(& ldvarg9), 0, 8UL); ldv_memset((void *)(& ldvarg18), 0, 8UL); ldv_memset((void *)(& ldvarg23), 0, 8UL); ldv_memset((void *)(& ldvarg43), 0, 8UL); ldv_memset((void *)(& ldvarg52), 0, 8UL); ldv_memset((void *)(& ldvarg59), 0, 8UL); ldv_memset((void *)(& ldvarg65), 0, 8UL); ldv_memset((void *)(& ldvarg70), 0, 8UL); ldv_memset((void *)(& ldvarg93), 0, 8UL); ldv_memset((void *)(& ldvarg98), 0, 8UL); ldv_memset((void *)(& ldvarg101), 0, 8UL); ldv_memset((void *)(& ldvarg108), 0, 8UL); ldv_memset((void *)(& ldvarg114), 0, 8UL); ldv_memset((void *)(& ldvarg137), 0, 8UL); ldv_memset((void *)(& ldvarg140), 0, 8UL); ldv_memset((void *)(& ldvarg143), 0, 8UL); ldv_memset((void *)(& ldvarg146), 0, 8UL); ldv_memset((void *)(& ldvarg151), 0, 8UL); ldv_memset((void *)(& ldvarg161), 0, 8UL); ldv_memset((void *)(& ldvarg164), 0, 8UL); ldv_memset((void *)(& ldvarg167), 0, 8UL); ldv_memset((void *)(& ldvarg172), 0, 8UL); ldv_memset((void *)(& ldvarg182), 0, 8UL); ldv_memset((void *)(& ldvarg186), 0, 8UL); ldv_memset((void *)(& ldvarg192), 0, 8UL); ldv_memset((void *)(& ldvarg197), 0, 8UL); ldv_memset((void *)(& ldvarg200), 0, 8UL); ldv_memset((void *)(& ldvarg208), 0, 8UL); ldv_memset((void *)(& ldvarg211), 0, 8UL); ldv_memset((void *)(& ldvarg218), 0, 8UL); ldv_memset((void *)(& ldvarg226), 0, 8UL); ldv_memset((void *)(& ldvarg233), 0, 8UL); ldv_memset((void *)(& ldvarg241), 0, 8UL); ldv_memset((void *)(& ldvarg247), 0, 8UL); ldv_memset((void *)(& ldvarg262), 0, 8UL); ldv_memset((void *)(& ldvarg271), 0, 8UL); ldv_memset((void *)(& ldvarg294), 0, 8UL); ldv_memset((void *)(& ldvarg303), 0, 8UL); ldv_memset((void *)(& ldvarg305), 0, 8UL); ldv_memset((void *)(& ldvarg311), 0, 8UL); ldv_memset((void *)(& ldvarg321), 0, 8UL); ldv_memset((void *)(& ldvarg326), 0, 8UL); ldv_memset((void *)(& ldvarg331), 0, 8UL); ldv_memset((void *)(& ldvarg333), 0, 8UL); ldv_memset((void *)(& ldvarg367), 0, 8UL); ldv_memset((void *)(& ldvarg372), 0, 8UL); ldv_memset((void *)(& ldvarg375), 0, 8UL); ldv_memset((void *)(& ldvarg383), 0, 8UL); ldv_memset((void *)(& ldvarg395), 0, 8UL); ldv_memset((void *)(& ldvarg404), 0, 8UL); ldv_memset((void *)(& ldvarg417), 0, 8UL); ldv_memset((void *)(& ldvarg422), 0, 8UL); ldv_memset((void *)(& ldvarg427), 0, 8UL); ldv_memset((void *)(& ldvarg432), 0, 8UL); ldv_memset((void *)(& ldvarg456), 0, 8UL); ldv_memset((void *)(& ldvarg462), 0, 8UL); ldv_memset((void *)(& ldvarg470), 0, 8UL); ldv_memset((void *)(& ldvarg484), 0, 8UL); ldv_memset((void *)(& ldvarg488), 0, 8UL); ldv_memset((void *)(& ldvarg491), 0, 8UL); ldv_memset((void *)(& ldvarg494), 0, 8UL); ldv_state_variable_127 = 0; ldv_state_variable_32 = 0; ldv_state_variable_90 = 0; ldv_state_variable_118 = 0; ldv_state_variable_71 = 0; ldv_state_variable_102 = 0; ldv_state_variable_200 = 0; ldv_state_variable_18 = 0; ldv_state_variable_125 = 0; ldv_state_variable_16 = 0; ldv_state_variable_44 = 0; ldv_state_variable_55 = 0; ldv_state_variable_84 = 0; ldv_state_variable_27 = 0; ldv_state_variable_190 = 0; ldv_state_variable_161 = 0; ldv_state_variable_194 = 0; ldv_state_variable_95 = 0; ldv_state_variable_57 = 
0; ldv_state_variable_20 = 0; ldv_state_variable_163 = 0; ldv_state_variable_109 = 0; ldv_state_variable_151 = 0; ldv_state_variable_89 = 0; ldv_state_variable_175 = 0; ldv_state_variable_148 = 0; ldv_state_variable_31 = 0; ldv_state_variable_35 = 0; ldv_state_variable_11 = 0; ldv_state_variable_78 = 0; ldv_state_variable_93 = 0; ldv_state_variable_106 = 0; ldv_state_variable_157 = 0; ldv_state_variable_65 = 0; ldv_state_variable_29 = 0; ldv_state_variable_197 = 0; ldv_state_variable_203 = 0; ldv_state_variable_138 = 0; ldv_state_variable_199 = 0; ldv_state_variable_114 = 0; ldv_state_variable_58 = 0; ldv_state_variable_153 = 0; ldv_state_variable_15 = 0; ldv_state_variable_137 = 0; ldv_state_variable_81 = 0; ldv_state_variable_60 = 0; ldv_state_variable_101 = 0; ldv_state_variable_73 = 0; ldv_state_variable_86 = 0; ldv_state_variable_76 = 0; ldv_state_variable_62 = 0; ldv_state_variable_67 = 0; ldv_state_variable_204 = 0; ldv_state_variable_165 = 0; ldv_state_variable_198 = 0; ldv_state_variable_139 = 0; ldv_state_variable_129 = 0; work_init_2(); ldv_state_variable_2 = 1; ldv_state_variable_17 = 0; ldv_state_variable_186 = 0; ldv_state_variable_110 = 0; ldv_state_variable_82 = 0; ldv_state_variable_147 = 0; ldv_state_variable_202 = 0; ldv_state_variable_168 = 0; ldv_state_variable_184 = 0; ldv_state_variable_135 = 0; ldv_state_variable_14 = 0; ldv_state_variable_112 = 0; ldv_state_variable_69 = 0; ldv_state_variable_191 = 0; ldv_state_variable_172 = 0; ldv_state_variable_145 = 0; ldv_state_variable_49 = 0; ldv_state_variable_178 = 0; ldv_state_variable_24 = 0; ldv_state_variable_187 = 0; ldv_state_variable_140 = 0; ldv_state_variable_124 = 0; ldv_state_variable_104 = 0; ldv_state_variable_131 = 0; ldv_state_variable_181 = 0; ldv_state_variable_121 = 0; ldv_state_variable_79 = 0; ldv_state_variable_154 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_23 = 0; ldv_state_variable_96 = 0; ldv_state_variable_126 = 0; ldv_state_variable_159 = 0; ldv_state_variable_160 = 0; ldv_state_variable_176 = 0; ldv_state_variable_47 = 0; ldv_state_variable_8 = 0; ldv_state_variable_98 = 0; ldv_state_variable_37 = 0; ldv_state_variable_117 = 0; ldv_state_variable_43 = 0; ldv_state_variable_195 = 0; work_init_5(); ldv_state_variable_5 = 1; ldv_state_variable_170 = 0; ldv_state_variable_33 = 0; ldv_state_variable_21 = 0; ldv_state_variable_63 = 0; work_init_7(); ldv_state_variable_7 = 1; ldv_state_variable_26 = 0; ldv_state_variable_80 = 0; ldv_state_variable_193 = 0; ldv_state_variable_119 = 0; ldv_state_variable_180 = 0; ldv_state_variable_99 = 0; ldv_state_variable_179 = 0; ldv_state_variable_162 = 0; ldv_state_variable_72 = 0; ldv_state_variable_74 = 0; ldv_state_variable_182 = 0; ldv_state_variable_61 = 0; ldv_state_variable_108 = 0; ldv_state_variable_115 = 0; ldv_state_variable_92 = 0; ldv_state_variable_103 = 0; ldv_state_variable_201 = 0; ldv_state_variable_10 = 0; ldv_state_variable_113 = 0; ldv_state_variable_152 = 0; ldv_state_variable_189 = 0; ldv_state_variable_142 = 0; ldv_state_variable_91 = 0; ldv_state_variable_167 = 0; ldv_state_variable_48 = 0; ldv_state_variable_107 = 0; ldv_state_variable_87 = 0; ldv_state_variable_174 = 0; ldv_state_variable_77 = 0; ldv_state_variable_133 = 0; ldv_state_variable_149 = 0; ldv_state_variable_123 = 0; ldv_state_variable_50 = 0; ldv_state_variable_39 = 0; ldv_state_variable_64 = 0; ldv_state_variable_97 = 0; ldv_state_variable_12 = 0; ldv_state_variable_41 = 0; ldv_state_variable_52 = 0; ldv_state_variable_173 = 0; ldv_state_variable_56 = 0; 
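/* Descriptive note (added): each ldv_state_variable_N flag models whether the corresponding
 * configfs item/group/attribute operations table has been registered and may be invoked; most
 * flags start at 0, while the workqueue models (work_init_N() followed by ldv_state_variable_N = 1)
 * and the top-level module flag ldv_state_variable_0 start active.  After this initialization the
 * harness enters its event loop at label ldv_65270, where __VERIFIER_nondet_int() nondeterministically
 * selects which registered callback to exercise next. */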
ldv_state_variable_45 = 0; ldv_state_variable_66 = 0; ldv_state_variable_19 = 0; ldv_state_variable_54 = 0; ldv_state_variable_70 = 0; ldv_state_variable_188 = 0; ldv_state_variable_68 = 0; ldv_state_variable_166 = 0; work_init_1(); ldv_state_variable_1 = 1; ldv_state_variable_136 = 0; ldv_state_variable_88 = 0; ldv_state_variable_116 = 0; ldv_state_variable_144 = 0; ldv_state_variable_141 = 0; ldv_state_variable_30 = 0; ldv_state_variable_100 = 0; ldv_state_variable_25 = 0; ldv_state_variable_128 = 0; ldv_state_variable_28 = 0; ldv_state_variable_120 = 0; ldv_state_variable_156 = 0; ldv_state_variable_134 = 0; ldv_state_variable_40 = 0; ldv_state_variable_75 = 0; ldv_state_variable_83 = 0; ldv_state_variable_192 = 0; ldv_state_variable_59 = 0; ldv_state_variable_177 = 0; ldv_state_variable_150 = 0; ldv_state_variable_155 = 0; ldv_state_variable_130 = 0; ldv_state_variable_53 = 0; ldv_state_variable_122 = 0; ldv_state_variable_143 = 0; ldv_state_variable_158 = 0; ldv_state_variable_42 = 0; ldv_state_variable_22 = 0; ldv_state_variable_46 = 0; ldv_state_variable_13 = 0; ldv_state_variable_105 = 0; work_init_6(); ldv_state_variable_6 = 1; ldv_state_variable_85 = 0; ldv_state_variable_185 = 0; ldv_state_variable_36 = 0; work_init_3(); ldv_state_variable_3 = 1; ldv_state_variable_183 = 0; ldv_state_variable_94 = 0; ldv_state_variable_146 = 0; ldv_state_variable_51 = 0; ldv_state_variable_9 = 0; ldv_state_variable_111 = 0; ldv_state_variable_38 = 0; work_init_4(); ldv_state_variable_4 = 1; ldv_state_variable_34 = 0; ldv_state_variable_169 = 0; ldv_state_variable_164 = 0; ldv_state_variable_132 = 0; ldv_state_variable_196 = 0; ldv_state_variable_171 = 0; ldv_65270: tmp___188 = __VERIFIER_nondet_int(); switch (tmp___188) { case 0: ; if (ldv_state_variable_127 != 0) { tmp___189 = __VERIFIER_nondet_int(); switch (tmp___189) { case 0: ; if (ldv_state_variable_127 == 1) { target_core_alua_tg_pt_gp_store_attr_implicit_trans_secs(target_core_alua_tg_pt_gp_implicit_trans_secs_group0, (char const *)ldvarg2, ldvarg1); ldv_state_variable_127 = 1; } else { } goto ldv_64718; case 1: ; if (ldv_state_variable_127 == 1) { target_core_alua_tg_pt_gp_show_attr_implicit_trans_secs(target_core_alua_tg_pt_gp_implicit_trans_secs_group0, ldvarg0); ldv_state_variable_127 = 1; } else { } goto ldv_64718; default: ldv_stop(); } ldv_64718: ; } else { } goto ldv_64721; case 1: ; if (ldv_state_variable_32 != 0) { ldv_main_exported_32(); } else { } goto ldv_64721; case 2: ; if (ldv_state_variable_90 != 0) { ldv_main_exported_90(); } else { } goto ldv_64721; case 3: ; if (ldv_state_variable_118 != 0) { tmp___190 = __VERIFIER_nondet_int(); switch (tmp___190) { case 0: ; if (ldv_state_variable_118 == 1) { target_core_hba_store_attr_hba_mode(target_core_hba_hba_mode_group0, (char const *)ldvarg10, ldvarg9); ldv_state_variable_118 = 1; } else { } goto ldv_64726; case 1: ; if (ldv_state_variable_118 == 1) { target_core_hba_show_attr_hba_mode(target_core_hba_hba_mode_group0, ldvarg8); ldv_state_variable_118 = 1; } else { } goto ldv_64726; default: ldv_stop(); } ldv_64726: ; } else { } goto ldv_64721; case 4: ; if (ldv_state_variable_71 != 0) { ldv_main_exported_71(); } else { } goto ldv_64721; case 5: ; if (ldv_state_variable_102 != 0) { ldv_main_exported_102(); } else { } goto ldv_64721; case 6: ; if (ldv_state_variable_200 != 0) { tmp___191 = __VERIFIER_nondet_int(); switch (tmp___191) { case 0: ; if (ldv_state_variable_200 == 1) { store_emulate_fua_write(target_core_dev_attrib_emulate_fua_write_group0, (char const *)ldvarg19, 
ldvarg18); ldv_state_variable_200 = 1; } else { } goto ldv_64733; case 1: ; if (ldv_state_variable_200 == 1) { show_emulate_fua_write(target_core_dev_attrib_emulate_fua_write_group0, ldvarg17); ldv_state_variable_200 = 1; } else { } goto ldv_64733; default: ldv_stop(); } ldv_64733: ; } else { } goto ldv_64721; case 7: ; if (ldv_state_variable_18 != 0) { ldv_main_exported_18(); } else { } goto ldv_64721; case 8: ; if (ldv_state_variable_125 != 0) { tmp___192 = __VERIFIER_nondet_int(); switch (tmp___192) { case 0: ; if (ldv_state_variable_125 == 1) { target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(target_core_alua_tg_pt_gp_tg_pt_gp_id_group0, (char const *)ldvarg24, ldvarg23); ldv_state_variable_125 = 1; } else { } goto ldv_64739; case 1: ; if (ldv_state_variable_125 == 1) { target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(target_core_alua_tg_pt_gp_tg_pt_gp_id_group0, ldvarg22); ldv_state_variable_125 = 1; } else { } goto ldv_64739; default: ldv_stop(); } ldv_64739: ; } else { } goto ldv_64721; case 9: ; if (ldv_state_variable_16 != 0) { ldv_main_exported_16(); } else { } goto ldv_64721; case 10: ; if (ldv_state_variable_44 != 0) { ldv_main_exported_44(); } else { } goto ldv_64721; case 11: ; if (ldv_state_variable_55 != 0) { ldv_main_exported_55(); } else { } goto ldv_64721; case 12: ; if (ldv_state_variable_84 != 0) { ldv_main_exported_84(); } else { } goto ldv_64721; case 13: ; if (ldv_state_variable_27 != 0) { ldv_main_exported_27(); } else { } goto ldv_64721; case 14: ; if (ldv_state_variable_190 != 0) { tmp___193 = __VERIFIER_nondet_int(); switch (tmp___193) { case 0: ; if (ldv_state_variable_190 == 1) { show_hw_pi_prot_type(ldvarg38, ldvarg39); ldv_state_variable_190 = 1; } else { } goto ldv_64749; default: ldv_stop(); } ldv_64749: ; } else { } goto ldv_64721; case 15: ; if (ldv_state_variable_161 != 0) { tmp___194 = __VERIFIER_nondet_int(); switch (tmp___194) { case 0: ; if (ldv_state_variable_161 == 1) { target_core_dev_pr_show_attr_res_pr_all_tgt_pts(ldvarg40, ldvarg41); ldv_state_variable_161 = 1; } else { } goto ldv_64753; default: ldv_stop(); } ldv_64753: ; } else { } goto ldv_64721; case 16: ; if (ldv_state_variable_194 != 0) { tmp___195 = __VERIFIER_nondet_int(); switch (tmp___195) { case 0: ; if (ldv_state_variable_194 == 1) { store_emulate_tpws(target_core_dev_attrib_emulate_tpws_group0, (char const *)ldvarg44, ldvarg43); ldv_state_variable_194 = 1; } else { } goto ldv_64757; case 1: ; if (ldv_state_variable_194 == 1) { show_emulate_tpws(target_core_dev_attrib_emulate_tpws_group0, ldvarg42); ldv_state_variable_194 = 1; } else { } goto ldv_64757; default: ldv_stop(); } ldv_64757: ; } else { } goto ldv_64721; case 17: ; if (ldv_state_variable_95 != 0) { ldv_main_exported_95(); } else { } goto ldv_64721; case 18: ; if (ldv_state_variable_57 != 0) { ldv_main_exported_57(); } else { } goto ldv_64721; case 19: ; if (ldv_state_variable_20 != 0) { ldv_main_exported_20(); } else { } goto ldv_64721; case 20: ; if (ldv_state_variable_163 != 0) { tmp___196 = __VERIFIER_nondet_int(); switch (tmp___196) { case 0: ; if (ldv_state_variable_163 == 1) { target_core_dev_wwn_attr_show(target_core_dev_wwn_ops_group1, target_core_dev_wwn_ops_group0, ldvarg54); ldv_state_variable_163 = 1; } else { } goto ldv_64765; case 1: ; if (ldv_state_variable_163 == 1) { target_core_dev_wwn_attr_store(target_core_dev_wwn_ops_group1, target_core_dev_wwn_ops_group0, (char const *)ldvarg53, ldvarg52); ldv_state_variable_163 = 1; } else { } goto ldv_64765; default: ldv_stop(); } ldv_64765: ; } else { } goto 
ldv_64721; case 21: ; if (ldv_state_variable_109 != 0) { ldv_main_exported_109(); } else { } goto ldv_64721; case 22: ; if (ldv_state_variable_151 != 0) { tmp___197 = __VERIFIER_nondet_int(); switch (tmp___197) { case 0: ; if (ldv_state_variable_151 == 1) { target_core_store_dev_control(ldvarg58, (char const *)ldvarg60, ldvarg59); ldv_state_variable_151 = 1; } else { } goto ldv_64771; default: ldv_stop(); } ldv_64771: ; } else { } goto ldv_64721; case 23: ; if (ldv_state_variable_89 != 0) { ldv_main_exported_89(); } else { } goto ldv_64721; case 24: ; if (ldv_state_variable_175 != 0) { tmp___198 = __VERIFIER_nondet_int(); switch (tmp___198) { case 0: ; if (ldv_state_variable_175 == 1) { store_unmap_granularity_alignment(target_core_dev_attrib_unmap_granularity_alignment_group0, (char const *)ldvarg66, ldvarg65); ldv_state_variable_175 = 1; } else { } goto ldv_64776; case 1: ; if (ldv_state_variable_175 == 1) { show_unmap_granularity_alignment(target_core_dev_attrib_unmap_granularity_alignment_group0, ldvarg64); ldv_state_variable_175 = 1; } else { } goto ldv_64776; default: ldv_stop(); } ldv_64776: ; } else { } goto ldv_64721; case 25: ; if (ldv_state_variable_148 != 0) { tmp___199 = __VERIFIER_nondet_int(); switch (tmp___199) { case 0: ; if (ldv_state_variable_148 == 1) { target_core_store_dev_enable(ldvarg69, (char const *)ldvarg71, ldvarg70); ldv_state_variable_148 = 1; } else { } goto ldv_64781; case 1: ; if (ldv_state_variable_148 == 1) { target_core_show_dev_enable(ldvarg67, ldvarg68); ldv_state_variable_148 = 1; } else { } goto ldv_64781; default: ldv_stop(); } ldv_64781: ; } else { } goto ldv_64721; case 26: ; if (ldv_state_variable_31 != 0) { ldv_main_exported_31(); } else { } goto ldv_64721; case 27: ; if (ldv_state_variable_35 != 0) { ldv_main_exported_35(); } else { } goto ldv_64721; case 28: ; if (ldv_state_variable_11 != 0) { ldv_main_exported_11(); } else { } goto ldv_64721; case 29: ; if (ldv_state_variable_78 != 0) { ldv_main_exported_78(); } else { } goto ldv_64721; case 30: ; if (ldv_state_variable_93 != 0) { ldv_main_exported_93(); } else { } goto ldv_64721; case 31: ; if (ldv_state_variable_106 != 0) { ldv_main_exported_106(); } else { } goto ldv_64721; case 32: ; if (ldv_state_variable_157 != 0) { tmp___200 = __VERIFIER_nondet_int(); switch (tmp___200) { case 0: ; if (ldv_state_variable_157 == 1) { target_core_dev_pr_show_attr_res_pr_type(ldvarg86, ldvarg87); ldv_state_variable_157 = 1; } else { } goto ldv_64792; default: ldv_stop(); } ldv_64792: ; } else { } goto ldv_64721; case 33: ; if (ldv_state_variable_65 != 0) { ldv_main_exported_65(); } else { } goto ldv_64721; case 34: ; if (ldv_state_variable_29 != 0) { ldv_main_exported_29(); } else { } goto ldv_64721; case 35: ; if (ldv_state_variable_197 != 0) { tmp___201 = __VERIFIER_nondet_int(); switch (tmp___201) { case 0: ; if (ldv_state_variable_197 == 1) { store_emulate_ua_intlck_ctrl(target_core_dev_attrib_emulate_ua_intlck_ctrl_group0, (char const *)ldvarg94, ldvarg93); ldv_state_variable_197 = 1; } else { } goto ldv_64798; case 1: ; if (ldv_state_variable_197 == 1) { show_emulate_ua_intlck_ctrl(target_core_dev_attrib_emulate_ua_intlck_ctrl_group0, ldvarg92); ldv_state_variable_197 = 1; } else { } goto ldv_64798; default: ldv_stop(); } ldv_64798: ; } else { } goto ldv_64721; case 36: ; if (ldv_state_variable_203 != 0) { tmp___202 = __VERIFIER_nondet_int(); switch (tmp___202) { case 0: ; if (ldv_state_variable_203 == 1) { target_core_deregister_fabric(target_core_fabric_group_ops_group0, ldvarg96); 
ldv_state_variable_203 = 1; } else { } goto ldv_64803; case 1: ; if (ldv_state_variable_203 == 1) { target_core_register_fabric(target_core_fabric_group_ops_group0, (char const *)ldvarg95); ldv_state_variable_203 = 1; } else { } goto ldv_64803; default: ldv_stop(); } ldv_64803: ; } else { } goto ldv_64721; case 37: ; if (ldv_state_variable_138 != 0) { tmp___203 = __VERIFIER_nondet_int(); switch (tmp___203) { case 0: ; if (ldv_state_variable_138 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_access_type(target_core_alua_tg_pt_gp_alua_access_type_group0, (char const *)ldvarg99, ldvarg98); ldv_state_variable_138 = 1; } else { } goto ldv_64808; case 1: ; if (ldv_state_variable_138 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_access_type(target_core_alua_tg_pt_gp_alua_access_type_group0, ldvarg97); ldv_state_variable_138 = 1; } else { } goto ldv_64808; default: ldv_stop(); } ldv_64808: ; } else { } goto ldv_64721; case 38: ; if (ldv_state_variable_199 != 0) { tmp___204 = __VERIFIER_nondet_int(); switch (tmp___204) { case 0: ; if (ldv_state_variable_199 == 1) { store_emulate_fua_read(target_core_dev_attrib_emulate_fua_read_group0, (char const *)ldvarg102, ldvarg101); ldv_state_variable_199 = 1; } else { } goto ldv_64813; case 1: ; if (ldv_state_variable_199 == 1) { show_emulate_fua_read(target_core_dev_attrib_emulate_fua_read_group0, ldvarg100); ldv_state_variable_199 = 1; } else { } goto ldv_64813; default: ldv_stop(); } ldv_64813: ; } else { } goto ldv_64721; case 39: ; if (ldv_state_variable_114 != 0) { ldv_main_exported_114(); } else { } goto ldv_64721; case 40: ; if (ldv_state_variable_58 != 0) { ldv_main_exported_58(); } else { } goto ldv_64721; case 41: ; if (ldv_state_variable_153 != 0) { tmp___205 = __VERIFIER_nondet_int(); switch (tmp___205) { case 0: ; if (ldv_state_variable_153 == 1) { target_core_dev_pr_attr_show(target_core_dev_pr_ops_group1, target_core_dev_pr_ops_group0, ldvarg110); ldv_state_variable_153 = 1; } else { } goto ldv_64820; case 1: ; if (ldv_state_variable_153 == 1) { target_core_dev_pr_attr_store(target_core_dev_pr_ops_group1, target_core_dev_pr_ops_group0, (char const *)ldvarg109, ldvarg108); ldv_state_variable_153 = 1; } else { } goto ldv_64820; default: ldv_stop(); } ldv_64820: ; } else { } goto ldv_64721; case 42: ; if (ldv_state_variable_15 != 0) { ldv_main_exported_15(); } else { } goto ldv_64721; case 43: ; if (ldv_state_variable_137 != 0) { tmp___206 = __VERIFIER_nondet_int(); switch (tmp___206) { case 0: ; if (ldv_state_variable_137 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_transitioning(target_core_alua_tg_pt_gp_alua_support_transitioning_group0, (char const *)ldvarg115, ldvarg114); ldv_state_variable_137 = 1; } else { } goto ldv_64826; case 1: ; if (ldv_state_variable_137 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_transitioning(target_core_alua_tg_pt_gp_alua_support_transitioning_group0, ldvarg113); ldv_state_variable_137 = 1; } else { } goto ldv_64826; default: ldv_stop(); } ldv_64826: ; } else { } goto ldv_64721; case 44: ; if (ldv_state_variable_81 != 0) { ldv_main_exported_81(); } else { } goto ldv_64721; case 45: ; if (ldv_state_variable_60 != 0) { ldv_main_exported_60(); } else { } goto ldv_64721; case 46: ; if (ldv_state_variable_101 != 0) { ldv_main_exported_101(); } else { } goto ldv_64721; case 47: ; if (ldv_state_variable_73 != 0) { ldv_main_exported_73(); } else { } goto ldv_64721; case 48: ; if (ldv_state_variable_86 != 0) { ldv_main_exported_86(); } else { } goto ldv_64721; case 49: ; if 
(ldv_state_variable_76 != 0) { ldv_main_exported_76(); } else { } goto ldv_64721; case 50: ; if (ldv_state_variable_62 != 0) { ldv_main_exported_62(); } else { } goto ldv_64721; case 51: ; if (ldv_state_variable_67 != 0) { ldv_main_exported_67(); } else { } goto ldv_64721; case 52: ; if (ldv_state_variable_204 != 0) { tmp___207 = __VERIFIER_nondet_int(); switch (tmp___207) { case 0: ; if (ldv_state_variable_204 == 1) { target_core_attr_show(ldvarg133, ldvarg135, ldvarg134); ldv_state_variable_204 = 1; } else { } goto ldv_64839; default: ldv_stop(); } ldv_64839: ; } else { } goto ldv_64721; case 53: ; if (ldv_state_variable_165 != 0) { tmp___208 = __VERIFIER_nondet_int(); switch (tmp___208) { case 0: ; if (ldv_state_variable_165 == 1) { target_core_dev_wwn_store_attr_vpd_assoc_target_port(target_core_dev_wwn_vpd_assoc_target_port_group0, (char const *)ldvarg138, ldvarg137); ldv_state_variable_165 = 1; } else { } goto ldv_64843; case 1: ; if (ldv_state_variable_165 == 1) { target_core_dev_wwn_show_attr_vpd_assoc_target_port(target_core_dev_wwn_vpd_assoc_target_port_group0, ldvarg136); ldv_state_variable_165 = 1; } else { } goto ldv_64843; default: ldv_stop(); } ldv_64843: ; } else { } goto ldv_64721; case 54: ; if (ldv_state_variable_198 != 0) { tmp___209 = __VERIFIER_nondet_int(); switch (tmp___209) { case 0: ; if (ldv_state_variable_198 == 1) { store_emulate_write_cache(target_core_dev_attrib_emulate_write_cache_group0, (char const *)ldvarg141, ldvarg140); ldv_state_variable_198 = 1; } else { } goto ldv_64848; case 1: ; if (ldv_state_variable_198 == 1) { show_emulate_write_cache(target_core_dev_attrib_emulate_write_cache_group0, ldvarg139); ldv_state_variable_198 = 1; } else { } goto ldv_64848; default: ldv_stop(); } ldv_64848: ; } else { } goto ldv_64721; case 55: ; if (ldv_state_variable_139 != 0) { tmp___210 = __VERIFIER_nondet_int(); switch (tmp___210) { case 0: ; if (ldv_state_variable_139 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_access_status(target_core_alua_tg_pt_gp_alua_access_status_group0, (char const *)ldvarg144, ldvarg143); ldv_state_variable_139 = 1; } else { } goto ldv_64853; case 1: ; if (ldv_state_variable_139 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_access_status(target_core_alua_tg_pt_gp_alua_access_status_group0, ldvarg142); ldv_state_variable_139 = 1; } else { } goto ldv_64853; default: ldv_stop(); } ldv_64853: ; } else { } goto ldv_64721; case 56: ; if (ldv_state_variable_129 != 0) { tmp___211 = __VERIFIER_nondet_int(); switch (tmp___211) { case 0: ; if (ldv_state_variable_129 == 1) { target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(target_core_alua_tg_pt_gp_nonop_delay_msecs_group0, (char const *)ldvarg147, ldvarg146); ldv_state_variable_129 = 1; } else { } goto ldv_64858; case 1: ; if (ldv_state_variable_129 == 1) { target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(target_core_alua_tg_pt_gp_nonop_delay_msecs_group0, ldvarg145); ldv_state_variable_129 = 1; } else { } goto ldv_64858; default: ldv_stop(); } ldv_64858: ; } else { } goto ldv_64721; case 57: ; goto ldv_64721; case 58: ; if (ldv_state_variable_17 != 0) { ldv_main_exported_17(); } else { } goto ldv_64721; case 59: ; if (ldv_state_variable_186 != 0) { tmp___212 = __VERIFIER_nondet_int(); switch (tmp___212) { case 0: ; if (ldv_state_variable_186 == 1) { store_emulate_rest_reord(target_core_dev_attrib_emulate_rest_reord_group0, (char const *)ldvarg152, ldvarg151); ldv_state_variable_186 = 1; } else { } goto ldv_64865; case 1: ; if (ldv_state_variable_186 == 1) { 
show_emulate_rest_reord(target_core_dev_attrib_emulate_rest_reord_group0, ldvarg150); ldv_state_variable_186 = 1; } else { } goto ldv_64865; default: ldv_stop(); } ldv_64865: ; } else { } goto ldv_64721; case 60: ; if (ldv_state_variable_110 != 0) { ldv_main_exported_110(); } else { } goto ldv_64721; case 61: ; if (ldv_state_variable_82 != 0) { ldv_main_exported_82(); } else { } goto ldv_64721; case 62: ; if (ldv_state_variable_147 != 0) { tmp___213 = __VERIFIER_nondet_int(); switch (tmp___213) { case 0: ; if (ldv_state_variable_147 == 1) { target_core_store_alua_lu_gp(ldvarg160, (char const *)ldvarg162, ldvarg161); ldv_state_variable_147 = 1; } else { } goto ldv_64872; case 1: ; if (ldv_state_variable_147 == 1) { target_core_show_alua_lu_gp(ldvarg158, ldvarg159); ldv_state_variable_147 = 1; } else { } goto ldv_64872; default: ldv_stop(); } ldv_64872: ; } else { } goto ldv_64721; case 63: ; if (ldv_state_variable_202 != 0) { tmp___214 = __VERIFIER_nondet_int(); switch (tmp___214) { case 0: ; if (ldv_state_variable_202 == 1) { store_emulate_model_alias(target_core_dev_attrib_emulate_model_alias_group0, (char const *)ldvarg165, ldvarg164); ldv_state_variable_202 = 1; } else { } goto ldv_64877; case 1: ; if (ldv_state_variable_202 == 1) { show_emulate_model_alias(target_core_dev_attrib_emulate_model_alias_group0, ldvarg163); ldv_state_variable_202 = 1; } else { } goto ldv_64877; default: ldv_stop(); } ldv_64877: ; } else { } goto ldv_64721; case 64: ; if (ldv_state_variable_168 != 0) { tmp___215 = __VERIFIER_nondet_int(); switch (tmp___215) { case 0: ; if (ldv_state_variable_168 == 1) { target_core_dev_wwn_store_attr_vpd_unit_serial(target_core_dev_wwn_vpd_unit_serial_group0, (char const *)ldvarg168, ldvarg167); ldv_state_variable_168 = 1; } else { } goto ldv_64882; case 1: ; if (ldv_state_variable_168 == 1) { target_core_dev_wwn_show_attr_vpd_unit_serial(target_core_dev_wwn_vpd_unit_serial_group0, ldvarg166); ldv_state_variable_168 = 1; } else { } goto ldv_64882; default: ldv_stop(); } ldv_64882: ; } else { } goto ldv_64721; case 65: ; if (ldv_state_variable_184 != 0) { tmp___216 = __VERIFIER_nondet_int(); switch (tmp___216) { case 0: ; if (ldv_state_variable_184 == 1) { show_hw_block_size(ldvarg169, ldvarg170); ldv_state_variable_184 = 1; } else { } goto ldv_64887; default: ldv_stop(); } ldv_64887: ; } else { } goto ldv_64721; case 66: ; if (ldv_state_variable_135 != 0) { tmp___217 = __VERIFIER_nondet_int(); switch (tmp___217) { case 0: ; if (ldv_state_variable_135 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_lba_dependent(target_core_alua_tg_pt_gp_alua_support_lba_dependent_group0, (char const *)ldvarg173, ldvarg172); ldv_state_variable_135 = 1; } else { } goto ldv_64891; case 1: ; if (ldv_state_variable_135 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_lba_dependent(target_core_alua_tg_pt_gp_alua_support_lba_dependent_group0, ldvarg171); ldv_state_variable_135 = 1; } else { } goto ldv_64891; default: ldv_stop(); } ldv_64891: ; } else { } goto ldv_64721; case 67: ; if (ldv_state_variable_14 != 0) { ldv_main_exported_14(); } else { } goto ldv_64721; case 68: ; if (ldv_state_variable_112 != 0) { ldv_main_exported_112(); } else { } goto ldv_64721; case 69: ; if (ldv_state_variable_69 != 0) { ldv_main_exported_69(); } else { } goto ldv_64721; case 70: ; if (ldv_state_variable_191 != 0) { tmp___218 = __VERIFIER_nondet_int(); switch (tmp___218) { case 0: ; if (ldv_state_variable_191 == 1) { store_pi_prot_type(target_core_dev_attrib_pi_prot_type_group0, (char const 
*)ldvarg183, ldvarg182); ldv_state_variable_191 = 1; } else { } goto ldv_64899; case 1: ; if (ldv_state_variable_191 == 1) { show_pi_prot_type(target_core_dev_attrib_pi_prot_type_group0, ldvarg181); ldv_state_variable_191 = 1; } else { } goto ldv_64899; default: ldv_stop(); } ldv_64899: ; } else { } goto ldv_64721; case 71: ; if (ldv_state_variable_172 != 0) { tmp___219 = __VERIFIER_nondet_int(); switch (tmp___219) { case 0: ; if (ldv_state_variable_172 == 1) { show_hw_block_size(ldvarg184, ldvarg185); ldv_state_variable_172 = 1; } else { } goto ldv_64904; default: ldv_stop(); } ldv_64904: ; } else { } goto ldv_64721; case 72: ; if (ldv_state_variable_145 != 0) { tmp___220 = __VERIFIER_nondet_int(); switch (tmp___220) { case 0: ; if (ldv_state_variable_145 == 1) { target_core_dev_show(target_core_dev_item_ops_group1, target_core_dev_item_ops_group0, ldvarg188); ldv_state_variable_145 = 1; } else { } if (ldv_state_variable_145 == 2) { target_core_dev_show(target_core_dev_item_ops_group1, target_core_dev_item_ops_group0, ldvarg188); ldv_state_variable_145 = 2; } else { } goto ldv_64908; case 1: ; if (ldv_state_variable_145 == 2) { target_core_dev_release(target_core_dev_item_ops_group1); ldv_state_variable_145 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_64908; case 2: ; if (ldv_state_variable_145 == 1) { target_core_dev_store(target_core_dev_item_ops_group1, target_core_dev_item_ops_group0, (char const *)ldvarg187, ldvarg186); ldv_state_variable_145 = 1; } else { } if (ldv_state_variable_145 == 2) { target_core_dev_store(target_core_dev_item_ops_group1, target_core_dev_item_ops_group0, (char const *)ldvarg187, ldvarg186); ldv_state_variable_145 = 2; } else { } goto ldv_64908; case 3: ; if (ldv_state_variable_145 == 1) { ldv_probe_145(); ldv_state_variable_145 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_64908; default: ldv_stop(); } ldv_64908: ; } else { } goto ldv_64721; case 73: ; if (ldv_state_variable_49 != 0) { ldv_main_exported_49(); } else { } goto ldv_64721; case 74: ; if (ldv_state_variable_178 != 0) { tmp___221 = __VERIFIER_nondet_int(); switch (tmp___221) { case 0: ; if (ldv_state_variable_178 == 1) { store_max_unmap_lba_count(target_core_dev_attrib_max_unmap_lba_count_group0, (char const *)ldvarg193, ldvarg192); ldv_state_variable_178 = 1; } else { } goto ldv_64916; case 1: ; if (ldv_state_variable_178 == 1) { show_max_unmap_lba_count(target_core_dev_attrib_max_unmap_lba_count_group0, ldvarg191); ldv_state_variable_178 = 1; } else { } goto ldv_64916; default: ldv_stop(); } ldv_64916: ; } else { } goto ldv_64721; case 75: ; if (ldv_state_variable_24 != 0) { ldv_main_exported_24(); } else { } goto ldv_64721; case 76: ; if (ldv_state_variable_187 != 0) { tmp___222 = __VERIFIER_nondet_int(); switch (tmp___222) { case 0: ; if (ldv_state_variable_187 == 1) { store_is_nonrot(target_core_dev_attrib_is_nonrot_group0, (char const *)ldvarg198, ldvarg197); ldv_state_variable_187 = 1; } else { } goto ldv_64922; case 1: ; if (ldv_state_variable_187 == 1) { show_is_nonrot(target_core_dev_attrib_is_nonrot_group0, ldvarg196); ldv_state_variable_187 = 1; } else { } goto ldv_64922; default: ldv_stop(); } ldv_64922: ; } else { } goto ldv_64721; case 77: ; if (ldv_state_variable_140 != 0) { tmp___223 = __VERIFIER_nondet_int(); switch (tmp___223) { case 0: ; if (ldv_state_variable_140 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_access_state(target_core_alua_tg_pt_gp_alua_access_state_group0, (char const *)ldvarg201, ldvarg200); ldv_state_variable_140 = 1; } else { } goto ldv_64927; case 
1: ; if (ldv_state_variable_140 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_access_state(target_core_alua_tg_pt_gp_alua_access_state_group0, ldvarg199); ldv_state_variable_140 = 1; } else { } goto ldv_64927; default: ldv_stop(); } ldv_64927: ; } else { } goto ldv_64721; case 78: ; if (ldv_state_variable_124 != 0) { tmp___224 = __VERIFIER_nondet_int(); switch (tmp___224) { case 0: ; if (ldv_state_variable_124 == 1) { target_core_alua_tg_pt_gp_show_attr_members(ldvarg202, ldvarg203); ldv_state_variable_124 = 1; } else { } goto ldv_64932; default: ldv_stop(); } ldv_64932: ; } else { } goto ldv_64721; case 79: ; if (ldv_state_variable_104 != 0) { ldv_main_exported_104(); } else { } goto ldv_64721; case 80: ; if (ldv_state_variable_131 != 0) { tmp___225 = __VERIFIER_nondet_int(); switch (tmp___225) { case 0: ; if (ldv_state_variable_131 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_active_nonoptimized(target_core_alua_tg_pt_gp_alua_support_active_nonoptimized_group0, (char const *)ldvarg209, ldvarg208); ldv_state_variable_131 = 1; } else { } goto ldv_64937; case 1: ; if (ldv_state_variable_131 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_active_nonoptimized(target_core_alua_tg_pt_gp_alua_support_active_nonoptimized_group0, ldvarg207); ldv_state_variable_131 = 1; } else { } goto ldv_64937; default: ldv_stop(); } ldv_64937: ; } else { } goto ldv_64721; case 81: ; if (ldv_state_variable_181 != 0) { tmp___226 = __VERIFIER_nondet_int(); switch (tmp___226) { case 0: ; if (ldv_state_variable_181 == 1) { store_optimal_sectors(target_core_dev_attrib_optimal_sectors_group0, (char const *)ldvarg212, ldvarg211); ldv_state_variable_181 = 1; } else { } goto ldv_64942; case 1: ; if (ldv_state_variable_181 == 1) { show_optimal_sectors(target_core_dev_attrib_optimal_sectors_group0, ldvarg210); ldv_state_variable_181 = 1; } else { } goto ldv_64942; default: ldv_stop(); } ldv_64942: ; } else { } goto ldv_64721; case 82: ; if (ldv_state_variable_121 != 0) { tmp___227 = __VERIFIER_nondet_int(); switch (tmp___227) { case 0: ; if (ldv_state_variable_121 == 1) { target_core_stat_rmdir(target_core_stat_group_ops_group0, ldvarg214); ldv_state_variable_121 = 1; } else { } goto ldv_64947; case 1: ; if (ldv_state_variable_121 == 1) { target_core_stat_mkdir(target_core_stat_group_ops_group0, (char const *)ldvarg213); ldv_state_variable_121 = 1; } else { } goto ldv_64947; default: ldv_stop(); } ldv_64947: ; } else { } goto ldv_64721; case 83: ; if (ldv_state_variable_79 != 0) { ldv_main_exported_79(); } else { } goto ldv_64721; case 84: ; if (ldv_state_variable_154 != 0) { tmp___228 = __VERIFIER_nondet_int(); switch (tmp___228) { case 0: ; if (ldv_state_variable_154 == 1) { target_core_dev_pr_store_attr_res_aptpl_metadata(target_core_dev_pr_res_aptpl_metadata_group0, (char const *)ldvarg219, ldvarg218); ldv_state_variable_154 = 1; } else { } goto ldv_64953; case 1: ; if (ldv_state_variable_154 == 1) { target_core_dev_pr_show_attr_res_aptpl_metadata(target_core_dev_pr_res_aptpl_metadata_group0, ldvarg217); ldv_state_variable_154 = 1; } else { } goto ldv_64953; default: ldv_stop(); } ldv_64953: ; } else { } goto ldv_64721; case 85: ; if (ldv_state_variable_0 != 0) { tmp___229 = __VERIFIER_nondet_int(); switch (tmp___229) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { target_core_exit_configfs(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_64959; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_0 = target_core_init_configfs(); if (ldv_retval_0 == 0) { 
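/* Descriptive note (added): target_core_init_configfs() returned 0, so the harness treats the module
 * as loaded (ldv_state_variable_0 = 3) and marks every configfs operations/attribute table it registers
 * as callable (flag set to 1).  The ldv_initialize_*() calls prepare the *_group0/*_group1 receiver
 * objects that the dispatch loop passes to those callbacks. */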
ldv_state_variable_0 = 3; ldv_state_variable_171 = 1; ldv_state_variable_196 = 1; ldv_initialize_target_backend_dev_attrib_attribute_196(); ldv_state_variable_132 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_132(); ldv_state_variable_164 = 1; ldv_initialize_target_core_dev_wwn_attribute_164(); ldv_state_variable_169 = 1; ldv_initialize_configfs_item_operations_169(); ldv_state_variable_34 = 1; ldv_state_variable_38 = 1; ldv_state_variable_111 = 1; ldv_initialize_configfs_item_operations_111(); ldv_state_variable_9 = 1; ldv_initialize_configfs_item_operations_9(); ldv_state_variable_51 = 1; ldv_state_variable_146 = 1; ldv_state_variable_94 = 1; ldv_initialize_configfs_item_operations_94(); ldv_state_variable_183 = 1; ldv_initialize_target_backend_dev_attrib_attribute_183(); ldv_state_variable_36 = 1; ldv_initialize_configfs_item_operations_36(); ldv_state_variable_185 = 1; ldv_initialize_target_backend_dev_attrib_attribute_185(); ldv_state_variable_85 = 1; ldv_initialize_trace_event_class_85(); ldv_state_variable_105 = 1; ldv_initialize_configfs_group_operations_105(); ldv_state_variable_13 = 1; ldv_state_variable_46 = 1; ldv_initialize_configfs_item_operations_46(); ldv_state_variable_22 = 1; ldv_state_variable_42 = 1; ldv_state_variable_158 = 1; ldv_state_variable_143 = 1; ldv_state_variable_122 = 1; ldv_initialize_configfs_group_operations_122(); ldv_state_variable_53 = 1; ldv_state_variable_130 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_130(); ldv_state_variable_155 = 1; ldv_state_variable_150 = 1; ldv_state_variable_177 = 1; ldv_initialize_target_backend_dev_attrib_attribute_177(); ldv_state_variable_59 = 1; ldv_state_variable_192 = 1; ldv_initialize_target_backend_dev_attrib_attribute_192(); ldv_state_variable_134 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_134(); ldv_state_variable_83 = 1; ldv_initialize_target_backend_ops_83(); ldv_state_variable_75 = 1; ldv_state_variable_40 = 1; ldv_state_variable_156 = 1; ldv_state_variable_120 = 1; ldv_initialize_configfs_group_operations_120(); ldv_state_variable_28 = 1; ldv_state_variable_128 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_128(); ldv_state_variable_25 = 1; ldv_state_variable_100 = 1; ldv_initialize_configfs_item_operations_100(); ldv_state_variable_30 = 1; ldv_state_variable_141 = 1; ldv_initialize_configfs_group_operations_141(); ldv_state_variable_144 = 1; ldv_initialize_target_core_alua_lu_gp_attribute_144(); ldv_state_variable_116 = 1; ldv_initialize_configfs_group_operations_116(); ldv_state_variable_88 = 1; ldv_state_variable_136 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_136(); ldv_state_variable_166 = 1; ldv_initialize_target_core_dev_wwn_attribute_166(); ldv_state_variable_68 = 1; ldv_state_variable_188 = 1; ldv_initialize_target_backend_dev_attrib_attribute_188(); ldv_state_variable_70 = 1; ldv_state_variable_54 = 1; ldv_state_variable_19 = 1; ldv_state_variable_66 = 1; ldv_state_variable_45 = 1; ldv_state_variable_56 = 1; ldv_state_variable_173 = 1; ldv_state_variable_52 = 1; ldv_initialize_configfs_item_operations_52(); ldv_state_variable_41 = 1; ldv_state_variable_12 = 1; ldv_state_variable_97 = 1; ldv_initialize_configfs_item_operations_97(); ldv_state_variable_64 = 1; ldv_state_variable_39 = 1; ldv_state_variable_50 = 1; ldv_state_variable_123 = 1; ldv_initialize_configfs_item_operations_123(); ldv_state_variable_149 = 1; ldv_state_variable_133 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_133(); ldv_state_variable_77 = 1; ldv_state_variable_174 
= 1; ldv_initialize_target_backend_dev_attrib_attribute_174(); ldv_state_variable_87 = 1; ldv_state_variable_107 = 1; ldv_initialize_configfs_group_operations_107(); ldv_state_variable_48 = 1; ldv_state_variable_167 = 1; ldv_initialize_target_core_dev_wwn_attribute_167(); ldv_state_variable_91 = 1; ldv_initialize_configfs_group_operations_91(); ldv_state_variable_142 = 1; ldv_initialize_configfs_item_operations_142(); ldv_state_variable_189 = 1; ldv_initialize_target_backend_dev_attrib_attribute_189(); ldv_state_variable_152 = 1; ldv_state_variable_113 = 1; ldv_initialize_configfs_group_operations_113(); ldv_state_variable_10 = 1; ldv_state_variable_201 = 1; ldv_initialize_target_backend_dev_attrib_attribute_201(); ldv_state_variable_103 = 1; ldv_initialize_target_fabric_port_attribute_103(); ldv_state_variable_92 = 1; ldv_initialize_configfs_group_operations_92(); ldv_state_variable_115 = 1; ldv_initialize_target_fabric_mappedlun_attribute_115(); ldv_state_variable_108 = 1; ldv_initialize_configfs_group_operations_108(); ldv_state_variable_61 = 1; ldv_state_variable_182 = 1; ldv_state_variable_74 = 1; ldv_state_variable_72 = 1; ldv_state_variable_162 = 1; ldv_state_variable_179 = 1; ldv_initialize_target_backend_dev_attrib_attribute_179(); ldv_state_variable_99 = 1; ldv_initialize_configfs_group_operations_99(); ldv_state_variable_180 = 1; ldv_state_variable_119 = 1; ldv_state_variable_193 = 1; ldv_initialize_target_backend_dev_attrib_attribute_193(); ldv_state_variable_80 = 1; ldv_state_variable_26 = 1; ldv_state_variable_63 = 1; ldv_state_variable_21 = 1; ldv_state_variable_33 = 1; ldv_state_variable_170 = 1; ldv_state_variable_195 = 1; ldv_initialize_target_backend_dev_attrib_attribute_195(); ldv_state_variable_43 = 1; ldv_state_variable_117 = 1; ldv_initialize_configfs_item_operations_117(); ldv_state_variable_37 = 1; ldv_state_variable_98 = 1; ldv_initialize_configfs_group_operations_98(); ldv_state_variable_8 = 1; ldv_initialize_target_core_fabric_ops_8(); ldv_state_variable_47 = 1; ldv_state_variable_176 = 1; ldv_initialize_target_backend_dev_attrib_attribute_176(); ldv_state_variable_160 = 1; ldv_state_variable_159 = 1; ldv_state_variable_126 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_126(); ldv_state_variable_96 = 1; ldv_initialize_configfs_item_operations_96(); ldv_state_variable_23 = 1; ldv_state_variable_154 = 1; ldv_initialize_target_core_dev_pr_attribute_154(); ldv_state_variable_79 = 1; ldv_state_variable_121 = 1; ldv_initialize_configfs_group_operations_121(); ldv_state_variable_181 = 1; ldv_initialize_target_backend_dev_attrib_attribute_181(); ldv_state_variable_131 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_131(); ldv_state_variable_104 = 1; ldv_initialize_target_fabric_port_attribute_104(); ldv_state_variable_124 = 1; ldv_state_variable_140 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_140(); ldv_state_variable_187 = 1; ldv_initialize_target_backend_dev_attrib_attribute_187(); ldv_state_variable_24 = 1; ldv_state_variable_178 = 1; ldv_initialize_target_backend_dev_attrib_attribute_178(); ldv_state_variable_49 = 1; ldv_state_variable_145 = 1; ldv_initialize_configfs_item_operations_145(); ldv_state_variable_172 = 1; ldv_state_variable_191 = 1; ldv_initialize_target_backend_dev_attrib_attribute_191(); ldv_state_variable_69 = 1; ldv_state_variable_112 = 1; ldv_initialize_configfs_item_operations_112(); ldv_state_variable_14 = 1; ldv_state_variable_135 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_135(); ldv_state_variable_184 = 
1; ldv_state_variable_168 = 1; ldv_initialize_target_core_dev_wwn_attribute_168(); ldv_state_variable_202 = 1; ldv_initialize_target_backend_dev_attrib_attribute_202(); ldv_state_variable_147 = 1; ldv_state_variable_82 = 1; ldv_state_variable_110 = 1; ldv_initialize_configfs_item_operations_110(); ldv_state_variable_186 = 1; ldv_initialize_target_backend_dev_attrib_attribute_186(); ldv_state_variable_17 = 1; ldv_state_variable_129 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_129(); ldv_state_variable_139 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_139(); ldv_state_variable_198 = 1; ldv_initialize_target_backend_dev_attrib_attribute_198(); ldv_state_variable_165 = 1; ldv_initialize_target_core_dev_wwn_attribute_165(); ldv_state_variable_204 = 1; ldv_state_variable_67 = 1; ldv_state_variable_62 = 1; ldv_state_variable_76 = 1; ldv_state_variable_86 = 1; ldv_initialize_trace_event_class_86(); ldv_state_variable_73 = 1; ldv_state_variable_101 = 1; ldv_initialize_target_fabric_port_attribute_101(); ldv_state_variable_60 = 1; ldv_state_variable_81 = 1; ldv_state_variable_137 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_137(); ldv_state_variable_15 = 1; ldv_state_variable_153 = 1; ldv_initialize_configfs_item_operations_153(); ldv_state_variable_58 = 1; ldv_state_variable_114 = 1; ldv_initialize_configfs_item_operations_114(); ldv_state_variable_199 = 1; ldv_initialize_target_backend_dev_attrib_attribute_199(); ldv_state_variable_138 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_138(); ldv_state_variable_203 = 1; ldv_initialize_configfs_group_operations_203(); ldv_state_variable_197 = 1; ldv_initialize_target_backend_dev_attrib_attribute_197(); ldv_state_variable_29 = 1; ldv_state_variable_65 = 1; ldv_state_variable_157 = 1; ldv_state_variable_106 = 1; ldv_initialize_configfs_item_operations_106(); ldv_state_variable_93 = 1; ldv_state_variable_78 = 1; ldv_initialize_configfs_item_operations_78(); ldv_state_variable_11 = 1; ldv_state_variable_35 = 1; ldv_state_variable_31 = 1; ldv_initialize_configfs_item_operations_31(); ldv_state_variable_148 = 1; ldv_state_variable_175 = 1; ldv_initialize_target_backend_dev_attrib_attribute_175(); ldv_state_variable_89 = 1; ldv_initialize_configfs_item_operations_89(); ldv_state_variable_151 = 1; ldv_state_variable_109 = 1; ldv_initialize_configfs_item_operations_109(); ldv_state_variable_163 = 1; ldv_initialize_configfs_item_operations_163(); ldv_state_variable_20 = 1; ldv_state_variable_57 = 1; ldv_state_variable_95 = 1; ldv_initialize_configfs_item_operations_95(); ldv_state_variable_194 = 1; ldv_initialize_target_backend_dev_attrib_attribute_194(); ldv_state_variable_161 = 1; ldv_state_variable_190 = 1; ldv_state_variable_27 = 1; ldv_state_variable_84 = 1; ldv_state_variable_55 = 1; ldv_state_variable_44 = 1; ldv_state_variable_16 = 1; ldv_initialize_configfs_item_operations_16(); ldv_state_variable_125 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_125(); ldv_state_variable_18 = 1; ldv_state_variable_200 = 1; ldv_initialize_target_backend_dev_attrib_attribute_200(); ldv_state_variable_102 = 1; ldv_initialize_target_fabric_port_attribute_102(); ldv_state_variable_71 = 1; ldv_initialize_configfs_item_operations_71(); ldv_state_variable_118 = 1; ldv_initialize_target_core_hba_attribute_118(); ldv_state_variable_90 = 1; ldv_initialize_configfs_item_operations_90(); ldv_state_variable_32 = 1; ldv_state_variable_127 = 1; ldv_initialize_target_core_alua_tg_pt_gp_attribute_127(); } else { } if (ldv_retval_0 != 0) 
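/* Descriptive note (added): a nonzero return means module initialization failed; the harness records
 * state 2 and jumps to ldv_final, ending this exploration path. */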
{ ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_64959; default: ldv_stop(); } ldv_64959: ; } else { } goto ldv_64721; case 86: ; if (ldv_state_variable_23 != 0) { ldv_main_exported_23(); } else { } goto ldv_64721; case 87: ; if (ldv_state_variable_96 != 0) { ldv_main_exported_96(); } else { } goto ldv_64721; case 88: ; if (ldv_state_variable_126 != 0) { tmp___230 = __VERIFIER_nondet_int(); switch (tmp___230) { case 0: ; if (ldv_state_variable_126 == 1) { target_core_alua_tg_pt_gp_store_attr_preferred(target_core_alua_tg_pt_gp_preferred_group0, (char const *)ldvarg227, ldvarg226); ldv_state_variable_126 = 1; } else { } goto ldv_64966; case 1: ; if (ldv_state_variable_126 == 1) { target_core_alua_tg_pt_gp_show_attr_preferred(target_core_alua_tg_pt_gp_preferred_group0, ldvarg225); ldv_state_variable_126 = 1; } else { } goto ldv_64966; default: ldv_stop(); } ldv_64966: ; } else { } goto ldv_64721; case 89: ; if (ldv_state_variable_159 != 0) { tmp___231 = __VERIFIER_nondet_int(); switch (tmp___231) { case 0: ; if (ldv_state_variable_159 == 1) { target_core_dev_pr_show_attr_res_pr_holder_tg_port(ldvarg228, ldvarg229); ldv_state_variable_159 = 1; } else { } goto ldv_64971; default: ldv_stop(); } ldv_64971: ; } else { } goto ldv_64721; case 90: ; if (ldv_state_variable_160 != 0) { tmp___232 = __VERIFIER_nondet_int(); switch (tmp___232) { case 0: ; if (ldv_state_variable_160 == 1) { target_core_dev_pr_show_attr_res_pr_generation(ldvarg230, ldvarg231); ldv_state_variable_160 = 1; } else { } goto ldv_64975; default: ldv_stop(); } ldv_64975: ; } else { } goto ldv_64721; case 91: ; if (ldv_state_variable_176 != 0) { tmp___233 = __VERIFIER_nondet_int(); switch (tmp___233) { case 0: ; if (ldv_state_variable_176 == 1) { store_unmap_granularity(target_core_dev_attrib_unmap_granularity_group0, (char const *)ldvarg234, ldvarg233); ldv_state_variable_176 = 1; } else { } goto ldv_64979; case 1: ; if (ldv_state_variable_176 == 1) { show_unmap_granularity(target_core_dev_attrib_unmap_granularity_group0, ldvarg232); ldv_state_variable_176 = 1; } else { } goto ldv_64979; default: ldv_stop(); } ldv_64979: ; } else { } goto ldv_64721; case 92: ; if (ldv_state_variable_47 != 0) { ldv_main_exported_47(); } else { } goto ldv_64721; case 93: ; if (ldv_state_variable_8 != 0) { ldv_main_exported_8(); } else { } goto ldv_64721; case 94: ; if (ldv_state_variable_98 != 0) { ldv_main_exported_98(); } else { } goto ldv_64721; case 95: ; if (ldv_state_variable_37 != 0) { ldv_main_exported_37(); } else { } goto ldv_64721; case 96: ; if (ldv_state_variable_117 != 0) { tmp___234 = __VERIFIER_nondet_int(); switch (tmp___234) { case 0: ; if (ldv_state_variable_117 == 1) { target_core_hba_attr_show(target_core_hba_item_ops_group1, target_core_hba_item_ops_group0, ldvarg243); ldv_state_variable_117 = 1; } else { } if (ldv_state_variable_117 == 2) { target_core_hba_attr_show(target_core_hba_item_ops_group1, target_core_hba_item_ops_group0, ldvarg243); ldv_state_variable_117 = 2; } else { } goto ldv_64988; case 1: ; if (ldv_state_variable_117 == 2) { target_core_hba_release(target_core_hba_item_ops_group1); ldv_state_variable_117 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_64988; case 2: ; if (ldv_state_variable_117 == 1) { target_core_hba_attr_store(target_core_hba_item_ops_group1, target_core_hba_item_ops_group0, (char const *)ldvarg242, ldvarg241); ldv_state_variable_117 = 1; } else { } if (ldv_state_variable_117 == 2) { target_core_hba_attr_store(target_core_hba_item_ops_group1, 
target_core_hba_item_ops_group0, (char const *)ldvarg242, ldvarg241); ldv_state_variable_117 = 2; } else { } goto ldv_64988; case 3: ; if (ldv_state_variable_117 == 1) { ldv_probe_117(); ldv_state_variable_117 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_64988; default: ldv_stop(); } ldv_64988: ; } else { } goto ldv_64721; case 97: ; if (ldv_state_variable_43 != 0) { ldv_main_exported_43(); } else { } goto ldv_64721; case 98: ; if (ldv_state_variable_195 != 0) { tmp___235 = __VERIFIER_nondet_int(); switch (tmp___235) { case 0: ; if (ldv_state_variable_195 == 1) { store_emulate_tpu(target_core_dev_attrib_emulate_tpu_group0, (char const *)ldvarg248, ldvarg247); ldv_state_variable_195 = 1; } else { } goto ldv_64996; case 1: ; if (ldv_state_variable_195 == 1) { show_emulate_tpu(target_core_dev_attrib_emulate_tpu_group0, ldvarg246); ldv_state_variable_195 = 1; } else { } goto ldv_64996; default: ldv_stop(); } ldv_64996: ; } else { } goto ldv_64721; case 99: ; goto ldv_64721; case 100: ; if (ldv_state_variable_170 != 0) { tmp___236 = __VERIFIER_nondet_int(); switch (tmp___236) { case 0: ; if (ldv_state_variable_170 == 1) { show_hw_queue_depth(ldvarg249, ldvarg250); ldv_state_variable_170 = 1; } else { } goto ldv_65002; default: ldv_stop(); } ldv_65002: ; } else { } goto ldv_64721; case 101: ; if (ldv_state_variable_33 != 0) { ldv_main_exported_33(); } else { } goto ldv_64721; case 102: ; if (ldv_state_variable_21 != 0) { ldv_main_exported_21(); } else { } goto ldv_64721; case 103: ; if (ldv_state_variable_63 != 0) { ldv_main_exported_63(); } else { } goto ldv_64721; case 104: ; goto ldv_64721; case 105: ; if (ldv_state_variable_26 != 0) { ldv_main_exported_26(); } else { } goto ldv_64721; case 106: ; if (ldv_state_variable_80 != 0) { ldv_main_exported_80(); } else { } goto ldv_64721; case 107: ; if (ldv_state_variable_193 != 0) { tmp___237 = __VERIFIER_nondet_int(); switch (tmp___237) { case 0: ; if (ldv_state_variable_193 == 1) { store_emulate_caw(target_core_dev_attrib_emulate_caw_group0, (char const *)ldvarg263, ldvarg262); ldv_state_variable_193 = 1; } else { } goto ldv_65012; case 1: ; if (ldv_state_variable_193 == 1) { show_emulate_caw(target_core_dev_attrib_emulate_caw_group0, ldvarg261); ldv_state_variable_193 = 1; } else { } goto ldv_65012; default: ldv_stop(); } ldv_65012: ; } else { } goto ldv_64721; case 108: ; if (ldv_state_variable_119 != 0) { tmp___238 = __VERIFIER_nondet_int(); switch (tmp___238) { case 0: ; if (ldv_state_variable_119 == 1) { target_core_hba_show_attr_hba_info(ldvarg264, ldvarg265); ldv_state_variable_119 = 1; } else { } goto ldv_65017; default: ldv_stop(); } ldv_65017: ; } else { } goto ldv_64721; case 109: ; if (ldv_state_variable_180 != 0) { tmp___239 = __VERIFIER_nondet_int(); switch (tmp___239) { case 0: ; if (ldv_state_variable_180 == 1) { show_hw_queue_depth(ldvarg266, ldvarg267); ldv_state_variable_180 = 1; } else { } goto ldv_65021; default: ldv_stop(); } ldv_65021: ; } else { } goto ldv_64721; case 110: ; if (ldv_state_variable_99 != 0) { ldv_main_exported_99(); } else { } goto ldv_64721; case 111: ; if (ldv_state_variable_179 != 0) { tmp___240 = __VERIFIER_nondet_int(); switch (tmp___240) { case 0: ; if (ldv_state_variable_179 == 1) { store_queue_depth(target_core_dev_attrib_queue_depth_group0, (char const *)ldvarg272, ldvarg271); ldv_state_variable_179 = 1; } else { } goto ldv_65026; case 1: ; if (ldv_state_variable_179 == 1) { show_queue_depth(target_core_dev_attrib_queue_depth_group0, ldvarg270); ldv_state_variable_179 = 1; } else { } goto 
ldv_65026; default: ldv_stop(); } ldv_65026: ; } else { } goto ldv_64721; case 112: ; if (ldv_state_variable_162 != 0) { tmp___241 = __VERIFIER_nondet_int(); switch (tmp___241) { case 0: ; if (ldv_state_variable_162 == 1) { target_core_dev_pr_show_attr_res_holder(ldvarg273, ldvarg274); ldv_state_variable_162 = 1; } else { } goto ldv_65031; default: ldv_stop(); } ldv_65031: ; } else { } goto ldv_64721; case 113: ; if (ldv_state_variable_72 != 0) { ldv_main_exported_72(); } else { } goto ldv_64721; case 114: ; if (ldv_state_variable_74 != 0) { ldv_main_exported_74(); } else { } goto ldv_64721; case 115: ; if (ldv_state_variable_182 != 0) { tmp___242 = __VERIFIER_nondet_int(); switch (tmp___242) { case 0: ; if (ldv_state_variable_182 == 1) { show_hw_max_sectors(ldvarg279, ldvarg280); ldv_state_variable_182 = 1; } else { } goto ldv_65037; default: ldv_stop(); } ldv_65037: ; } else { } goto ldv_64721; case 116: ; if (ldv_state_variable_61 != 0) { ldv_main_exported_61(); } else { } goto ldv_64721; case 117: ; if (ldv_state_variable_108 != 0) { ldv_main_exported_108(); } else { } goto ldv_64721; case 118: ; if (ldv_state_variable_115 != 0) { ldv_main_exported_115(); } else { } goto ldv_64721; case 119: ; if (ldv_state_variable_92 != 0) { ldv_main_exported_92(); } else { } goto ldv_64721; case 120: ; if (ldv_state_variable_103 != 0) { ldv_main_exported_103(); } else { } goto ldv_64721; case 121: ; if (ldv_state_variable_201 != 0) { tmp___243 = __VERIFIER_nondet_int(); switch (tmp___243) { case 0: ; if (ldv_state_variable_201 == 1) { store_emulate_dpo(target_core_dev_attrib_emulate_dpo_group0, (char const *)ldvarg295, ldvarg294); ldv_state_variable_201 = 1; } else { } goto ldv_65046; case 1: ; if (ldv_state_variable_201 == 1) { show_emulate_dpo(target_core_dev_attrib_emulate_dpo_group0, ldvarg293); ldv_state_variable_201 = 1; } else { } goto ldv_65046; default: ldv_stop(); } ldv_65046: ; } else { } goto ldv_64721; case 122: ; if (ldv_state_variable_10 != 0) { ldv_main_exported_10(); } else { } goto ldv_64721; case 123: ; if (ldv_state_variable_113 != 0) { ldv_main_exported_113(); } else { } goto ldv_64721; case 124: ; if (ldv_state_variable_152 != 0) { tmp___244 = __VERIFIER_nondet_int(); switch (tmp___244) { case 0: ; if (ldv_state_variable_152 == 1) { target_core_show_dev_info(ldvarg300, ldvarg301); ldv_state_variable_152 = 1; } else { } goto ldv_65053; default: ldv_stop(); } ldv_65053: ; } else { } goto ldv_64721; case 125: ; if (ldv_state_variable_189 != 0) { tmp___245 = __VERIFIER_nondet_int(); switch (tmp___245) { case 0: ; if (ldv_state_variable_189 == 1) { store_pi_prot_format(target_core_dev_attrib_pi_prot_format_group0, (char const *)ldvarg304, ldvarg303); ldv_state_variable_189 = 1; } else { } goto ldv_65057; case 1: ; if (ldv_state_variable_189 == 1) { show_pi_prot_format(target_core_dev_attrib_pi_prot_format_group0, ldvarg302); ldv_state_variable_189 = 1; } else { } goto ldv_65057; default: ldv_stop(); } ldv_65057: ; } else { } goto ldv_64721; case 126: ; if (ldv_state_variable_142 != 0) { tmp___246 = __VERIFIER_nondet_int(); switch (tmp___246) { case 0: ; if (ldv_state_variable_142 == 1) { target_core_alua_lu_gp_attr_show(target_core_alua_lu_gp_ops_group1, target_core_alua_lu_gp_ops_group0, ldvarg307); ldv_state_variable_142 = 1; } else { } if (ldv_state_variable_142 == 2) { target_core_alua_lu_gp_attr_show(target_core_alua_lu_gp_ops_group1, target_core_alua_lu_gp_ops_group0, ldvarg307); ldv_state_variable_142 = 2; } else { } goto ldv_65062; case 1: ; if (ldv_state_variable_142 == 2) 
{ target_core_alua_lu_gp_release(target_core_alua_lu_gp_ops_group1); ldv_state_variable_142 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_65062; case 2: ; if (ldv_state_variable_142 == 1) { target_core_alua_lu_gp_attr_store(target_core_alua_lu_gp_ops_group1, target_core_alua_lu_gp_ops_group0, (char const *)ldvarg306, ldvarg305); ldv_state_variable_142 = 1; } else { } if (ldv_state_variable_142 == 2) { target_core_alua_lu_gp_attr_store(target_core_alua_lu_gp_ops_group1, target_core_alua_lu_gp_ops_group0, (char const *)ldvarg306, ldvarg305); ldv_state_variable_142 = 2; } else { } goto ldv_65062; case 3: ; if (ldv_state_variable_142 == 1) { ldv_probe_142(); ldv_state_variable_142 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_65062; default: ldv_stop(); } ldv_65062: ; } else { } goto ldv_64721; case 127: ; if (ldv_state_variable_91 != 0) { ldv_main_exported_91(); } else { } goto ldv_64721; case 128: ; if (ldv_state_variable_167 != 0) { tmp___247 = __VERIFIER_nondet_int(); switch (tmp___247) { case 0: ; if (ldv_state_variable_167 == 1) { target_core_dev_wwn_store_attr_vpd_protocol_identifier(target_core_dev_wwn_vpd_protocol_identifier_group0, (char const *)ldvarg312, ldvarg311); ldv_state_variable_167 = 1; } else { } goto ldv_65070; case 1: ; if (ldv_state_variable_167 == 1) { target_core_dev_wwn_show_attr_vpd_protocol_identifier(target_core_dev_wwn_vpd_protocol_identifier_group0, ldvarg310); ldv_state_variable_167 = 1; } else { } goto ldv_65070; default: ldv_stop(); } ldv_65070: ; } else { } goto ldv_64721; case 129: ; if (ldv_state_variable_48 != 0) { ldv_main_exported_48(); } else { } goto ldv_64721; case 130: ; if (ldv_state_variable_107 != 0) { ldv_main_exported_107(); } else { } goto ldv_64721; case 131: ; if (ldv_state_variable_87 != 0) { ldv_main_exported_87(); } else { } goto ldv_64721; case 132: ; if (ldv_state_variable_174 != 0) { tmp___248 = __VERIFIER_nondet_int(); switch (tmp___248) { case 0: ; if (ldv_state_variable_174 == 1) { store_max_write_same_len(target_core_dev_attrib_max_write_same_len_group0, (char const *)ldvarg322, ldvarg321); ldv_state_variable_174 = 1; } else { } goto ldv_65078; case 1: ; if (ldv_state_variable_174 == 1) { show_max_write_same_len(target_core_dev_attrib_max_write_same_len_group0, ldvarg320); ldv_state_variable_174 = 1; } else { } goto ldv_65078; default: ldv_stop(); } ldv_65078: ; } else { } goto ldv_64721; case 133: ; if (ldv_state_variable_77 != 0) { ldv_main_exported_77(); } else { } goto ldv_64721; case 134: ; if (ldv_state_variable_133 != 0) { tmp___249 = __VERIFIER_nondet_int(); switch (tmp___249) { case 0: ; if (ldv_state_variable_133 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_standby(target_core_alua_tg_pt_gp_alua_support_standby_group0, (char const *)ldvarg327, ldvarg326); ldv_state_variable_133 = 1; } else { } goto ldv_65084; case 1: ; if (ldv_state_variable_133 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_standby(target_core_alua_tg_pt_gp_alua_support_standby_group0, ldvarg325); ldv_state_variable_133 = 1; } else { } goto ldv_65084; default: ldv_stop(); } ldv_65084: ; } else { } goto ldv_64721; case 135: ; if (ldv_state_variable_149 != 0) { tmp___250 = __VERIFIER_nondet_int(); switch (tmp___250) { case 0: ; if (ldv_state_variable_149 == 1) { target_core_store_dev_udev_path(ldvarg330, (char const *)ldvarg332, ldvarg331); ldv_state_variable_149 = 1; } else { } goto ldv_65089; case 1: ; if (ldv_state_variable_149 == 1) { target_core_show_dev_udev_path(ldvarg328, ldvarg329); ldv_state_variable_149 = 1; } else { } 
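/* Note: the ALUA group-ops interfaces dispatched in case 126 above and case 136
   below (state variables 142 and 123) additionally model an item open/close life
   cycle: ldv_probe_* moves the state variable from 1 to 2 and increments ref_cnt,
   and the corresponding *_release callback is only reachable in state 2, where it
   reverts the state and drops ref_cnt again. */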
goto ldv_65089; default: ldv_stop(); } ldv_65089: ; } else { } goto ldv_64721; case 136: ; if (ldv_state_variable_123 != 0) { tmp___251 = __VERIFIER_nondet_int(); switch (tmp___251) { case 0: ; if (ldv_state_variable_123 == 1) { target_core_alua_tg_pt_gp_attr_show(target_core_alua_tg_pt_gp_ops_group1, target_core_alua_tg_pt_gp_ops_group0, ldvarg335); ldv_state_variable_123 = 1; } else { } if (ldv_state_variable_123 == 2) { target_core_alua_tg_pt_gp_attr_show(target_core_alua_tg_pt_gp_ops_group1, target_core_alua_tg_pt_gp_ops_group0, ldvarg335); ldv_state_variable_123 = 2; } else { } goto ldv_65094; case 1: ; if (ldv_state_variable_123 == 2) { target_core_alua_tg_pt_gp_release(target_core_alua_tg_pt_gp_ops_group1); ldv_state_variable_123 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_65094; case 2: ; if (ldv_state_variable_123 == 1) { target_core_alua_tg_pt_gp_attr_store(target_core_alua_tg_pt_gp_ops_group1, target_core_alua_tg_pt_gp_ops_group0, (char const *)ldvarg334, ldvarg333); ldv_state_variable_123 = 1; } else { } if (ldv_state_variable_123 == 2) { target_core_alua_tg_pt_gp_attr_store(target_core_alua_tg_pt_gp_ops_group1, target_core_alua_tg_pt_gp_ops_group0, (char const *)ldvarg334, ldvarg333); ldv_state_variable_123 = 2; } else { } goto ldv_65094; case 3: ; if (ldv_state_variable_123 == 1) { ldv_probe_123(); ldv_state_variable_123 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_65094; default: ldv_stop(); } ldv_65094: ; } else { } goto ldv_64721; case 137: ; if (ldv_state_variable_50 != 0) { ldv_main_exported_50(); } else { } goto ldv_64721; case 138: ; if (ldv_state_variable_39 != 0) { ldv_main_exported_39(); } else { } goto ldv_64721; case 139: ; if (ldv_state_variable_64 != 0) { ldv_main_exported_64(); } else { } goto ldv_64721; case 140: ; if (ldv_state_variable_97 != 0) { ldv_main_exported_97(); } else { } goto ldv_64721; case 141: ; if (ldv_state_variable_12 != 0) { ldv_main_exported_12(); } else { } goto ldv_64721; case 142: ; if (ldv_state_variable_41 != 0) { ldv_main_exported_41(); } else { } goto ldv_64721; case 143: ; if (ldv_state_variable_52 != 0) { ldv_main_exported_52(); } else { } goto ldv_64721; case 144: ; if (ldv_state_variable_173 != 0) { tmp___252 = __VERIFIER_nondet_int(); switch (tmp___252) { case 0: ; if (ldv_state_variable_173 == 1) { show_hw_pi_prot_type(ldvarg352, ldvarg353); ldv_state_variable_173 = 1; } else { } goto ldv_65108; default: ldv_stop(); } ldv_65108: ; } else { } goto ldv_64721; case 145: ; if (ldv_state_variable_56 != 0) { ldv_main_exported_56(); } else { } goto ldv_64721; case 146: ; if (ldv_state_variable_45 != 0) { ldv_main_exported_45(); } else { } goto ldv_64721; case 147: ; if (ldv_state_variable_66 != 0) { ldv_main_exported_66(); } else { } goto ldv_64721; case 148: ; if (ldv_state_variable_19 != 0) { ldv_main_exported_19(); } else { } goto ldv_64721; case 149: ; if (ldv_state_variable_54 != 0) { ldv_main_exported_54(); } else { } goto ldv_64721; case 150: ; if (ldv_state_variable_70 != 0) { ldv_main_exported_70(); } else { } goto ldv_64721; case 151: ; if (ldv_state_variable_188 != 0) { tmp___253 = __VERIFIER_nondet_int(); switch (tmp___253) { case 0: ; if (ldv_state_variable_188 == 1) { store_enforce_pr_isids(target_core_dev_attrib_enforce_pr_isids_group0, (char const *)ldvarg368, ldvarg367); ldv_state_variable_188 = 1; } else { } goto ldv_65118; case 1: ; if (ldv_state_variable_188 == 1) { show_enforce_pr_isids(target_core_dev_attrib_enforce_pr_isids_group0, ldvarg366); ldv_state_variable_188 = 1; } else { } goto ldv_65118; 
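/* Any other nondeterministic value reaches ldv_stop(), which (in the LDV
   environment model) cuts off this infeasible environment behaviour. */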
default: ldv_stop(); } ldv_65118: ; } else { } goto ldv_64721; case 152: ; if (ldv_state_variable_68 != 0) { ldv_main_exported_68(); } else { } goto ldv_64721; case 153: ; if (ldv_state_variable_166 != 0) { tmp___254 = __VERIFIER_nondet_int(); switch (tmp___254) { case 0: ; if (ldv_state_variable_166 == 1) { target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(target_core_dev_wwn_vpd_assoc_logical_unit_group0, (char const *)ldvarg373, ldvarg372); ldv_state_variable_166 = 1; } else { } goto ldv_65124; case 1: ; if (ldv_state_variable_166 == 1) { target_core_dev_wwn_show_attr_vpd_assoc_logical_unit(target_core_dev_wwn_vpd_assoc_logical_unit_group0, ldvarg371); ldv_state_variable_166 = 1; } else { } goto ldv_65124; default: ldv_stop(); } ldv_65124: ; } else { } goto ldv_64721; case 154: ; goto ldv_64721; case 155: ; if (ldv_state_variable_136 != 0) { tmp___255 = __VERIFIER_nondet_int(); switch (tmp___255) { case 0: ; if (ldv_state_variable_136 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_offline(target_core_alua_tg_pt_gp_alua_support_offline_group0, (char const *)ldvarg376, ldvarg375); ldv_state_variable_136 = 1; } else { } goto ldv_65130; case 1: ; if (ldv_state_variable_136 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_offline(target_core_alua_tg_pt_gp_alua_support_offline_group0, ldvarg374); ldv_state_variable_136 = 1; } else { } goto ldv_65130; default: ldv_stop(); } ldv_65130: ; } else { } goto ldv_64721; case 156: ; if (ldv_state_variable_88 != 0) { ldv_main_exported_88(); } else { } goto ldv_64721; case 157: ; if (ldv_state_variable_116 != 0) { tmp___256 = __VERIFIER_nondet_int(); switch (tmp___256) { case 0: ; if (ldv_state_variable_116 == 1) { target_core_call_delhbafromtarget(target_core_group_ops_group0, ldvarg381); ldv_state_variable_116 = 1; } else { } goto ldv_65136; case 1: ; if (ldv_state_variable_116 == 1) { target_core_call_addhbatotarget(target_core_group_ops_group0, (char const *)ldvarg380); ldv_state_variable_116 = 1; } else { } goto ldv_65136; default: ldv_stop(); } ldv_65136: ; } else { } goto ldv_64721; case 158: ; if (ldv_state_variable_144 != 0) { tmp___257 = __VERIFIER_nondet_int(); switch (tmp___257) { case 0: ; if (ldv_state_variable_144 == 1) { target_core_alua_lu_gp_store_attr_lu_gp_id(target_core_alua_lu_gp_lu_gp_id_group0, (char const *)ldvarg384, ldvarg383); ldv_state_variable_144 = 1; } else { } goto ldv_65141; case 1: ; if (ldv_state_variable_144 == 1) { target_core_alua_lu_gp_show_attr_lu_gp_id(target_core_alua_lu_gp_lu_gp_id_group0, ldvarg382); ldv_state_variable_144 = 1; } else { } goto ldv_65141; default: ldv_stop(); } ldv_65141: ; } else { } goto ldv_64721; case 159: ; if (ldv_state_variable_141 != 0) { tmp___258 = __VERIFIER_nondet_int(); switch (tmp___258) { case 0: ; if (ldv_state_variable_141 == 1) { target_core_alua_drop_lu_gp(target_core_alua_lu_gps_group_ops_group0, ldvarg386); ldv_state_variable_141 = 1; } else { } goto ldv_65146; case 1: ; if (ldv_state_variable_141 == 1) { target_core_alua_create_lu_gp(target_core_alua_lu_gps_group_ops_group0, (char const *)ldvarg385); ldv_state_variable_141 = 1; } else { } goto ldv_65146; default: ldv_stop(); } ldv_65146: ; } else { } goto ldv_64721; case 160: ; if (ldv_state_variable_30 != 0) { ldv_main_exported_30(); } else { } goto ldv_64721; case 161: ; if (ldv_state_variable_100 != 0) { ldv_main_exported_100(); } else { } goto ldv_64721; case 162: ; if (ldv_state_variable_25 != 0) { ldv_main_exported_25(); } else { } goto ldv_64721; case 163: ; if (ldv_state_variable_128 != 0) { 
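/* Typical configfs attribute interface: __VERIFIER_nondet_int() chooses
   nondeterministically between the store (case 0) and show (case 1) callbacks of
   the trans_delay_msecs attribute; the interface stays in the registered state (1)
   throughout. */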
tmp___259 = __VERIFIER_nondet_int(); switch (tmp___259) { case 0: ; if (ldv_state_variable_128 == 1) { target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(target_core_alua_tg_pt_gp_trans_delay_msecs_group0, (char const *)ldvarg396, ldvarg395); ldv_state_variable_128 = 1; } else { } goto ldv_65154; case 1: ; if (ldv_state_variable_128 == 1) { target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(target_core_alua_tg_pt_gp_trans_delay_msecs_group0, ldvarg394); ldv_state_variable_128 = 1; } else { } goto ldv_65154; default: ldv_stop(); } ldv_65154: ; } else { } goto ldv_64721; case 164: ; if (ldv_state_variable_28 != 0) { ldv_main_exported_28(); } else { } goto ldv_64721; case 165: ; if (ldv_state_variable_120 != 0) { tmp___260 = __VERIFIER_nondet_int(); switch (tmp___260) { case 0: ; if (ldv_state_variable_120 == 1) { target_core_drop_subdev(target_core_hba_group_ops_group0, ldvarg400); ldv_state_variable_120 = 1; } else { } goto ldv_65160; case 1: ; if (ldv_state_variable_120 == 1) { target_core_make_subdev(target_core_hba_group_ops_group0, (char const *)ldvarg399); ldv_state_variable_120 = 1; } else { } goto ldv_65160; default: ldv_stop(); } ldv_65160: ; } else { } goto ldv_64721; case 166: ; if (ldv_state_variable_156 != 0) { tmp___261 = __VERIFIER_nondet_int(); switch (tmp___261) { case 0: ; if (ldv_state_variable_156 == 1) { target_core_dev_pr_show_attr_res_type(ldvarg401, ldvarg402); ldv_state_variable_156 = 1; } else { } goto ldv_65165; default: ldv_stop(); } ldv_65165: ; } else { } goto ldv_64721; case 167: ; if (ldv_state_variable_134 != 0) { tmp___262 = __VERIFIER_nondet_int(); switch (tmp___262) { case 0: ; if (ldv_state_variable_134 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_unavailable(target_core_alua_tg_pt_gp_alua_support_unavailable_group0, (char const *)ldvarg405, ldvarg404); ldv_state_variable_134 = 1; } else { } goto ldv_65169; case 1: ; if (ldv_state_variable_134 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_unavailable(target_core_alua_tg_pt_gp_alua_support_unavailable_group0, ldvarg403); ldv_state_variable_134 = 1; } else { } goto ldv_65169; default: ldv_stop(); } ldv_65169: ; } else { } goto ldv_64721; case 168: ; if (ldv_state_variable_40 != 0) { ldv_main_exported_40(); } else { } goto ldv_64721; case 169: ; if (ldv_state_variable_75 != 0) { ldv_main_exported_75(); } else { } goto ldv_64721; case 170: ; if (ldv_state_variable_83 != 0) { ldv_main_exported_83(); } else { } goto ldv_64721; case 171: ; if (ldv_state_variable_192 != 0) { tmp___263 = __VERIFIER_nondet_int(); switch (tmp___263) { case 0: ; if (ldv_state_variable_192 == 1) { store_emulate_3pc(target_core_dev_attrib_emulate_3pc_group0, (char const *)ldvarg418, ldvarg417); ldv_state_variable_192 = 1; } else { } goto ldv_65177; case 1: ; if (ldv_state_variable_192 == 1) { show_emulate_3pc(target_core_dev_attrib_emulate_3pc_group0, ldvarg416); ldv_state_variable_192 = 1; } else { } goto ldv_65177; default: ldv_stop(); } ldv_65177: ; } else { } goto ldv_64721; case 172: ; if (ldv_state_variable_59 != 0) { ldv_main_exported_59(); } else { } goto ldv_64721; case 173: ; if (ldv_state_variable_177 != 0) { tmp___264 = __VERIFIER_nondet_int(); switch (tmp___264) { case 0: ; if (ldv_state_variable_177 == 1) { store_max_unmap_block_desc_count(target_core_dev_attrib_max_unmap_block_desc_count_group0, (char const *)ldvarg423, ldvarg422); ldv_state_variable_177 = 1; } else { } goto ldv_65183; case 1: ; if (ldv_state_variable_177 == 1) { 
show_max_unmap_block_desc_count(target_core_dev_attrib_max_unmap_block_desc_count_group0, ldvarg421); ldv_state_variable_177 = 1; } else { } goto ldv_65183; default: ldv_stop(); } ldv_65183: ; } else { } goto ldv_64721; case 174: ; if (ldv_state_variable_150 != 0) { tmp___265 = __VERIFIER_nondet_int(); switch (tmp___265) { case 0: ; if (ldv_state_variable_150 == 1) { target_core_store_dev_alias(ldvarg426, (char const *)ldvarg428, ldvarg427); ldv_state_variable_150 = 1; } else { } goto ldv_65188; case 1: ; if (ldv_state_variable_150 == 1) { target_core_show_dev_alias(ldvarg424, ldvarg425); ldv_state_variable_150 = 1; } else { } goto ldv_65188; default: ldv_stop(); } ldv_65188: ; } else { } goto ldv_64721; case 175: ; if (ldv_state_variable_155 != 0) { tmp___266 = __VERIFIER_nondet_int(); switch (tmp___266) { case 0: ; if (ldv_state_variable_155 == 1) { target_core_dev_pr_show_attr_res_aptpl_active(ldvarg429, ldvarg430); ldv_state_variable_155 = 1; } else { } goto ldv_65193; default: ldv_stop(); } ldv_65193: ; } else { } goto ldv_64721; case 176: ; if (ldv_state_variable_130 != 0) { tmp___267 = __VERIFIER_nondet_int(); switch (tmp___267) { case 0: ; if (ldv_state_variable_130 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(target_core_alua_tg_pt_gp_alua_write_metadata_group0, (char const *)ldvarg433, ldvarg432); ldv_state_variable_130 = 1; } else { } goto ldv_65197; case 1: ; if (ldv_state_variable_130 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(target_core_alua_tg_pt_gp_alua_write_metadata_group0, ldvarg431); ldv_state_variable_130 = 1; } else { } goto ldv_65197; default: ldv_stop(); } ldv_65197: ; } else { } goto ldv_64721; case 177: ; if (ldv_state_variable_53 != 0) { ldv_main_exported_53(); } else { } goto ldv_64721; case 178: ; if (ldv_state_variable_122 != 0) { tmp___268 = __VERIFIER_nondet_int(); switch (tmp___268) { case 0: ; if (ldv_state_variable_122 == 1) { target_core_alua_drop_tg_pt_gp(target_core_alua_tg_pt_gps_group_ops_group0, ldvarg437); ldv_state_variable_122 = 1; } else { } goto ldv_65203; case 1: ; if (ldv_state_variable_122 == 1) { target_core_alua_create_tg_pt_gp(target_core_alua_tg_pt_gps_group_ops_group0, (char const *)ldvarg436); ldv_state_variable_122 = 1; } else { } goto ldv_65203; default: ldv_stop(); } ldv_65203: ; } else { } goto ldv_64721; case 179: ; if (ldv_state_variable_143 != 0) { tmp___269 = __VERIFIER_nondet_int(); switch (tmp___269) { case 0: ; if (ldv_state_variable_143 == 1) { target_core_alua_lu_gp_show_attr_members(ldvarg438, ldvarg439); ldv_state_variable_143 = 1; } else { } goto ldv_65208; default: ldv_stop(); } ldv_65208: ; } else { } goto ldv_64721; case 180: ; if (ldv_state_variable_158 != 0) { tmp___270 = __VERIFIER_nondet_int(); switch (tmp___270) { case 0: ; if (ldv_state_variable_158 == 1) { target_core_dev_pr_show_attr_res_pr_registered_i_pts(ldvarg440, ldvarg441); ldv_state_variable_158 = 1; } else { } goto ldv_65212; default: ldv_stop(); } ldv_65212: ; } else { } goto ldv_64721; case 181: ; if (ldv_state_variable_42 != 0) { ldv_main_exported_42(); } else { } goto ldv_64721; case 182: ; if (ldv_state_variable_22 != 0) { ldv_main_exported_22(); } else { } goto ldv_64721; case 183: ; if (ldv_state_variable_46 != 0) { ldv_main_exported_46(); } else { } goto ldv_64721; case 184: ; if (ldv_state_variable_13 != 0) { ldv_main_exported_13(); } else { } goto ldv_64721; case 185: ; if (ldv_state_variable_105 != 0) { ldv_main_exported_105(); } else { } goto ldv_64721; case 186: ; goto ldv_64721; case 187: ; if 
(ldv_state_variable_85 != 0) { ldv_main_exported_85(); } else { } goto ldv_64721; case 188: ; if (ldv_state_variable_185 != 0) { tmp___271 = __VERIFIER_nondet_int(); switch (tmp___271) { case 0: ; if (ldv_state_variable_185 == 1) { store_force_pr_aptpl(target_core_dev_attrib_force_pr_aptpl_group0, (char const *)ldvarg457, ldvarg456); ldv_state_variable_185 = 1; } else { } goto ldv_65223; case 1: ; if (ldv_state_variable_185 == 1) { show_force_pr_aptpl(target_core_dev_attrib_force_pr_aptpl_group0, ldvarg455); ldv_state_variable_185 = 1; } else { } goto ldv_65223; default: ldv_stop(); } ldv_65223: ; } else { } goto ldv_64721; case 189: ; if (ldv_state_variable_36 != 0) { ldv_main_exported_36(); } else { } goto ldv_64721; case 190: ; goto ldv_64721; case 191: ; if (ldv_state_variable_183 != 0) { tmp___272 = __VERIFIER_nondet_int(); switch (tmp___272) { case 0: ; if (ldv_state_variable_183 == 1) { store_block_size(target_core_dev_attrib_block_size_group0, (char const *)ldvarg463, ldvarg462); ldv_state_variable_183 = 1; } else { } goto ldv_65230; case 1: ; if (ldv_state_variable_183 == 1) { show_block_size(target_core_dev_attrib_block_size_group0, ldvarg461); ldv_state_variable_183 = 1; } else { } goto ldv_65230; default: ldv_stop(); } ldv_65230: ; } else { } goto ldv_64721; case 192: ; if (ldv_state_variable_94 != 0) { ldv_main_exported_94(); } else { } goto ldv_64721; case 193: ; if (ldv_state_variable_146 != 0) { tmp___273 = __VERIFIER_nondet_int(); switch (tmp___273) { case 0: ; if (ldv_state_variable_146 == 1) { target_core_store_dev_lba_map(ldvarg469, (char const *)ldvarg471, ldvarg470); ldv_state_variable_146 = 1; } else { } goto ldv_65236; case 1: ; if (ldv_state_variable_146 == 1) { target_core_show_dev_lba_map(ldvarg467, ldvarg468); ldv_state_variable_146 = 1; } else { } goto ldv_65236; default: ldv_stop(); } ldv_65236: ; } else { } goto ldv_64721; case 194: ; if (ldv_state_variable_51 != 0) { ldv_main_exported_51(); } else { } goto ldv_64721; case 195: ; if (ldv_state_variable_9 != 0) { ldv_main_exported_9(); } else { } goto ldv_64721; case 196: ; if (ldv_state_variable_111 != 0) { ldv_main_exported_111(); } else { } goto ldv_64721; case 197: ; if (ldv_state_variable_38 != 0) { ldv_main_exported_38(); } else { } goto ldv_64721; case 198: ; goto ldv_64721; case 199: ; if (ldv_state_variable_34 != 0) { ldv_main_exported_34(); } else { } goto ldv_64721; case 200: ; if (ldv_state_variable_169 != 0) { tmp___274 = __VERIFIER_nondet_int(); switch (tmp___274) { case 0: ; if (ldv_state_variable_169 == 1) { target_core_dev_attrib_attr_show(target_core_dev_attrib_ops_group1, target_core_dev_attrib_ops_group0, ldvarg486); ldv_state_variable_169 = 1; } else { } goto ldv_65247; case 1: ; if (ldv_state_variable_169 == 1) { target_core_dev_attrib_attr_store(target_core_dev_attrib_ops_group1, target_core_dev_attrib_ops_group0, (char const *)ldvarg485, ldvarg484); ldv_state_variable_169 = 1; } else { } goto ldv_65247; default: ldv_stop(); } ldv_65247: ; } else { } goto ldv_64721; case 201: ; if (ldv_state_variable_164 != 0) { tmp___275 = __VERIFIER_nondet_int(); switch (tmp___275) { case 0: ; if (ldv_state_variable_164 == 1) { target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(target_core_dev_wwn_vpd_assoc_scsi_target_device_group0, (char const *)ldvarg489, ldvarg488); ldv_state_variable_164 = 1; } else { } goto ldv_65252; case 1: ; if (ldv_state_variable_164 == 1) { target_core_dev_wwn_show_attr_vpd_assoc_scsi_target_device(target_core_dev_wwn_vpd_assoc_scsi_target_device_group0, ldvarg487); 
ldv_state_variable_164 = 1; } else { } goto ldv_65252; default: ldv_stop(); } ldv_65252: ; } else { } goto ldv_64721; case 202: ; if (ldv_state_variable_132 != 0) { tmp___276 = __VERIFIER_nondet_int(); switch (tmp___276) { case 0: ; if (ldv_state_variable_132 == 1) { target_core_alua_tg_pt_gp_store_attr_alua_support_active_optimized(target_core_alua_tg_pt_gp_alua_support_active_optimized_group0, (char const *)ldvarg492, ldvarg491); ldv_state_variable_132 = 1; } else { } goto ldv_65257; case 1: ; if (ldv_state_variable_132 == 1) { target_core_alua_tg_pt_gp_show_attr_alua_support_active_optimized(target_core_alua_tg_pt_gp_alua_support_active_optimized_group0, ldvarg490); ldv_state_variable_132 = 1; } else { } goto ldv_65257; default: ldv_stop(); } ldv_65257: ; } else { } goto ldv_64721; case 203: ; if (ldv_state_variable_196 != 0) { tmp___277 = __VERIFIER_nondet_int(); switch (tmp___277) { case 0: ; if (ldv_state_variable_196 == 1) { store_emulate_tas(target_core_dev_attrib_emulate_tas_group0, (char const *)ldvarg495, ldvarg494); ldv_state_variable_196 = 1; } else { } goto ldv_65262; case 1: ; if (ldv_state_variable_196 == 1) { show_emulate_tas(target_core_dev_attrib_emulate_tas_group0, ldvarg493); ldv_state_variable_196 = 1; } else { } goto ldv_65262; default: ldv_stop(); } ldv_65262: ; } else { } goto ldv_64721; case 204: ; if (ldv_state_variable_171 != 0) { tmp___278 = __VERIFIER_nondet_int(); switch (tmp___278) { case 0: ; if (ldv_state_variable_171 == 1) { show_hw_max_sectors(ldvarg496, ldvarg497); ldv_state_variable_171 = 1; } else { } goto ldv_65267; default: ldv_stop(); } ldv_65267: ; } else { } goto ldv_64721; default: ldv_stop(); } ldv_64721: ; goto ldv_65270; ldv_final: ldv_check_final_state(); return 0; } } __inline static void *ERR_PTR(long error ) { void *tmp ; { tmp = ldv_err_ptr(error); return (tmp); } } __inline static long PTR_ERR(void const *ptr ) { long tmp ; { tmp = ldv_ptr_err(ptr); return (tmp); } } __inline static bool IS_ERR(void const *ptr ) { bool tmp ; { tmp = ldv_is_err(ptr); return (tmp); } } bool ldv_queue_work_on_5(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_6(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_7(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_8(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_9(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; 
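/* The ldv_queue_*work* wrappers forward to the real workqueue API and then report
   the work item to the LDV work model via activate_work_2(), so the generated
   environment can later invoke its handler; ldv_flush_workqueue_* analogously calls
   call_and_disable_all_2().  The ldv_mutex_lock_* / ldv_mutex_unlock_* /
   ldv_mutex_trylock_* wrappers below pair each mutex operation with a
   per-lock-class model call (e.g. ldv_mutex_lock_g_tf_lock), apparently used to
   check locking discipline separately for each lock class. */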
activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_12(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_14(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_15(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_17(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_tf_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_tf_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_19(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_tf_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_20(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_tf_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_21(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_tf_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_22(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_tf_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_23(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_tf_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_lock_interruptible_24(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_lock_interruptible(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_lock_interruptible_hba_access_mutex_of_se_hba(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_25(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_hba_access_mutex_of_se_hba(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_26(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_hba_access_mutex_of_se_hba(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_27(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_hba_access_mutex_of_se_hba(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_28(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_hba_access_mutex_of_se_hba(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void __read_once_size(void const volatile *p , void *res , int size ) { { switch (size) { case 1: *((__u8 *)res) = *((__u8 volatile *)p); goto ldv_880; case 2: *((__u16 *)res) = *((__u16 volatile *)p); goto ldv_880; case 4: *((__u32 *)res) = *((__u32 volatile *)p); goto ldv_880; case 8: *((__u64 *)res) = *((__u64 volatile 
*)p); goto ldv_880; default: __asm__ volatile ("": : : "memory"); __builtin_memcpy(res, (void const *)p, (unsigned long )size); __asm__ volatile ("": : : "memory"); } ldv_880: ; return; } } __inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } __inline static __u16 __swab16p(__u16 const *p ) { __u16 tmp ; { tmp = __fswab16((int )*p); return (tmp); } } __inline static __u16 __be16_to_cpup(__be16 const *p ) { __u16 tmp ; { tmp = __swab16p(p); return (tmp); } } extern void __bad_size_call_parameter(void) ; __inline static void __hlist_del(struct hlist_node *n ) { struct hlist_node *next ; struct hlist_node **pprev ; { next = n->next; pprev = n->pprev; *pprev = next; if ((unsigned long )next != (unsigned long )((struct hlist_node *)0)) { next->pprev = pprev; } else { } return; } } extern void warn_slowpath_null(char const * , int const ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_sub_and_test(int i , atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2, %0; sete %1": "+m" (v->counter), "=qm" (c): "er" (i): "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5596; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5596; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5596; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5596; default: __xadd_wrong_size(); } ldv_5596: ; return (__ret + i); } } __inline static void atomic64_add(long i , atomic64_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; addq %1,%0": "=m" (v->counter): "er" (i), "m" (v->counter)); return; } } __inline static void atomic64_inc(atomic64_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incq %0": "=m" (v->counter): "m" (v->counter)); return; } } __inline static void atomic_long_inc(atomic_long_t *l ) { atomic64_t *v ; { v = l; atomic64_inc(v); return; } } __inline static void atomic_long_add(long i , atomic_long_t *l ) { atomic64_t *v ; { v = l; atomic64_add(i, v); return; } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , 
unsigned long ) ; extern int lock_is_held(struct lockdep_map * ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; int ldv_mutex_trylock_65(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_63(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_66(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_70(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_72(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_74(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_75(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_76(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_79(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_80(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_82(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_84(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_86(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_89(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_62(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_64(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_68(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_69(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_71(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_73(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_77(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_78(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_81(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_83(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_85(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_88(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_acl_node_mutex_of_se_portal_group(struct mutex *lock ) ; void ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(struct mutex *lock ) ; void ldv_mutex_lock_g_device_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_g_device_mutex(struct mutex *lock ) ; void ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) ; void ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) ; extern int __preempt_count ; __inline static void __preempt_count_add(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6845; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6845; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6845; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6845; default: __bad_percpu_size(); } ldv_6845: ; return; } } __inline static void __preempt_count_sub(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { 
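/* Generic (non +/-1) case: subtract val by adding its negation to the per-CPU
   preempt counter through a %gs-relative add of the matching operand size. */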
__asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6857; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6857; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6857; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6857; default: __bad_percpu_size(); } ldv_6857: ; return; } } extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField17.rlock); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField17.rlock, flags); return; } } extern unsigned long volatile jiffies ; __inline static u64 get_jiffies_64(void) { { return ((u64 )jiffies); } } extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern void wait_for_completion(struct completion * ) ; extern void complete(struct completion * ) ; __inline static void __rcu_read_lock(void) { { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern void kfree_call_rcu(struct callback_head * , void (*)(struct callback_head * ) ) ; extern bool rcu_is_watching(void) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 0, (struct lockdep_map *)0, 0UL); return; } } __inline static void rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, 0UL); return; } } extern struct lockdep_map rcu_lock_map ; extern struct lockdep_map rcu_sched_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; extern int rcu_read_lock_held(void) ; __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __rcu_read_lock(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } __rcu_read_unlock(); rcu_lock_release(& rcu_lock_map); return; } } __inline static void rcu_read_lock_sched(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __preempt_count_add(1); __asm__ volatile ("": : : "memory"); rcu_lock_acquire(& rcu_sched_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 965, "rcu_read_lock_sched() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock_sched(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 983, "rcu_read_unlock_sched() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_sched_lock_map); __asm__ volatile ("": : : "memory"); __preempt_count_sub(1); return; } } extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) ; extern void destroy_workqueue(struct workqueue_struct * ) ; void ldv_destroy_workqueue_87(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_57(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_59(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_58(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_61(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_60(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void hlist_del_rcu(struct hlist_node *n ) { { __hlist_del(n); n->pprev = (struct hlist_node **)-2401263026316508672L; return; } } extern void __compiletime_assert_402(void) ; __inline static void hlist_add_head_rcu(struct hlist_node *n , struct hlist_head *h ) { struct hlist_node *first ; bool __cond ; struct hlist_node *__var ; { first = h->first; n->next = first; n->pprev = & h->first; __cond = 0; if ((int )__cond) { __compiletime_assert_402(); } else { } __asm__ volatile ("": : : "memory"); __var = (struct hlist_node *)0; *((struct hlist_node * volatile *)(& h->first)) = n; if ((unsigned long )first != (unsigned long )((struct hlist_node *)0)) { first->pprev = & n->next; } else { } return; } } extern void __compiletime_assert_430(void) ; __inline static bool __ref_is_percpu(struct percpu_ref *ref , unsigned long **percpu_countp ) { unsigned long percpu_ptr ; unsigned long _________p1 ; union __anonunion___u_192 __u ; long tmp ; { __read_once_size((void const volatile *)(& ref->percpu_count_ptr), (void *)(& __u.__c), 8); _________p1 = __u.__val; percpu_ptr = _________p1; tmp = ldv__builtin_expect((percpu_ptr & 3UL) != 0UL, 0L); if (tmp != 0L) { return (0); } else { } 
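/* The low bits of percpu_count_ptr act as mode flags (atomic/dead); if any flag is
   set the percpu fast path must not be used and 0 is returned above.  Otherwise the
   pointer itself is the per-CPU counter base handed back to the caller. */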
*percpu_countp = (unsigned long *)percpu_ptr; return (1); } } __inline static void percpu_ref_get_many(struct percpu_ref *ref , unsigned long nr ) { unsigned long *percpu_count ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; bool tmp ; { rcu_read_lock_sched(); tmp = __ref_is_percpu(ref, & percpu_count); if ((int )tmp) { __vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 0; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (nr)); } goto ldv_16562; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16562; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16562; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (nr)); } goto ldv_16562; default: __bad_percpu_size(); } ldv_16562: ; goto ldv_16567; case 2UL: pao_ID_____0 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (nr)); } goto ldv_16573; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16573; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16573; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (nr)); } goto ldv_16573; default: __bad_percpu_size(); } ldv_16573: ; goto ldv_16567; case 4UL: pao_ID_____1 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (nr)); } goto ldv_16583; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16583; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16583; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if 
(pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (nr)); } goto ldv_16583; default: __bad_percpu_size(); } ldv_16583: ; goto ldv_16567; case 8UL: pao_ID_____2 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (nr)); } goto ldv_16593; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16593; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (nr)); } goto ldv_16593; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (nr)); } goto ldv_16593; default: __bad_percpu_size(); } ldv_16593: ; goto ldv_16567; default: __bad_size_call_parameter(); goto ldv_16567; } ldv_16567: ; } else { atomic_long_add((long )nr, & ref->count); } rcu_read_unlock_sched(); return; } } __inline static void percpu_ref_get(struct percpu_ref *ref ) { { percpu_ref_get_many(ref, 1UL); return; } } void invoke_work_1(void) ; void call_and_disable_work_1(struct work_struct *work ) ; void disable_work_1(struct work_struct *work ) ; void activate_work_1(struct work_struct *work , int state ) ; void call_and_disable_all_1(int state ) ; __inline static u16 get_unaligned_be16(void const *p ) { __u16 tmp ; { tmp = __be16_to_cpup((__be16 const *)p); return (tmp); } } __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } __inline static void sema_init(struct semaphore *sem , int val ) { struct lock_class_key __key ; struct semaphore __constr_expr_0 ; { __constr_expr_0.lock.raw_lock.val.counter = 0; __constr_expr_0.lock.magic = 3735899821U; __constr_expr_0.lock.owner_cpu = 4294967295U; __constr_expr_0.lock.owner = (void *)-1; __constr_expr_0.lock.dep_map.key = 0; __constr_expr_0.lock.dep_map.class_cache[0] = 0; __constr_expr_0.lock.dep_map.class_cache[1] = 0; __constr_expr_0.lock.dep_map.name = "(*sem).lock"; __constr_expr_0.lock.dep_map.cpu = 0; __constr_expr_0.lock.dep_map.ip = 0UL; __constr_expr_0.count = (unsigned int )val; __constr_expr_0.wait_list.next = & sem->wait_list; __constr_expr_0.wait_list.prev = & sem->wait_list; *sem = __constr_expr_0; lockdep_init_map(& sem->lock.dep_map, "semaphore->lock", & __key, 0); return; } } extern char const *scsi_device_type(unsigned int ) ; sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd ) ; bool target_lun_is_rdonly(struct se_cmd *cmd ) ; sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd , sense_reason_t (*exec_cmd)(struct se_cmd * ) ) ; sense_reason_t transport_lookup_cmd_lun(struct se_cmd *se_cmd , u64 unpacked_lun ) ; int transport_lookup_tmr_lun(struct se_cmd *se_cmd , u64 unpacked_lun ) ; struct mutex g_device_mutex ; struct list_head g_device_list ; int core_alloc_rtpi(struct se_lun *lun , struct se_device *dev ) ; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *nacl , u16 rtpi ) ; void target_pr_kref_release(struct kref *kref ) ; void core_free_device_list_for_node(struct se_node_acl *nacl , struct se_portal_group *tpg ) ; void core_update_device_list_access(u64 mapped_lun , u32 lun_access , struct se_node_acl *nacl ) ; struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl , u64 mapped_lun ) ; int core_enable_device_list_for_node(struct se_lun *lun , struct se_lun_acl *lun_acl , u64 mapped_lun , u32 lun_access , struct se_node_acl *nacl , struct se_portal_group *tpg ) ; void core_disable_device_list_for_node(struct se_lun *lun , struct se_dev_entry *orig , struct se_node_acl *nacl , struct se_portal_group *tpg ) ; void core_clear_lun_from_tpg(struct se_lun *lun , struct se_portal_group *tpg ) ; int core_dev_add_lun(struct se_portal_group *tpg , struct se_device *dev , struct se_lun *lun ) ; void core_dev_del_lun(struct se_portal_group *tpg , struct se_lun *lun ) ; struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_node_acl *nacl , u64 mapped_lun , int *ret ) ; 
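/* Prototypes of the target core device / LUN / persistent-reservation helpers used
   below, presumably pulled in from the driver's internal headers by CIL. */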
int core_dev_add_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_lun_acl *lacl , struct se_lun *lun , u32 lun_access ) ; int core_dev_del_initiator_node_lun_acl(struct se_lun *lun , struct se_lun_acl *lacl ) ; void core_dev_free_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_lun_acl *lacl ) ; struct se_device *g_lun0_dev ; void core_tpg_add_node_to_devs(struct se_node_acl *acl , struct se_portal_group *tpg , struct se_lun *lun_orig ) ; int core_tpg_add_lun(struct se_portal_group *tpg , struct se_lun *lun , u32 lun_access , struct se_device *dev ) ; void core_tpg_remove_lun(struct se_portal_group *tpg , struct se_lun *lun ) ; u32 scsi_get_new_index(scsi_index_t type ) ; void target_qf_do_work(struct work_struct *work ) ; struct se_portal_group xcopy_pt_tpg ; void core_alua_free_lu_gp_mem(struct se_device *dev ) ; int core_setup_alua(struct se_device *dev ) ; int core_scsi3_check_aptpl_registration(struct se_device *dev , struct se_portal_group *tpg , struct se_lun *lun , struct se_node_acl *nacl , u64 mapped_lun ) ; void core_scsi3_free_pr_reg_from_nacl(struct se_device *dev , struct se_node_acl *nacl ) ; void core_scsi3_free_all_registrations(struct se_device *dev ) ; int core_scsi3_ua_allocate(struct se_dev_entry *deve , u8 asc , u8 ascq ) ; void core_scsi3_ua_release_all(struct se_dev_entry *deve ) ; struct mutex g_device_mutex = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "g_device_mutex.wait_lock", 0, 0UL}}}}, {& g_device_mutex.wait_list, & g_device_mutex.wait_list}, 0, (void *)(& g_device_mutex), {0, {0, 0}, "g_device_mutex", 0, 0UL}}; struct list_head g_device_list = {& g_device_list, & g_device_list}; static struct se_hba *lun0_hba ; sense_reason_t transport_lookup_cmd_lun(struct se_cmd *se_cmd , u64 unpacked_lun ) { struct se_lun *se_lun ; struct se_session *se_sess ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; char *tmp ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_382 __u ; bool __warned ; int tmp___0 ; int tmp___1 ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_384 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; char *tmp___4 ; struct se_device *________p1___1 ; struct se_device *_________p1___1 ; union __anonunion___u_386 __u___1 ; int tmp___5 ; { se_lun = (struct se_lun *)0; se_sess = se_cmd->se_sess; nacl = se_sess->se_node_acl; rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { atomic_long_inc(& deve->total_cmds); if ((unsigned int )se_cmd->data_direction == 1U && (int )deve->lun_flags & 1) { tmp = (*((se_cmd->se_tfo)->get_fabric_name))(); printk("\vTARGET_CORE[%s]: Detected WRITE_PROTECTED LUN Access for 0x%08llx\n", tmp, unpacked_lun); rcu_read_unlock(); return (12U); } else { } if ((unsigned int )se_cmd->data_direction == 1U) { atomic_long_add((long )se_cmd->data_length, & deve->write_bytes); } else if ((unsigned int )se_cmd->data_direction == 2U) { atomic_long_add((long )se_cmd->data_length, & deve->read_bytes); } else { } __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 88, "suspicious rcu_dereference_check() usage"); } else { } } else { } se_lun = ________p1; __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 89, "suspicious rcu_dereference_check() usage"); } else { } } else { } se_cmd->se_lun = ________p1___0; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; se_cmd->se_cmd_flags = se_cmd->se_cmd_flags | 256U; percpu_ref_get(& se_lun->lun_ref); se_cmd->lun_ref_active = 1; } else { } rcu_read_unlock(); if ((unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { if (unpacked_lun != 0ULL) { tmp___4 = (*((se_cmd->se_tfo)->get_fabric_name))(); printk("\vTARGET_CORE[%s]: Detected NON_EXISTENT_LUN Access for 0x%08llx\n", tmp___4, unpacked_lun); return (1U); } else { } if ((unsigned int )se_cmd->data_direction != 2U && (unsigned int )se_cmd->data_direction != 3U) { return (12U); } else { } se_lun = (se_sess->se_tpg)->tpg_virt_lun0; se_cmd->se_lun = (se_sess->se_tpg)->tpg_virt_lun0; se_cmd->orig_fe_lun = 0ULL; se_cmd->se_cmd_flags = se_cmd->se_cmd_flags | 256U; percpu_ref_get(& se_lun->lun_ref); se_cmd->lun_ref_active = 1; } else { } __read_once_size((void const volatile *)(& se_lun->lun_se_dev), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___5 = debug_lockdep_rcu_enabled(); se_cmd->se_dev = ________p1___1; atomic_long_inc(& (se_cmd->se_dev)->num_cmds); if ((unsigned int )se_cmd->data_direction == 1U) { atomic_long_add((long )se_cmd->data_length, & (se_cmd->se_dev)->write_bytes); } else if ((unsigned int )se_cmd->data_direction == 2U) { atomic_long_add((long )se_cmd->data_length, & (se_cmd->se_dev)->read_bytes); } else { } return (0U); } } static char const __kstrtab_transport_lookup_cmd_lun[25U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'l', 'o', 'o', 'k', 'u', 'p', '_', 'c', 'm', 'd', '_', 'l', 'u', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_lookup_cmd_lun ; struct kernel_symbol const __ksymtab_transport_lookup_cmd_lun = {(unsigned long )(& transport_lookup_cmd_lun), (char const *)(& __kstrtab_transport_lookup_cmd_lun)}; int transport_lookup_tmr_lun(struct se_cmd *se_cmd , u64 unpacked_lun ) { struct se_dev_entry *deve ; struct se_lun *se_lun ; struct se_session *se_sess ; struct se_node_acl *nacl ; struct se_tmr_req *se_tmr ; unsigned long flags ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_388 __u ; bool __warned ; int tmp ; int tmp___0 ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_390 __u___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; struct se_lun *________p1___1 ; struct se_lun *_________p1___1 ; union __anonunion___u_392 __u___1 ; bool __warned___1 ; int 
tmp___3 ; int tmp___4 ; struct _ddebug descriptor ; char *tmp___5 ; long tmp___6 ; struct se_device *________p1___2 ; struct se_device *_________p1___2 ; union __anonunion___u_394 __u___2 ; int tmp___7 ; struct se_device *________p1___3 ; struct se_device *_________p1___3 ; union __anonunion___u_396 __u___3 ; int tmp___8 ; raw_spinlock_t *tmp___9 ; { se_lun = (struct se_lun *)0; se_sess = se_cmd->se_sess; nacl = se_sess->se_node_acl; se_tmr = se_cmd->se_tmr_req; rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 159, "suspicious rcu_dereference_check() usage"); } else { } } else { } se_tmr->tmr_lun = ________p1; __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 160, "suspicious rcu_dereference_check() usage"); } else { } } else { } se_cmd->se_lun = ________p1___0; __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! 
__warned___1) { tmp___4 = rcu_read_lock_held(); if (tmp___4 == 0) { __warned___1 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 161, "suspicious rcu_dereference_check() usage"); } else { } } else { } se_lun = ________p1___1; se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; } else { } rcu_read_unlock(); if ((unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { descriptor.modname = "target_core_mod"; descriptor.function = "transport_lookup_tmr_lun"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN Access for 0x%08llx\n"; descriptor.lineno = 171U; descriptor.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = (*((se_cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN Access for 0x%08llx\n", tmp___5, unpacked_lun); } else { } return (-19); } else { } __read_once_size((void const volatile *)(& se_lun->lun_se_dev), (void *)(& __u___2.__c), 8); _________p1___2 = __u___2.__val; ________p1___2 = _________p1___2; tmp___7 = debug_lockdep_rcu_enabled(); se_cmd->se_dev = ________p1___2; __read_once_size((void const volatile *)(& se_lun->lun_se_dev), (void *)(& __u___3.__c), 8); _________p1___3 = __u___3.__val; ________p1___3 = _________p1___3; tmp___8 = debug_lockdep_rcu_enabled(); se_tmr->tmr_dev = ________p1___3; tmp___9 = spinlock_check(& (se_tmr->tmr_dev)->se_tmr_lock); flags = _raw_spin_lock_irqsave(tmp___9); list_add_tail(& se_tmr->tmr_list, & (se_tmr->tmr_dev)->dev_tmr_list); spin_unlock_irqrestore(& (se_tmr->tmr_dev)->se_tmr_lock, flags); return (0); } } static char const __kstrtab_transport_lookup_tmr_lun[25U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'l', 'o', 'o', 'k', 'u', 'p', '_', 't', 'm', 'r', '_', 'l', 'u', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_lookup_tmr_lun ; struct kernel_symbol const __ksymtab_transport_lookup_tmr_lun = {(unsigned long )(& transport_lookup_tmr_lun), (char const *)(& __kstrtab_transport_lookup_tmr_lun)}; bool target_lun_is_rdonly(struct se_cmd *cmd ) { struct se_session *se_sess ; struct se_dev_entry *deve ; bool ret ; { se_sess = cmd->se_sess; rcu_read_lock(); deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun); ret = (bool )((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0) && (int )deve->lun_flags & 1); rcu_read_unlock(); return (ret); } } static char const __kstrtab_target_lun_is_rdonly[21U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'l', 'u', 'n', '_', 'i', 's', '_', 'r', 'd', 'o', 'n', 'l', 'y', '\000'}; struct kernel_symbol const __ksymtab_target_lun_is_rdonly ; struct kernel_symbol const __ksymtab_target_lun_is_rdonly = {(unsigned long )(& target_lun_is_rdonly), (char const *)(& __kstrtab_target_lun_is_rdonly)}; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *nacl , u16 rtpi ) { struct se_dev_entry *deve ; struct se_lun *lun ; struct se_portal_group *tpg ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_398 __u ; 
int tmp ; struct hlist_node const *__mptr ; struct se_dev_entry *tmp___0 ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_400 __u___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; char *tmp___3 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___1 ; struct hlist_node *_________p1___1 ; union __anonunion___u_402 __u___1 ; int tmp___4 ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___5 ; { tpg = nacl->se_tpg; rcu_read_lock(); __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct se_dev_entry *)__mptr + 0xfffffffffffffed0UL; } else { tmp___0 = (struct se_dev_entry *)0; } deve = tmp___0; goto ldv_57648; ldv_57647: __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 218, "suspicious rcu_dereference_check() usage"); } else { } } else { } lun = ________p1___0; if ((unsigned long )lun == (unsigned long )((struct se_lun *)0)) { tmp___3 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\v%s device entries device pointer is NULL, but Initiator has access.\n", tmp___3); goto ldv_57646; } else { } if ((int )lun->lun_rtpi != (int )rtpi) { goto ldv_57646; } else { } kref_get(& deve->pr_kref); rcu_read_unlock(); return (deve); ldv_57646: __read_once_size((void const volatile *)(& deve->link.next), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___4 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___1; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___5 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___5 = (struct se_dev_entry *)0; } deve = tmp___5; ldv_57648: ; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57647; } else { } rcu_read_unlock(); return ((struct se_dev_entry *)0); } } void core_free_device_list_for_node(struct se_node_acl *nacl , struct se_portal_group *tpg ) { struct se_dev_entry *deve ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_404 __u ; int tmp ; struct hlist_node const *__mptr ; struct se_dev_entry *tmp___0 ; struct se_lun *lun ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_406 __u___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___1 ; struct hlist_node *_________p1___1 ; union __anonunion___u_408 __u___1 ; int tmp___4 ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___5 ; { ldv_mutex_lock_69(& nacl->lun_entry_mutex); __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = 
__u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct se_dev_entry *)__mptr + 0xfffffffffffffed0UL; } else { tmp___0 = (struct se_dev_entry *)0; } deve = tmp___0; goto ldv_57715; ldv_57714: __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { tmp___2 = lock_is_held(& nacl->lun_entry_mutex.dep_map); if (tmp___2 == 0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 247, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } lun = ________p1___0; core_disable_device_list_for_node(lun, deve, nacl, tpg); __read_once_size((void const volatile *)(& deve->link.next), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___4 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___1; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___5 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___5 = (struct se_dev_entry *)0; } deve = tmp___5; ldv_57715: ; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57714; } else { } ldv_mutex_unlock_70(& nacl->lun_entry_mutex); return; } } void core_update_device_list_access(u64 mapped_lun , u32 lun_access , struct se_node_acl *nacl ) { struct se_dev_entry *deve ; { ldv_mutex_lock_71(& nacl->lun_entry_mutex); deve = target_nacl_find_deve(nacl, mapped_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { if ((lun_access & 2U) != 0U) { deve->lun_flags = deve->lun_flags & 4294967294U; deve->lun_flags = deve->lun_flags | 2U; } else { deve->lun_flags = deve->lun_flags & 4294967293U; deve->lun_flags = deve->lun_flags | 1U; } } else { } ldv_mutex_unlock_72(& nacl->lun_entry_mutex); return; } } struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl , u64 mapped_lun ) { struct se_dev_entry *deve ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_410 __u ; int tmp ; struct hlist_node const *__mptr ; struct se_dev_entry *tmp___0 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___0 ; struct hlist_node *_________p1___0 ; union __anonunion___u_412 __u___0 ; int tmp___1 ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___2 ; { __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct se_dev_entry *)__mptr + 0xfffffffffffffed0UL; } else { tmp___0 = (struct se_dev_entry *)0; } deve = tmp___0; goto ldv_57777; ldv_57776: ; if (deve->mapped_lun == mapped_lun) { return (deve); } else { } __read_once_size((void const volatile *)(& deve->link.next), (void 
*)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___0; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___2 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___2 = (struct se_dev_entry *)0; } deve = tmp___2; ldv_57777: ; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57776; } else { } return ((struct se_dev_entry *)0); } } static char const __kstrtab_target_nacl_find_deve[22U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'n', 'a', 'c', 'l', '_', 'f', 'i', 'n', 'd', '_', 'd', 'e', 'v', 'e', '\000'}; struct kernel_symbol const __ksymtab_target_nacl_find_deve ; struct kernel_symbol const __ksymtab_target_nacl_find_deve = {(unsigned long )(& target_nacl_find_deve), (char const *)(& __kstrtab_target_nacl_find_deve)}; void target_pr_kref_release(struct kref *kref ) { struct se_dev_entry *deve ; struct kref const *__mptr ; { __mptr = (struct kref const *)kref; deve = (struct se_dev_entry *)__mptr + 0xffffffffffffffc4UL; complete(& deve->pr_comp); return; } } static void target_luns_data_has_changed(struct se_node_acl *nacl , struct se_dev_entry *new , bool skip_new ) { struct se_dev_entry *tmp ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_414 __u ; int tmp___0 ; struct hlist_node const *__mptr ; struct se_dev_entry *tmp___1 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___0 ; struct hlist_node *_________p1___0 ; union __anonunion___u_416 __u___0 ; int tmp___2 ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___3 ; { rcu_read_lock(); __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___0 = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___1 = (struct se_dev_entry *)__mptr + 0xfffffffffffffed0UL; } else { tmp___1 = (struct se_dev_entry *)0; } tmp = tmp___1; goto ldv_57849; ldv_57848: ; if ((int )skip_new && (unsigned long )tmp == (unsigned long )new) { goto ldv_57847; } else { } core_scsi3_ua_allocate(tmp, 63, 14); ldv_57847: __read_once_size((void const volatile *)(& tmp->link.next), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___0; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___3 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___3 = (struct se_dev_entry *)0; } tmp = tmp___3; ldv_57849: ; if ((unsigned long )tmp != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57848; } else { } rcu_read_unlock(); return; } } extern void __compiletime_assert_361(void) ; extern void __compiletime_assert_362(void) ; extern void __compiletime_assert_380(void) ; extern void __compiletime_assert_381(void) ; int core_enable_device_list_for_node(struct se_lun *lun , struct se_lun_acl *lun_acl , u64 mapped_lun , u32 lun_access , struct se_node_acl *nacl , struct se_portal_group *tpg ) { struct se_dev_entry *orig ; struct se_dev_entry *new ; void *tmp ; struct lock_class_key __key ; struct se_lun *orig_lun ; struct se_lun 
*________p1 ; struct se_lun *_________p1 ; union __anonunion___u_418 __u ; bool __warned ; int tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; bool __cond ; struct se_lun *__var ; bool __cond___0 ; struct se_lun_acl *__var___0 ; bool __cond___1 ; struct se_lun *__var___1 ; bool __cond___2 ; struct se_lun_acl *__var___2 ; { tmp = kzalloc(336UL, 208U); new = (struct se_dev_entry *)tmp; if ((unsigned long )new == (unsigned long )((struct se_dev_entry *)0)) { printk("\vUnable to allocate se_dev_entry memory\n"); return (-12); } else { } atomic_set(& new->ua_count, 0); spinlock_check(& new->ua_lock); __raw_spin_lock_init(& new->ua_lock.__annonCompField17.rlock, "&(&new->ua_lock)->rlock", & __key); INIT_LIST_HEAD(& new->ua_list); INIT_LIST_HEAD(& new->lun_link); new->mapped_lun = mapped_lun; kref_init(& new->pr_kref); init_completion(& new->pr_comp); if ((lun_access & 2U) != 0U) { new->lun_flags = new->lun_flags | 2U; } else { new->lun_flags = new->lun_flags | 1U; } new->creation_time = get_jiffies_64(); new->attach_count = new->attach_count + 1U; ldv_mutex_lock_73(& nacl->lun_entry_mutex); orig = target_nacl_find_deve(nacl, mapped_lun); if ((unsigned long )orig != (unsigned long )((struct se_dev_entry *)0) && (unsigned long )orig->se_lun != (unsigned long )((struct se_lun *)0)) { __read_once_size((void const volatile *)(& orig->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = lock_is_held(& nacl->lun_entry_mutex.dep_map); if (tmp___1 == 0) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 349, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } orig_lun = ________p1; if ((unsigned long )orig_lun != (unsigned long )lun) { printk("\vExisting orig->se_lun doesn\'t match new lun for dynamic -> explicit NodeACL conversion: %s\n", (char *)(& nacl->initiatorname)); ldv_mutex_unlock_74(& nacl->lun_entry_mutex); kfree((void const *)new); return (-22); } else { } tmp___3 = ldv__builtin_expect((unsigned long )orig->se_lun_acl != (unsigned long )((struct se_lun_acl *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"), "i" (359), "i" (12UL)); ldv_57873: ; goto ldv_57873; } else { } __cond = 0; if ((int )__cond) { __compiletime_assert_361(); } else { } __asm__ volatile ("": : : "memory"); __var = (struct se_lun *)0; *((struct se_lun * volatile *)(& new->se_lun)) = lun; __cond___0 = 0; if ((int )__cond___0) { __compiletime_assert_362(); } else { } __asm__ volatile ("": : : "memory"); __var___0 = (struct se_lun_acl *)0; *((struct se_lun_acl * volatile *)(& new->se_lun_acl)) = lun_acl; hlist_del_rcu(& orig->link); hlist_add_head_rcu(& new->link, & nacl->lun_entry_hlist); ldv_mutex_unlock_75(& nacl->lun_entry_mutex); spin_lock(& lun->lun_deve_lock); list_del(& orig->lun_link); list_add_tail(& new->lun_link, & lun->lun_deve_list); spin_unlock(& lun->lun_deve_lock); 
kref_put(& orig->pr_kref, & target_pr_kref_release); wait_for_completion(& orig->pr_comp); target_luns_data_has_changed(nacl, new, 1); kfree_call_rcu(& orig->callback_head, (void (*)(struct callback_head * ))320); return (0); } else { } __cond___1 = 0; if ((int )__cond___1) { __compiletime_assert_380(); } else { } __asm__ volatile ("": : : "memory"); __var___1 = (struct se_lun *)0; *((struct se_lun * volatile *)(& new->se_lun)) = lun; __cond___2 = 0; if ((int )__cond___2) { __compiletime_assert_381(); } else { } __asm__ volatile ("": : : "memory"); __var___2 = (struct se_lun_acl *)0; *((struct se_lun_acl * volatile *)(& new->se_lun_acl)) = lun_acl; hlist_add_head_rcu(& new->link, & nacl->lun_entry_hlist); ldv_mutex_unlock_76(& nacl->lun_entry_mutex); spin_lock(& lun->lun_deve_lock); list_add_tail(& new->lun_link, & lun->lun_deve_list); spin_unlock(& lun->lun_deve_lock); target_luns_data_has_changed(nacl, new, 1); return (0); } } extern void __compiletime_assert_431(void) ; void core_disable_device_list_for_node(struct se_lun *lun , struct se_dev_entry *orig , struct se_node_acl *nacl , struct se_portal_group *tpg ) { struct se_device *dev ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_420 __u ; int tmp ; bool __cond ; struct se_lun *__var ; bool __cond___0 ; struct se_lun_acl *__var___0 ; { __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); dev = ________p1; spin_lock(& lun->lun_deve_lock); list_del(& orig->lun_link); spin_unlock(& lun->lun_deve_lock); core_scsi3_ua_release_all(orig); hlist_del_rcu(& orig->link); clear_bit(1L, (unsigned long volatile *)(& orig->deve_flags)); __cond = 0; if ((int )__cond) { __compiletime_assert_430(); } else { } __asm__ volatile ("": : : "memory"); __var = (struct se_lun *)0; *((struct se_lun * volatile *)(& orig->se_lun)) = (struct se_lun */* volatile */)0; __cond___0 = 0; if ((int )__cond___0) { __compiletime_assert_431(); } else { } __asm__ volatile ("": : : "memory"); __var___0 = (struct se_lun_acl *)0; *((struct se_lun_acl * volatile *)(& orig->se_lun_acl)) = (struct se_lun_acl */* volatile */)0; orig->lun_flags = 0U; orig->creation_time = 0ULL; orig->attach_count = orig->attach_count - 1U; kref_put(& orig->pr_kref, & target_pr_kref_release); wait_for_completion(& orig->pr_comp); kfree_call_rcu(& orig->callback_head, (void (*)(struct callback_head * ))320); core_scsi3_free_pr_reg_from_nacl(dev, nacl); target_luns_data_has_changed(nacl, (struct se_dev_entry *)0, 0); return; } } void core_clear_lun_from_tpg(struct se_lun *lun , struct se_portal_group *tpg ) { struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct list_head const *__mptr ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_422 __u ; int tmp ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___0 ; struct se_lun *tmp_lun ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_424 __u___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___1 ; struct hlist_node *_________p1___1 ; union __anonunion___u_426 __u___1 ; int tmp___4 ; struct hlist_node const *__mptr___1 ; struct se_dev_entry *tmp___5 ; struct list_head const *__mptr___2 ; { ldv_mutex_lock_77(& tpg->acl_node_mutex); __mptr = (struct list_head const *)tpg->acl_node_list.next; nacl = 
(struct se_node_acl *)__mptr + 0xfffffffffffffb70UL; goto ldv_58003; ldv_58002: ldv_mutex_lock_78(& nacl->lun_entry_mutex); __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr; tmp___0 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___0 = (struct se_dev_entry *)0; } deve = tmp___0; goto ldv_58000; ldv_57999: __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { tmp___2 = lock_is_held(& nacl->lun_entry_mutex.dep_map); if (tmp___2 == 0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 463, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } tmp_lun = ________p1___0; if ((unsigned long )lun != (unsigned long )tmp_lun) { goto ldv_57998; } else { } core_disable_device_list_for_node(lun, deve, nacl, tpg); ldv_57998: __read_once_size((void const volatile *)(& deve->link.next), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___4 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___1; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___1 = (struct hlist_node const *)____ptr___0; tmp___5 = (struct se_dev_entry *)__mptr___1 + 0xfffffffffffffed0UL; } else { tmp___5 = (struct se_dev_entry *)0; } deve = tmp___5; ldv_58000: ; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57999; } else { } ldv_mutex_unlock_79(& nacl->lun_entry_mutex); __mptr___2 = (struct list_head const *)nacl->acl_list.next; nacl = (struct se_node_acl *)__mptr___2 + 0xfffffffffffffb70UL; ldv_58003: ; if ((unsigned long )(& nacl->acl_list) != (unsigned long )(& tpg->acl_node_list)) { goto ldv_58002; } else { } ldv_mutex_unlock_80(& tpg->acl_node_mutex); return; } } int core_alloc_rtpi(struct se_lun *lun , struct se_device *dev ) { struct se_lun *tmp ; u16 tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { spin_lock(& dev->se_port_lock); if (dev->export_count == 65535U) { printk("\fReached dev->dev_port_count == 0x0000ffff\n"); spin_unlock(& dev->se_port_lock); return (-28); } else { } again: tmp___0 = dev->dev_rpti_counter; dev->dev_rpti_counter = (u16 )((int )dev->dev_rpti_counter + 1); lun->lun_rtpi = tmp___0; if ((unsigned int )lun->lun_rtpi == 0U) { goto again; } else { } __mptr = (struct list_head const *)dev->dev_sep_list.next; tmp = (struct se_lun *)__mptr + 0xfffffffffffffb88UL; goto ldv_58016; ldv_58015: ; if ((int )lun->lun_rtpi == (int )tmp->lun_rtpi) { goto again; } else { } __mptr___0 = (struct list_head const *)tmp->lun_dev_link.next; tmp = (struct se_lun *)__mptr___0 + 0xfffffffffffffb88UL; ldv_58016: ; if ((unsigned long )(& tmp->lun_dev_link) != (unsigned long )(& dev->dev_sep_list)) { goto ldv_58015; } else { } spin_unlock(& dev->se_port_lock); return (0); } } static void se_release_vpd_for_dev(struct 
se_device *dev ) { struct t10_vpd *vpd ; struct t10_vpd *vpd_tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { spin_lock(& dev->t10_wwn.t10_vpd_lock); __mptr = (struct list_head const *)dev->t10_wwn.t10_vpd_list.next; vpd = (struct t10_vpd *)__mptr + 0xfffffffffffffee8UL; __mptr___0 = (struct list_head const *)vpd->vpd_list.next; vpd_tmp = (struct t10_vpd *)__mptr___0 + 0xfffffffffffffee8UL; goto ldv_58030; ldv_58029: list_del(& vpd->vpd_list); kfree((void const *)vpd); vpd = vpd_tmp; __mptr___1 = (struct list_head const *)vpd_tmp->vpd_list.next; vpd_tmp = (struct t10_vpd *)__mptr___1 + 0xfffffffffffffee8UL; ldv_58030: ; if ((unsigned long )(& vpd->vpd_list) != (unsigned long )(& dev->t10_wwn.t10_vpd_list)) { goto ldv_58029; } else { } spin_unlock(& dev->t10_wwn.t10_vpd_lock); return; } } static u32 se_dev_align_max_sectors(u32 max_sectors , u32 block_size___0 ) { u32 aligned_max_sectors ; u32 alignment ; unsigned long _max1 ; unsigned long _max2 ; u32 __x ; { _max1 = 1UL; _max2 = (unsigned long )(4096U / block_size___0); alignment = (u32 )(_max1 > _max2 ? _max1 : _max2); __x = max_sectors; aligned_max_sectors = __x - __x % alignment; if (max_sectors != aligned_max_sectors) { printk("\016Rounding down aligned max_sectors from %u to %u\n", max_sectors, aligned_max_sectors); } else { } return (aligned_max_sectors); } } int core_dev_add_lun(struct se_portal_group *tpg , struct se_device *dev , struct se_lun *lun ) { int rc ; struct _ddebug descriptor ; char *tmp ; u16 tmp___0 ; char *tmp___1 ; long tmp___2 ; struct se_node_acl *acl ; struct list_head const *__mptr ; int tmp___3 ; struct list_head const *__mptr___0 ; int tmp___4 ; { rc = core_tpg_add_lun(tpg, lun, 2U, dev); if (rc < 0) { return (rc); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "core_dev_add_lun"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from CORE HBA: %u\n"; descriptor.lineno = 562U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___0 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from CORE HBA: %u\n", tmp___1, (int )tmp___0, lun->unpacked_lun, tmp, (dev->se_hba)->hba_id); } else { } tmp___4 = (*((tpg->se_tpg_tfo)->tpg_check_demo_mode))(tpg); if (tmp___4 != 0) { ldv_mutex_lock_81(& tpg->acl_node_mutex); __mptr = (struct list_head const *)tpg->acl_node_list.next; acl = (struct se_node_acl *)__mptr + 0xfffffffffffffb70UL; goto ldv_58057; ldv_58056: ; if ((int )acl->dynamic_node_acl) { if ((unsigned long )(tpg->se_tpg_tfo)->tpg_check_demo_mode_login_only == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { core_tpg_add_node_to_devs(acl, tpg, lun); } else { tmp___3 = (*((tpg->se_tpg_tfo)->tpg_check_demo_mode_login_only))(tpg); if (tmp___3 == 0) { core_tpg_add_node_to_devs(acl, tpg, lun); } else { } } } else { } __mptr___0 = (struct list_head const *)acl->acl_list.next; acl = (struct se_node_acl *)__mptr___0 + 0xfffffffffffffb70UL; ldv_58057: ; if ((unsigned long )(& acl->acl_list) != (unsigned long )(& 
tpg->acl_node_list)) { goto ldv_58056; } else { } ldv_mutex_unlock_82(& tpg->acl_node_mutex); } else { } return (0); } } void core_dev_del_lun(struct se_portal_group *tpg , struct se_lun *lun ) { struct _ddebug descriptor ; char *tmp ; u16 tmp___0 ; char *tmp___1 ; long tmp___2 ; { descriptor.modname = "target_core_mod"; descriptor.function = "core_dev_del_lun"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from device object\n"; descriptor.lineno = 595U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___0 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from device object\n", tmp___1, (int )tmp___0, lun->unpacked_lun, tmp); } else { } core_tpg_remove_lun(tpg, lun); return; } } struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_node_acl *nacl , u64 mapped_lun , int *ret ) { struct se_lun_acl *lacl ; char *tmp ; size_t tmp___0 ; void *tmp___1 ; { tmp___0 = strlen((char const *)(& nacl->initiatorname)); if (tmp___0 > 223UL) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\v%s InitiatorName exceeds maximum size.\n", tmp); *ret = -75; return ((struct se_lun_acl *)0); } else { } tmp___1 = kzalloc(696UL, 208U); lacl = (struct se_lun_acl *)tmp___1; if ((unsigned long )lacl == (unsigned long )((struct se_lun_acl *)0)) { printk("\vUnable to allocate memory for struct se_lun_acl.\n"); *ret = -12; return ((struct se_lun_acl *)0); } else { } lacl->mapped_lun = mapped_lun; lacl->se_lun_nacl = nacl; snprintf((char *)(& lacl->initiatorname), 224UL, "%s", (char *)(& nacl->initiatorname)); return (lacl); } } int core_dev_add_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_lun_acl *lacl , struct se_lun *lun , u32 lun_access ) { struct se_node_acl *nacl ; struct se_device *dev ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_428 __u ; int tmp ; int tmp___0 ; struct _ddebug descriptor ; u16 tmp___1 ; char *tmp___2 ; long tmp___3 ; { nacl = lacl->se_lun_nacl; __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); dev = ________p1; if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return (-22); } else { } if ((int )lun->lun_access & 1 && (lun_access & 2U) != 0U) { lun_access = 1U; } else { } lacl->se_lun = lun; tmp___0 = core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun, lun_access, nacl, tpg); if (tmp___0 < 0) { return (-22); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "core_dev_add_initiator_node_lun_acl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for InitiatorNode: %s\n"; descriptor.lineno = 659U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long 
)descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___1 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___2 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for InitiatorNode: %s\n", tmp___2, (int )tmp___1, lun->unpacked_lun, lacl->mapped_lun, (lun_access & 2U) != 0U ? (char *)"RW" : (char *)"RO", (char *)(& lacl->initiatorname)); } else { } core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl, lacl->mapped_lun); return (0); } } int core_dev_del_initiator_node_lun_acl(struct se_lun *lun , struct se_lun_acl *lacl ) { struct se_portal_group *tpg ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct _ddebug descriptor ; u16 tmp ; char *tmp___0 ; long tmp___1 ; { tpg = lun->lun_tpg; nacl = lacl->se_lun_nacl; if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return (-22); } else { } ldv_mutex_lock_83(& nacl->lun_entry_mutex); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { core_disable_device_list_for_node(lun, deve, nacl, tpg); } else { } ldv_mutex_unlock_84(& nacl->lun_entry_mutex); descriptor.modname = "target_core_mod"; descriptor.function = "core_dev_del_initiator_node_lun_acl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "%s_TPG[%hu]_LUN[%llu] - Removed ACL for InitiatorNode: %s Mapped LUN: %llu\n"; descriptor.lineno = 691U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%hu]_LUN[%llu] - Removed ACL for InitiatorNode: %s Mapped LUN: %llu\n", tmp___0, (int )tmp, lun->unpacked_lun, (char *)(& lacl->initiatorname), lacl->mapped_lun); } else { } return (0); } } void core_dev_free_initiator_node_lun_acl(struct se_portal_group *tpg , struct se_lun_acl *lacl ) { struct _ddebug descriptor ; char *tmp ; u16 tmp___0 ; char *tmp___1 ; long tmp___2 ; { descriptor.modname = "target_core_mod"; descriptor.function = "core_dev_free_initiator_node_lun_acl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s Mapped LUN: %llu\n"; descriptor.lineno = 704U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___0 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s Mapped LUN: %llu\n", tmp___1, (int )tmp___0, tmp, (char *)(& lacl->initiatorname), lacl->mapped_lun); } else { } kfree((void const *)lacl); return; } } static void scsi_dump_inquiry(struct se_device *dev ) { struct t10_wwn *wwn ; char buf[17U] ; int i ; int device_type ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; u32 tmp___2 ; struct _ddebug descriptor___2 ; char const 
*tmp___3 ; long tmp___4 ; { wwn = & dev->t10_wwn; i = 0; goto ldv_58115; ldv_58114: ; if ((int )((signed char )wwn->vendor[i]) > 31) { buf[i] = wwn->vendor[i]; } else { buf[i] = 32; } i = i + 1; ldv_58115: ; if (i <= 7) { goto ldv_58114; } else { } buf[i] = 0; descriptor.modname = "target_core_mod"; descriptor.function = "scsi_dump_inquiry"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor.format = " Vendor: %s\n"; descriptor.lineno = 723U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, " Vendor: %s\n", (char *)(& buf)); } else { } i = 0; goto ldv_58120; ldv_58119: ; if ((int )((signed char )wwn->model[i]) > 31) { buf[i] = wwn->model[i]; } else { buf[i] = 32; } i = i + 1; ldv_58120: ; if (i <= 15) { goto ldv_58119; } else { } buf[i] = 0; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "scsi_dump_inquiry"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor___0.format = " Model: %s\n"; descriptor___0.lineno = 731U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, " Model: %s\n", (char *)(& buf)); } else { } i = 0; goto ldv_58124; ldv_58123: ; if ((int )((signed char )wwn->revision[i]) > 31) { buf[i] = wwn->revision[i]; } else { buf[i] = 32; } i = i + 1; ldv_58124: ; if (i <= 3) { goto ldv_58123; } else { } buf[i] = 0; descriptor___1.modname = "target_core_mod"; descriptor___1.function = "scsi_dump_inquiry"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor___1.format = " Revision: %s\n"; descriptor___1.lineno = 739U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, " Revision: %s\n", (char *)(& buf)); } else { } tmp___2 = (*((dev->transport)->get_device_type))(dev); device_type = (int )tmp___2; descriptor___2.modname = "target_core_mod"; descriptor___2.function = "scsi_dump_inquiry"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c"; descriptor___2.format = " Type: %s "; descriptor___2.lineno = 742U; descriptor___2.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = scsi_device_type((unsigned int )device_type); __dynamic_pr_debug(& descriptor___2, " Type: %s ", tmp___3); } else { } return; } } extern void __compiletime_assert_815(void) ; struct se_device *target_alloc_device(struct se_hba *hba , char const *name ) { struct se_device *dev ; struct se_lun *xcopy_lun ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct 
lock_class_key __key___4 ; struct lock_class_key __key___5 ; struct lock_class_key __key___6 ; struct lock_class_key __key___7 ; struct lock_class_key __key___8 ; struct lock_class_key __key___9 ; bool __cond ; struct se_device *__var ; struct lock_class_key __key___10 ; { dev = (*(((hba->backend)->ops)->alloc_device))(hba, name); if ((unsigned long )dev == (unsigned long )((struct se_device *)0)) { return ((struct se_device *)0); } else { } dev->dev_link_magic = 4277001967U; dev->se_hba = hba; dev->transport = (hba->backend)->ops; dev->prot_length = 8; dev->hba_index = hba->hba_index; INIT_LIST_HEAD(& dev->dev_list); INIT_LIST_HEAD(& dev->dev_sep_list); INIT_LIST_HEAD(& dev->dev_tmr_list); INIT_LIST_HEAD(& dev->delayed_cmd_list); INIT_LIST_HEAD(& dev->state_list); INIT_LIST_HEAD(& dev->qf_cmd_list); INIT_LIST_HEAD(& dev->g_dev_node); spinlock_check(& dev->execute_task_lock); __raw_spin_lock_init(& dev->execute_task_lock.__annonCompField17.rlock, "&(&dev->execute_task_lock)->rlock", & __key); spinlock_check(& dev->delayed_cmd_lock); __raw_spin_lock_init(& dev->delayed_cmd_lock.__annonCompField17.rlock, "&(&dev->delayed_cmd_lock)->rlock", & __key___0); spinlock_check(& dev->dev_reservation_lock); __raw_spin_lock_init(& dev->dev_reservation_lock.__annonCompField17.rlock, "&(&dev->dev_reservation_lock)->rlock", & __key___1); spinlock_check(& dev->se_port_lock); __raw_spin_lock_init(& dev->se_port_lock.__annonCompField17.rlock, "&(&dev->se_port_lock)->rlock", & __key___2); spinlock_check(& dev->se_tmr_lock); __raw_spin_lock_init(& dev->se_tmr_lock.__annonCompField17.rlock, "&(&dev->se_tmr_lock)->rlock", & __key___3); spinlock_check(& dev->qf_cmd_lock); __raw_spin_lock_init(& dev->qf_cmd_lock.__annonCompField17.rlock, "&(&dev->qf_cmd_lock)->rlock", & __key___4); sema_init(& dev->caw_sem, 1); atomic_set(& dev->dev_ordered_id, 0); INIT_LIST_HEAD(& dev->t10_wwn.t10_vpd_list); spinlock_check(& dev->t10_wwn.t10_vpd_lock); __raw_spin_lock_init(& dev->t10_wwn.t10_vpd_lock.__annonCompField17.rlock, "&(&dev->t10_wwn.t10_vpd_lock)->rlock", & __key___5); INIT_LIST_HEAD(& dev->t10_pr.registration_list); INIT_LIST_HEAD(& dev->t10_pr.aptpl_reg_list); spinlock_check(& dev->t10_pr.registration_lock); __raw_spin_lock_init(& dev->t10_pr.registration_lock.__annonCompField17.rlock, "&(&dev->t10_pr.registration_lock)->rlock", & __key___6); spinlock_check(& dev->t10_pr.aptpl_reg_lock); __raw_spin_lock_init(& dev->t10_pr.aptpl_reg_lock.__annonCompField17.rlock, "&(&dev->t10_pr.aptpl_reg_lock)->rlock", & __key___7); INIT_LIST_HEAD(& dev->t10_alua.tg_pt_gps_list); spinlock_check(& dev->t10_alua.tg_pt_gps_lock); __raw_spin_lock_init(& dev->t10_alua.tg_pt_gps_lock.__annonCompField17.rlock, "&(&dev->t10_alua.tg_pt_gps_lock)->rlock", & __key___8); INIT_LIST_HEAD(& dev->t10_alua.lba_map_list); spinlock_check(& dev->t10_alua.lba_map_lock); __raw_spin_lock_init(& dev->t10_alua.lba_map_lock.__annonCompField17.rlock, "&(&dev->t10_alua.lba_map_lock)->rlock", & __key___9); dev->t10_wwn.t10_dev = dev; dev->t10_alua.t10_dev = dev; dev->dev_attrib.da_dev = dev; dev->dev_attrib.emulate_model_alias = 0; dev->dev_attrib.emulate_dpo = 1; dev->dev_attrib.emulate_fua_write = 1; dev->dev_attrib.emulate_fua_read = 1; dev->dev_attrib.emulate_write_cache = 0; dev->dev_attrib.emulate_ua_intlck_ctrl = 0; dev->dev_attrib.emulate_tas = 1; dev->dev_attrib.emulate_tpu = 0; dev->dev_attrib.emulate_tpws = 0; dev->dev_attrib.emulate_caw = 1; dev->dev_attrib.emulate_3pc = 1; dev->dev_attrib.pi_prot_type = 0; dev->dev_attrib.enforce_pr_isids = 1; 
dev->dev_attrib.force_pr_aptpl = 0; dev->dev_attrib.is_nonrot = 0; dev->dev_attrib.emulate_rest_reord = 0; dev->dev_attrib.max_unmap_lba_count = 0U; dev->dev_attrib.max_unmap_block_desc_count = 0U; dev->dev_attrib.unmap_granularity = 0U; dev->dev_attrib.unmap_granularity_alignment = 0U; dev->dev_attrib.max_write_same_len = 0U; xcopy_lun = & dev->xcopy_lun; __cond = 0; if ((int )__cond) { __compiletime_assert_815(); } else { } __asm__ volatile ("": : : "memory"); __var = (struct se_device *)0; *((struct se_device * volatile *)(& xcopy_lun->lun_se_dev)) = dev; init_completion(& xcopy_lun->lun_ref_comp); INIT_LIST_HEAD(& xcopy_lun->lun_deve_list); INIT_LIST_HEAD(& xcopy_lun->lun_dev_link); __mutex_init(& xcopy_lun->lun_tg_pt_md_mutex, "&xcopy_lun->lun_tg_pt_md_mutex", & __key___10); xcopy_lun->lun_tpg = & xcopy_pt_tpg; return (dev); } } int target_configure_device(struct se_device *dev ) { struct se_hba *hba ; int ret ; struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; { hba = dev->se_hba; if ((int )dev->dev_flags & 1) { printk("\vse_dev->se_dev_ptr already set for storage object\n"); return (-17); } else { } ret = (*((dev->transport)->configure_device))(dev); if (ret != 0) { goto out; } else { } dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size; dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth; dev->dev_attrib.hw_max_sectors = se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, dev->dev_attrib.hw_block_size); dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; dev->dev_index = scsi_get_new_index(1); dev->creation_time = get_jiffies_64(); ret = core_setup_alua(dev); if (ret != 0) { goto out; } else { } __lock_name = "\"tmr-%s\"dev->transport->name"; tmp = __alloc_workqueue_key("tmr-%s", 10U, 1, & __key, __lock_name, (char const *)(& (dev->transport)->name)); dev->tmr_wq = tmp; if ((unsigned long )dev->tmr_wq == (unsigned long )((struct workqueue_struct *)0)) { printk("\vUnable to create tmr workqueue for %s\n", (char const *)(& (dev->transport)->name)); ret = -12; goto out_free_alua; } else { } __init_work(& dev->qf_work_queue, 0); __constr_expr_0.counter = 137438953408L; dev->qf_work_queue.data = __constr_expr_0; lockdep_init_map(& dev->qf_work_queue.lockdep_map, "(&dev->qf_work_queue)", & __key___0, 0); INIT_LIST_HEAD(& dev->qf_work_queue.entry); dev->qf_work_queue.func = & target_qf_do_work; if (((int )(dev->transport)->transport_flags & 1) == 0) { strncpy((char *)(& dev->t10_wwn.vendor), "LIO-ORG", 8UL); strncpy((char *)(& dev->t10_wwn.model), (char const *)(& (dev->transport)->inquiry_prod), 16UL); strncpy((char *)(& dev->t10_wwn.revision), (char const *)(& (dev->transport)->inquiry_rev), 4UL); } else { } scsi_dump_inquiry(dev); spin_lock(& hba->device_lock); hba->dev_count = hba->dev_count + 1U; spin_unlock(& hba->device_lock); ldv_mutex_lock_85(& g_device_mutex); list_add_tail(& dev->g_dev_node, & g_device_list); ldv_mutex_unlock_86(& g_device_mutex); dev->dev_flags = dev->dev_flags | 1U; return (0); out_free_alua: core_alua_free_lu_gp_mem(dev); out: se_release_vpd_for_dev(dev); return (ret); } } void target_free_device(struct se_device *dev ) { struct se_hba *hba ; int __ret_warn_on ; int tmp ; long tmp___0 ; { hba = dev->se_hba; tmp = list_empty((struct list_head const *)(& dev->dev_sep_list)); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { 
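/* Editor comment: this unlikely branch is the CIL expansion of
 * WARN_ON(!list_empty(&dev->dev_sep_list)) in target_free_device() -
 * it warns when the device still has exported LUN/port links while being freed. */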
warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_device.c", 915); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((int )dev->dev_flags & 1) { ldv_destroy_workqueue_87(dev->tmr_wq); ldv_mutex_lock_88(& g_device_mutex); list_del(& dev->g_dev_node); ldv_mutex_unlock_89(& g_device_mutex); spin_lock(& hba->device_lock); hba->dev_count = hba->dev_count - 1U; spin_unlock(& hba->device_lock); } else { } core_alua_free_lu_gp_mem(dev); core_alua_set_lba_map(dev, (struct list_head *)0, 0, 0); core_scsi3_free_all_registrations(dev); se_release_vpd_for_dev(dev); if ((unsigned long )(dev->transport)->free_prot != (unsigned long )((void (*/* const */)(struct se_device * ))0)) { (*((dev->transport)->free_prot))(dev); } else { } (*((dev->transport)->free_device))(dev); return; } } int core_dev_setup_virtual_lun0(void) { struct se_hba *hba ; struct se_device *dev ; char buf[23U] ; int ret ; long tmp ; bool tmp___0 ; { buf[0] = 'r'; buf[1] = 'd'; buf[2] = '_'; buf[3] = 'p'; buf[4] = 'a'; buf[5] = 'g'; buf[6] = 'e'; buf[7] = 's'; buf[8] = '='; buf[9] = '8'; buf[10] = ','; buf[11] = 'r'; buf[12] = 'd'; buf[13] = '_'; buf[14] = 'n'; buf[15] = 'u'; buf[16] = 'l'; buf[17] = 'l'; buf[18] = 'i'; buf[19] = 'o'; buf[20] = '='; buf[21] = '1'; buf[22] = '\000'; hba = core_alloc_hba("rd_mcp", 0U, 1U); tmp___0 = IS_ERR((void const *)hba); if ((int )tmp___0) { tmp = PTR_ERR((void const *)hba); return ((int )tmp); } else { } dev = target_alloc_device(hba, "virt_lun0"); if ((unsigned long )dev == (unsigned long )((struct se_device *)0)) { ret = -12; goto out_free_hba; } else { } (*(((hba->backend)->ops)->set_configfs_dev_params))(dev, (char const *)(& buf), 23L); ret = target_configure_device(dev); if (ret != 0) { goto out_free_se_dev; } else { } lun0_hba = hba; g_lun0_dev = dev; return (0); out_free_se_dev: target_free_device(dev); out_free_hba: core_delete_hba(hba); return (ret); } } void core_dev_release_virtual_lun0(void) { struct se_hba *hba ; { hba = lun0_hba; if ((unsigned long )hba == (unsigned long )((struct se_hba *)0)) { return; } else { } if ((unsigned long )g_lun0_dev != (unsigned long )((struct se_device *)0)) { target_free_device(g_lun0_dev); } else { } core_delete_hba(hba); return; } } sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd , sense_reason_t (*exec_cmd)(struct se_cmd * ) ) { unsigned char *cdb ; u16 tmp ; { cdb = cmd->t_task_cdb; switch ((int )*cdb) { case 40: ; case 168: ; case 136: ; case 29: ; case 47: ; case 143: ; case 46: ; case 174: ; case 163: ; goto ldv_58198; default: *(cdb + 1UL) = (unsigned int )*(cdb + 1UL) & 31U; goto ldv_58198; } ldv_58198: ; if ((unsigned int )*cdb == 160U) { cmd->execute_cmd = & spc_emulate_report_luns; return (0U); } else { } switch ((int )*cdb) { case 8: ; case 40: ; case 168: ; case 136: ; case 10: ; case 42: ; case 170: ; case 138: ; case 46: ; case 174: ; case 142: ; case 137: ; case 83: cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; goto ldv_58213; case 127: tmp = get_unaligned_be16((void const *)cdb + 8U); switch ((int )tmp) { case 9: ; case 11: ; case 12: ; case 7: cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; goto ldv_58219; } ldv_58219: ; } ldv_58213: cmd->execute_cmd = exec_cmd; return (0U); } } static char const __kstrtab_passthrough_parse_cdb[22U] = { 'p', 'a', 's', 's', 't', 'h', 'r', 'o', 'u', 'g', 'h', '_', 'p', 'a', 'r', 's', 'e', '_', 'c', 'd', 'b', '\000'}; 
struct kernel_symbol const __ksymtab_passthrough_parse_cdb ; struct kernel_symbol const __ksymtab_passthrough_parse_cdb = {(unsigned long )(& passthrough_parse_cdb), (char const *)(& __kstrtab_passthrough_parse_cdb)}; void work_init_1(void) { { ldv_work_1_0 = 0; ldv_work_1_1 = 0; ldv_work_1_2 = 0; ldv_work_1_3 = 0; return; } } void invoke_work_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_1_0 == 2 || ldv_work_1_0 == 3) { ldv_work_1_0 = 4; target_qf_do_work(ldv_work_struct_1_0); ldv_work_1_0 = 1; } else { } goto ldv_58237; case 1: ; if (ldv_work_1_1 == 2 || ldv_work_1_1 == 3) { ldv_work_1_1 = 4; target_qf_do_work(ldv_work_struct_1_0); ldv_work_1_1 = 1; } else { } goto ldv_58237; case 2: ; if (ldv_work_1_2 == 2 || ldv_work_1_2 == 3) { ldv_work_1_2 = 4; target_qf_do_work(ldv_work_struct_1_0); ldv_work_1_2 = 1; } else { } goto ldv_58237; case 3: ; if (ldv_work_1_3 == 2 || ldv_work_1_3 == 3) { ldv_work_1_3 = 4; target_qf_do_work(ldv_work_struct_1_0); ldv_work_1_3 = 1; } else { } goto ldv_58237; default: ldv_stop(); } ldv_58237: ; return; } } void call_and_disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 2 || ldv_work_1_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_0) { target_qf_do_work(work); ldv_work_1_0 = 1; return; } else { } if ((ldv_work_1_1 == 2 || ldv_work_1_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_1) { target_qf_do_work(work); ldv_work_1_1 = 1; return; } else { } if ((ldv_work_1_2 == 2 || ldv_work_1_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_2) { target_qf_do_work(work); ldv_work_1_2 = 1; return; } else { } if ((ldv_work_1_3 == 2 || ldv_work_1_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_1_3) { target_qf_do_work(work); ldv_work_1_3 = 1; return; } else { } return; } } void disable_work_1(struct work_struct *work ) { { if ((ldv_work_1_0 == 3 || ldv_work_1_0 == 2) && (unsigned long )ldv_work_struct_1_0 == (unsigned long )work) { ldv_work_1_0 = 1; } else { } if ((ldv_work_1_1 == 3 || ldv_work_1_1 == 2) && (unsigned long )ldv_work_struct_1_1 == (unsigned long )work) { ldv_work_1_1 = 1; } else { } if ((ldv_work_1_2 == 3 || ldv_work_1_2 == 2) && (unsigned long )ldv_work_struct_1_2 == (unsigned long )work) { ldv_work_1_2 = 1; } else { } if ((ldv_work_1_3 == 3 || ldv_work_1_3 == 2) && (unsigned long )ldv_work_struct_1_3 == (unsigned long )work) { ldv_work_1_3 = 1; } else { } return; } } void activate_work_1(struct work_struct *work , int state ) { { if (ldv_work_1_0 == 0) { ldv_work_struct_1_0 = work; ldv_work_1_0 = state; return; } else { } if (ldv_work_1_1 == 0) { ldv_work_struct_1_1 = work; ldv_work_1_1 = state; return; } else { } if (ldv_work_1_2 == 0) { ldv_work_struct_1_2 = work; ldv_work_1_2 = state; return; } else { } if (ldv_work_1_3 == 0) { ldv_work_struct_1_3 = work; ldv_work_1_3 = state; return; } else { } return; } } void call_and_disable_all_1(int state ) { { if (ldv_work_1_0 == state) { call_and_disable_work_1(ldv_work_struct_1_0); } else { } if (ldv_work_1_1 == state) { call_and_disable_work_1(ldv_work_struct_1_1); } else { } if (ldv_work_1_2 == state) { call_and_disable_work_1(ldv_work_struct_1_2); } else { } if (ldv_work_1_3 == state) { call_and_disable_work_1(ldv_work_struct_1_3); } else { } return; } } bool ldv_queue_work_on_57(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, 
ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_58(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_59(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_60(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_61(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_62(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_63(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_64(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_65(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_66(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_68(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_69(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_70(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_71(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_72(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_73(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_74(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_75(struct mutex *ldv_func_arg1 ) { { 
ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_76(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_77(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_78(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_79(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_80(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_81(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_82(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_83(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_84(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_85(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_device_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_86(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_device_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_destroy_workqueue_87(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } void ldv_mutex_lock_88(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_device_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_89(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_device_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void *ERR_PTR(long error ) ; __inline static bool IS_ERR(void const *ptr ) ; int ldv_mutex_trylock_137(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_133(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_134(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_138(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_132(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_135(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_136(struct mutex *ldv_func_arg1 ) ; __inline static void __preempt_count_add___0(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6765; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6765; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else 
if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6765; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6765; default: __bad_percpu_size(); } ldv_6765: ; return; } } __inline static void __preempt_count_sub___0(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6777; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6777; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6777; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6777; default: __bad_percpu_size(); } ldv_6777: ; return; } } __inline static void __rcu_read_lock___0(void) { { __preempt_count_add___0(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___0(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub___0(1); return; } } __inline static void rcu_read_lock___0(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __rcu_read_lock___0(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___0(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } __rcu_read_unlock___0(); rcu_lock_release(& rcu_lock_map); return; } } bool ldv_queue_work_on_127(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_128(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_130(struct workqueue_struct *ldv_func_arg1 ) ; struct se_lun *core_tpg_alloc_lun(struct se_portal_group *tpg , u64 unpacked_lun ) ; struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg , char const *initiatorname ) ; void core_tpg_del_initiator_node_acl(struct se_node_acl *acl ) ; void target_stat_setup_port_default_groups(struct se_lun *lun ) ; void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl ) ; ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun , char *page ) ; ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *lun , char const *page , size_t count ) ; ssize_t core_alua_show_offline_bit(struct se_lun *lun , char *page ) ; ssize_t core_alua_store_offline_bit(struct se_lun *lun , char const *page , size_t count ) ; ssize_t core_alua_show_secondary_status(struct se_lun *lun , char *page ) ; ssize_t core_alua_store_secondary_status(struct se_lun *lun , char const *page , size_t count ) ; ssize_t core_alua_show_secondary_write_metadata(struct se_lun *lun , char *page ) ; ssize_t core_alua_store_secondary_write_metadata(struct se_lun *lun , char const *page , size_t count ) ; static int target_fabric_mappedlun_link(struct config_item *lun_acl_ci , struct config_item *lun_ci ) { struct se_dev_entry *deve ; struct se_lun *lun ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_lun_acl *lacl ; struct config_group const *__mptr___0 ; struct config_group *tmp___0 ; struct se_portal_group *se_tpg ; struct config_item *nacl_ci ; struct config_item *tpg_ci ; struct config_item *tpg_ci_s ; struct config_item *wwn_ci ; struct config_item *wwn_ci_s ; int lun_access ; char *tmp___1 ; char *tmp___2 ; char *tmp___3 ; int tmp___4 ; char *tmp___5 ; char *tmp___6 ; char *tmp___7 ; char *tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; { tmp = to_config_group(lun_ci); __mptr = (struct config_group const *)tmp; lun = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; tmp___0 = to_config_group(lun_acl_ci); __mptr___0 = (struct config_group const *)tmp___0; lacl = (struct se_lun_acl *)__mptr___0 + 0xffffffffffffff08UL; if (lun->lun_link_magic != 4294932337U) { printk("\vBad lun->lun_link_magic, not a valid lun_ci pointer: %p to struct lun: %p\n", lun_ci, lun); return (-14); } else { } if ((unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { printk("\vSource se_lun->lun_se_dev does not exist\n"); return (-22); } else { } se_tpg = lun->lun_tpg; nacl_ci = & ((lun_acl_ci->ci_parent)->ci_group)->cg_item; tpg_ci = & (nacl_ci->ci_group)->cg_item; wwn_ci = & (tpg_ci->ci_group)->cg_item; tpg_ci_s = & 
((lun_ci->ci_parent)->ci_group)->cg_item; wwn_ci_s = & (tpg_ci_s->ci_group)->cg_item; tmp___2 = config_item_name(wwn_ci_s); tmp___3 = config_item_name(wwn_ci); tmp___4 = strcmp((char const *)tmp___3, (char const *)tmp___2); if (tmp___4 != 0) { tmp___1 = config_item_name(wwn_ci); printk("\vIllegal Initiator ACL SymLink outside of %s\n", tmp___1); return (-22); } else { } tmp___7 = config_item_name(tpg_ci_s); tmp___8 = config_item_name(tpg_ci); tmp___9 = strcmp((char const *)tmp___8, (char const *)tmp___7); if (tmp___9 != 0) { tmp___5 = config_item_name(tpg_ci); tmp___6 = config_item_name(wwn_ci); printk("\vIllegal Initiator ACL Symlink outside of %s TPGT: %s\n", tmp___6, tmp___5); return (-22); } else { } rcu_read_lock___0(); deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { lun_access = (int )deve->lun_flags; } else { tmp___10 = (*((se_tpg->se_tpg_tfo)->tpg_check_prod_mode_write_protect))(se_tpg); lun_access = tmp___10 != 0 ? 1 : 2; } rcu_read_unlock___0(); tmp___11 = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun, (u32 )lun_access); return (tmp___11); } } static int target_fabric_mappedlun_unlink(struct config_item *lun_acl_ci , struct config_item *lun_ci ) { struct se_lun_acl *lacl ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_lun *lun ; struct config_group const *__mptr___0 ; struct config_group *tmp___0 ; int tmp___1 ; { tmp = to_config_group(lun_acl_ci); __mptr = (struct config_group const *)tmp; lacl = (struct se_lun_acl *)__mptr + 0xffffffffffffff08UL; tmp___0 = to_config_group(lun_ci); __mptr___0 = (struct config_group const *)tmp___0; lun = (struct se_lun *)__mptr___0 + 0xfffffffffffffe50UL; tmp___1 = core_dev_del_initiator_node_lun_acl(lun, lacl); return (tmp___1); } } static ssize_t target_fabric_mappedlun_show_write_protect(struct se_lun_acl *lacl , char *page ) { struct se_node_acl *se_nacl ; struct se_dev_entry *deve ; ssize_t len ; int tmp ; { se_nacl = lacl->se_lun_nacl; len = 0L; rcu_read_lock___0(); deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { tmp = sprintf(page, "%d\n", (int )deve->lun_flags & 1); len = (ssize_t )tmp; } else { } rcu_read_unlock___0(); return (len); } } static ssize_t target_fabric_mappedlun_store_write_protect(struct se_lun_acl *lacl , char const *page , size_t count ) { struct se_node_acl *se_nacl ; struct se_portal_group *se_tpg ; unsigned long op ; int ret ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { se_nacl = lacl->se_lun_nacl; se_tpg = se_nacl->se_tpg; ret = kstrtoul(page, 0U, & op); if (ret != 0) { return ((ssize_t )ret); } else { } if (op != 1UL && op != 0UL) { return (-22L); } else { } core_update_device_list_access(lacl->mapped_lun, op != 0UL ? 
1U : 2U, lacl->se_lun_nacl); descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_mappedlun_store_write_protect"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "%s_ConfigFS: Changed Initiator ACL: %s Mapped LUN: %llu Write Protect bit to %s\n"; descriptor.lineno = 206U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((se_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_ConfigFS: Changed Initiator ACL: %s Mapped LUN: %llu Write Protect bit to %s\n", tmp, (char *)(& lacl->initiatorname), lacl->mapped_lun, op != 0UL ? (char *)"ON" : (char *)"OFF"); } else { } return ((ssize_t )count); } } static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_write_protect = {{"write_protect", & __this_module, 420U}, & target_fabric_mappedlun_show_write_protect, & target_fabric_mappedlun_store_write_protect}; static struct se_lun_acl *to_target_fabric_mappedlun(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_lun_acl *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_lun_acl *)__mptr + 0xffffffffffffff08UL; } else { tmp___0 = (struct se_lun_acl *)0; } return (tmp___0); } } static ssize_t target_fabric_mappedlun_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_lun_acl *se_lun_acl ; struct se_lun_acl *tmp ; struct target_fabric_mappedlun_attribute *target_fabric_mappedlun_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_mappedlun(item); se_lun_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_mappedlun_attr = (struct target_fabric_mappedlun_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_mappedlun_attr->show != (unsigned long )((ssize_t (*)(struct se_lun_acl * , char * ))0)) { ret = (*(target_fabric_mappedlun_attr->show))(se_lun_acl, page); } else { } return (ret); } } static ssize_t target_fabric_mappedlun_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_lun_acl *se_lun_acl ; struct se_lun_acl *tmp ; struct target_fabric_mappedlun_attribute *target_fabric_mappedlun_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_mappedlun(item); se_lun_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_mappedlun_attr = (struct target_fabric_mappedlun_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_mappedlun_attr->store != (unsigned long )((ssize_t (*)(struct se_lun_acl * , char const * , size_t ))0)) { ret = (*(target_fabric_mappedlun_attr->store))(se_lun_acl, page, count); } else { } return (ret); } } static void target_fabric_mappedlun_release(struct config_item *item ) { struct se_lun_acl *lacl ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *se_tpg ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lacl = (struct se_lun_acl *)__mptr + 0xffffffffffffff08UL; se_tpg = (lacl->se_lun_nacl)->se_tpg; core_dev_free_initiator_node_lun_acl(se_tpg, lacl); return; } } 
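/* The attribute table and configfs_item_operations defined next provide the configfs glue for a mapped-LUN directory; target_fabric_setup_tpg_mappedlun_cit() further below installs them into tf->tf_tpg_mappedlun_cit. */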
static struct configfs_attribute *target_fabric_mappedlun_attrs[2U] = { & target_fabric_mappedlun_write_protect.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_fabric_mappedlun_item_ops = {& target_fabric_mappedlun_release, & target_fabric_mappedlun_attr_show, & target_fabric_mappedlun_attr_store, & target_fabric_mappedlun_link, & target_fabric_mappedlun_unlink}; static void target_fabric_setup_tpg_mappedlun_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_mappedlun_cit; cit->ct_item_ops = & target_fabric_mappedlun_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct configfs_attribute **)(& target_fabric_mappedlun_attrs); cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_mappedlun_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 239U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_mappedlun"); } else { } return; } } static struct config_group *target_core_mappedlun_stat_mkdir(struct config_group *group , char const *name ) { void *tmp ; { tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } } static void target_core_mappedlun_stat_rmdir(struct config_group *group , struct config_item *item ) { { return; } } static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = {0, & target_core_mappedlun_stat_mkdir, 0, 0, & target_core_mappedlun_stat_rmdir}; static void target_fabric_setup_tpg_mappedlun_stat_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_mappedlun_stat_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_fabric_mappedlun_stat_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_mappedlun_stat_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 265U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_mappedlun_stat"); } else { } return; } } static struct se_node_acl *to_target_fabric_nacl_attrib(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_node_acl *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_node_acl *)__mptr + 0xfffffffffffffd58UL; } else { tmp___0 = (struct se_node_acl *)0; } return (tmp___0); } } static ssize_t target_fabric_nacl_attrib_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct 
se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_attrib_attribute *target_fabric_nacl_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_attrib(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_attrib_attr = (struct target_fabric_nacl_attrib_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_nacl_attrib_attr->show != (unsigned long )((ssize_t (*)(struct se_node_acl * , char * ))0)) { ret = (*(target_fabric_nacl_attrib_attr->show))(se_node_acl, page); } else { } return (ret); } } static ssize_t target_fabric_nacl_attrib_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_attrib_attribute *target_fabric_nacl_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_attrib(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_attrib_attr = (struct target_fabric_nacl_attrib_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_nacl_attrib_attr->store != (unsigned long )((ssize_t (*)(struct se_node_acl * , char const * , size_t ))0)) { ret = (*(target_fabric_nacl_attrib_attr->store))(se_node_acl, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {0, & target_fabric_nacl_attrib_attr_show, & target_fabric_nacl_attrib_attr_store, 0, 0}; static void target_fabric_setup_tpg_nacl_attrib_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_attrib_cit; attrs = (tf->tf_ops)->tfc_tpg_nacl_attrib_attrs; cit->ct_item_ops = & target_fabric_nacl_attrib_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_attrib_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 278U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_nacl_attrib"); } else { } return; } } static struct se_node_acl *to_target_fabric_nacl_auth(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_node_acl *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_node_acl *)__mptr + 0xfffffffffffffce8UL; } else { tmp___0 = (struct se_node_acl *)0; } return (tmp___0); } } static ssize_t target_fabric_nacl_auth_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_auth_attribute *target_fabric_nacl_auth_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_auth(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; 
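/* container_of-style recovery: __mptr is the generic configfs_attribute embedded in the fabric-specific attribute wrapper; the cast below gets the wrapper back so its show callback, if set, can be dispatched. */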
target_fabric_nacl_auth_attr = (struct target_fabric_nacl_auth_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_nacl_auth_attr->show != (unsigned long )((ssize_t (*)(struct se_node_acl * , char * ))0)) { ret = (*(target_fabric_nacl_auth_attr->show))(se_node_acl, page); } else { } return (ret); } } static ssize_t target_fabric_nacl_auth_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_auth_attribute *target_fabric_nacl_auth_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_auth(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_auth_attr = (struct target_fabric_nacl_auth_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_nacl_auth_attr->store != (unsigned long )((ssize_t (*)(struct se_node_acl * , char const * , size_t ))0)) { ret = (*(target_fabric_nacl_auth_attr->store))(se_node_acl, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {0, & target_fabric_nacl_auth_attr_show, & target_fabric_nacl_auth_attr_store, 0, 0}; static void target_fabric_setup_tpg_nacl_auth_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_auth_cit; attrs = (tf->tf_ops)->tfc_tpg_nacl_auth_attrs; cit->ct_item_ops = & target_fabric_nacl_auth_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_auth_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 291U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_nacl_auth"); } else { } return; } } static struct se_node_acl *to_target_fabric_nacl_param(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_node_acl *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_node_acl *)__mptr + 0xfffffffffffffc78UL; } else { tmp___0 = (struct se_node_acl *)0; } return (tmp___0); } } static ssize_t target_fabric_nacl_param_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_param_attribute *target_fabric_nacl_param_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_param(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_param_attr = (struct target_fabric_nacl_param_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_nacl_param_attr->show != (unsigned long )((ssize_t (*)(struct se_node_acl * , char * ))0)) { ret = (*(target_fabric_nacl_param_attr->show))(se_node_acl, page); } else { } return (ret); } } static ssize_t 
target_fabric_nacl_param_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_param_attribute *target_fabric_nacl_param_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_param(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_param_attr = (struct target_fabric_nacl_param_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_nacl_param_attr->store != (unsigned long )((ssize_t (*)(struct se_node_acl * , char const * , size_t ))0)) { ret = (*(target_fabric_nacl_param_attr->store))(se_node_acl, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_nacl_param_item_ops = {0, & target_fabric_nacl_param_attr_show, & target_fabric_nacl_param_attr_store, 0, 0}; static void target_fabric_setup_tpg_nacl_param_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_param_cit; attrs = (tf->tf_ops)->tfc_tpg_nacl_param_attrs; cit->ct_item_ops = & target_fabric_nacl_param_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_param_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 304U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_nacl_param"); } else { } return; } } static struct se_node_acl *to_target_fabric_nacl_base(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_node_acl *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_node_acl *)__mptr + 0xfffffffffffffdc8UL; } else { tmp___0 = (struct se_node_acl *)0; } return (tmp___0); } } static ssize_t target_fabric_nacl_base_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_base_attribute *target_fabric_nacl_base_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_nacl_base(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_base_attr = (struct target_fabric_nacl_base_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_nacl_base_attr->show != (unsigned long )((ssize_t (*)(struct se_node_acl * , char * ))0)) { ret = (*(target_fabric_nacl_base_attr->show))(se_node_acl, page); } else { } return (ret); } } static ssize_t target_fabric_nacl_base_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_node_acl *se_node_acl ; struct se_node_acl *tmp ; struct target_fabric_nacl_base_attribute *target_fabric_nacl_base_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp 
= to_target_fabric_nacl_base(item); se_node_acl = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_nacl_base_attr = (struct target_fabric_nacl_base_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_nacl_base_attr->store != (unsigned long )((ssize_t (*)(struct se_node_acl * , char const * , size_t ))0)) { ret = (*(target_fabric_nacl_base_attr->store))(se_node_acl, page, count); } else { } return (ret); } } static struct config_group *target_fabric_make_mappedlun(struct config_group *group , char const *name ) { struct se_node_acl *se_nacl ; struct config_group const *__mptr ; struct se_portal_group *se_tpg ; struct target_fabric_configfs *tf ; struct se_lun_acl *lacl ; struct config_item *acl_ci ; struct config_group *lacl_cg ; struct config_group *ml_stat_grp ; char *buf ; unsigned long long mapped_lun ; int ret ; size_t tmp ; void *tmp___0 ; void *tmp___1 ; size_t tmp___2 ; char *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; { __mptr = (struct config_group const *)group; se_nacl = (struct se_node_acl *)__mptr + 0xfffffffffffffdc8UL; se_tpg = se_nacl->se_tpg; tf = (se_tpg->se_tpg_wwn)->wwn_tf; lacl = (struct se_lun_acl *)0; lacl_cg = (struct config_group *)0; ml_stat_grp = (struct config_group *)0; ret = 0; acl_ci = & group->cg_item; if ((unsigned long )acl_ci == (unsigned long )((struct config_item *)0)) { printk("\vUnable to locatel acl_ci\n"); return ((struct config_group *)0); } else { } tmp = strlen(name); tmp___0 = kzalloc(tmp + 1UL, 208U); buf = (char *)tmp___0; if ((unsigned long )buf == (unsigned long )((char *)0)) { printk("\vUnable to allocate memory for name buf\n"); tmp___1 = ERR_PTR(-12L); return ((struct config_group *)tmp___1); } else { } tmp___2 = strlen(name); snprintf(buf, tmp___2 + 1UL, "%s", name); tmp___3 = strstr((char const *)buf, "lun_"); if ((unsigned long )tmp___3 != (unsigned long )buf) { printk("\vUnable to locate \"lun_\" from buf: %s name: %s\n", buf, name); ret = -22; goto out; } else { } ret = kstrtoull((char const *)buf + 4U, 0U, & mapped_lun); if (ret != 0) { goto out; } else { } lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, mapped_lun, & ret); if ((unsigned long )lacl == (unsigned long )((struct se_lun_acl *)0)) { ret = -22; goto out; } else { } lacl_cg = & lacl->se_lun_group; tmp___4 = kmalloc(16UL, 208U); lacl_cg->default_groups = (struct config_group **)tmp___4; if ((unsigned long )lacl_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate lacl_cg->default_groups\n"); ret = -12; goto out; } else { } config_group_init_type_name(& lacl->se_lun_group, name, & tf->tf_tpg_mappedlun_cit); config_group_init_type_name(& lacl->ml_stat_grps.stat_group, "statistics", & tf->tf_tpg_mappedlun_stat_cit); *(lacl_cg->default_groups) = & lacl->ml_stat_grps.stat_group; *(lacl_cg->default_groups + 1UL) = (struct config_group *)0; ml_stat_grp = & lacl->ml_stat_grps.stat_group; tmp___5 = kmalloc(24UL, 208U); ml_stat_grp->default_groups = (struct config_group **)tmp___5; if ((unsigned long )ml_stat_grp->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate ml_stat_grp->default_groups\n"); ret = -12; goto out; } else { } target_stat_setup_mappedlun_default_groups(lacl); kfree((void const *)buf); return (& lacl->se_lun_group); out: ; if ((unsigned long )lacl_cg != (unsigned long )((struct config_group *)0)) { kfree((void const *)lacl_cg->default_groups); } else { } kfree((void const *)lacl); kfree((void const *)buf); tmp___6 = 
ERR_PTR((long )ret); return ((struct config_group *)tmp___6); } } static void target_fabric_drop_mappedlun(struct config_group *group , struct config_item *item ) { struct se_lun_acl *lacl ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_item *df_item ; struct config_group *lacl_cg ; struct config_group *ml_stat_grp ; int i ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lacl = (struct se_lun_acl *)__mptr + 0xffffffffffffff08UL; lacl_cg = (struct config_group *)0; ml_stat_grp = (struct config_group *)0; ml_stat_grp = & lacl->ml_stat_grps.stat_group; i = 0; goto ldv_62216; ldv_62215: df_item = & (*(ml_stat_grp->default_groups + (unsigned long )i))->cg_item; *(ml_stat_grp->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62216: ; if ((unsigned long )*(ml_stat_grp->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62215; } else { } kfree((void const *)ml_stat_grp->default_groups); lacl_cg = & lacl->se_lun_group; i = 0; goto ldv_62219; ldv_62218: df_item = & (*(lacl_cg->default_groups + (unsigned long )i))->cg_item; *(lacl_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62219: ; if ((unsigned long )*(lacl_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62218; } else { } kfree((void const *)lacl_cg->default_groups); config_item_put(item); return; } } static void target_fabric_nacl_base_release(struct config_item *item ) { struct se_node_acl *se_nacl ; struct config_group const *__mptr ; struct config_group *tmp ; struct target_fabric_configfs *tf ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; se_nacl = (struct se_node_acl *)__mptr + 0xfffffffffffffdc8UL; tf = ((se_nacl->se_tpg)->se_tpg_wwn)->wwn_tf; if ((unsigned long )(tf->tf_ops)->fabric_cleanup_nodeacl != (unsigned long )((void (*/* const */)(struct se_node_acl * ))0)) { (*((tf->tf_ops)->fabric_cleanup_nodeacl))(se_nacl); } else { } core_tpg_del_initiator_node_acl(se_nacl); return; } } static struct configfs_item_operations target_fabric_nacl_base_item_ops = {& target_fabric_nacl_base_release, & target_fabric_nacl_base_attr_show, & target_fabric_nacl_base_attr_store, 0, 0}; static struct configfs_group_operations target_fabric_nacl_base_group_ops = {0, & target_fabric_make_mappedlun, 0, 0, & target_fabric_drop_mappedlun}; static void target_fabric_setup_tpg_nacl_base_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_base_cit; attrs = (tf->tf_ops)->tfc_tpg_nacl_base_attrs; cit->ct_item_ops = & target_fabric_nacl_base_item_ops; cit->ct_group_ops = & target_fabric_nacl_base_group_ops; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_base_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 451U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char 
*)"tpg_nacl_base"); } else { } return; } } static void target_fabric_setup_tpg_nacl_stat_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_stat_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_stat_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 460U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_nacl_stat"); } else { } return; } } static struct config_group *target_fabric_make_nodeacl(struct config_group *group , char const *name ) { struct se_portal_group *se_tpg ; struct config_group const *__mptr ; struct target_fabric_configfs *tf ; struct se_node_acl *se_nacl ; struct config_group *nacl_cg ; void *tmp ; bool tmp___0 ; int ret ; int tmp___1 ; void *tmp___2 ; { __mptr = (struct config_group const *)group; se_tpg = (struct se_portal_group *)__mptr + 0xfffffffffffffc90UL; tf = (se_tpg->se_tpg_wwn)->wwn_tf; se_nacl = core_tpg_add_initiator_node_acl(se_tpg, name); tmp___0 = IS_ERR((void const *)se_nacl); if ((int )tmp___0) { tmp = ERR_CAST((void const *)se_nacl); return ((struct config_group *)tmp); } else { } if ((unsigned long )(tf->tf_ops)->fabric_init_nodeacl != (unsigned long )((int (*/* const */)(struct se_node_acl * , char const * ))0)) { tmp___1 = (*((tf->tf_ops)->fabric_init_nodeacl))(se_nacl, name); ret = tmp___1; if (ret != 0) { core_tpg_del_initiator_node_acl(se_nacl); tmp___2 = ERR_PTR((long )ret); return ((struct config_group *)tmp___2); } else { } } else { } nacl_cg = & se_nacl->acl_group; nacl_cg->default_groups = (struct config_group **)(& se_nacl->acl_default_groups); *(nacl_cg->default_groups) = & se_nacl->acl_attrib_group; *(nacl_cg->default_groups + 1UL) = & se_nacl->acl_auth_group; *(nacl_cg->default_groups + 2UL) = & se_nacl->acl_param_group; *(nacl_cg->default_groups + 3UL) = & se_nacl->acl_fabric_stat_group; *(nacl_cg->default_groups + 4UL) = (struct config_group *)0; config_group_init_type_name(& se_nacl->acl_group, name, & tf->tf_tpg_nacl_base_cit); config_group_init_type_name(& se_nacl->acl_attrib_group, "attrib", & tf->tf_tpg_nacl_attrib_cit); config_group_init_type_name(& se_nacl->acl_auth_group, "auth", & tf->tf_tpg_nacl_auth_cit); config_group_init_type_name(& se_nacl->acl_param_group, "param", & tf->tf_tpg_nacl_param_cit); config_group_init_type_name(& se_nacl->acl_fabric_stat_group, "fabric_statistics", & tf->tf_tpg_nacl_stat_cit); return (& se_nacl->acl_group); } } static void target_fabric_drop_nodeacl(struct config_group *group , struct config_item *item ) { struct se_node_acl *se_nacl ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_item *df_item ; struct config_group *nacl_cg ; int i ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; se_nacl = (struct se_node_acl *)__mptr + 0xfffffffffffffdc8UL; nacl_cg = & se_nacl->acl_group; i = 0; goto ldv_62265; ldv_62264: df_item = & 
(*(nacl_cg->default_groups + (unsigned long )i))->cg_item; *(nacl_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62265: ; if ((unsigned long )*(nacl_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62264; } else { } config_item_put(item); return; } } static struct configfs_group_operations target_fabric_nacl_group_ops = {0, & target_fabric_make_nodeacl, 0, 0, & target_fabric_drop_nodeacl}; static void target_fabric_setup_tpg_nacl_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_nacl_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_fabric_nacl_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_nacl_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 537U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_nacl"); } else { } return; } } static struct se_tpg_np *to_target_fabric_np_base(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_tpg_np *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_tpg_np *)__mptr + 0xfffffffffffffff8UL; } else { tmp___0 = (struct se_tpg_np *)0; } return (tmp___0); } } static ssize_t target_fabric_np_base_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_tpg_np *se_tpg_np ; struct se_tpg_np *tmp ; struct target_fabric_np_base_attribute *target_fabric_np_base_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_np_base(item); se_tpg_np = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_np_base_attr = (struct target_fabric_np_base_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_np_base_attr->show != (unsigned long )((ssize_t (*)(struct se_tpg_np * , char * ))0)) { ret = (*(target_fabric_np_base_attr->show))(se_tpg_np, page); } else { } return (ret); } } static ssize_t target_fabric_np_base_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_tpg_np *se_tpg_np ; struct se_tpg_np *tmp ; struct target_fabric_np_base_attribute *target_fabric_np_base_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_np_base(item); se_tpg_np = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_np_base_attr = (struct target_fabric_np_base_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_np_base_attr->store != (unsigned long )((ssize_t (*)(struct se_tpg_np * , char const * , size_t ))0)) { ret = (*(target_fabric_np_base_attr->store))(se_tpg_np, page, count); } else { } return (ret); } } static void target_fabric_np_base_release(struct config_item *item ) { struct se_tpg_np *se_tpg_np ; struct config_group const *__mptr ; struct 
config_group *tmp ; struct se_portal_group *se_tpg ; struct target_fabric_configfs *tf ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; se_tpg_np = (struct se_tpg_np *)__mptr + 0xfffffffffffffff8UL; se_tpg = se_tpg_np->tpg_np_parent; tf = (se_tpg->se_tpg_wwn)->wwn_tf; (*((tf->tf_ops)->fabric_drop_np))(se_tpg_np); return; } } static struct configfs_item_operations target_fabric_np_base_item_ops = {& target_fabric_np_base_release, & target_fabric_np_base_attr_show, & target_fabric_np_base_attr_store, 0, 0}; static void target_fabric_setup_tpg_np_base_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_np_base_cit; attrs = (tf->tf_ops)->tfc_tpg_np_base_attrs; cit->ct_item_ops = & target_fabric_np_base_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_np_base_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 561U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_np_base"); } else { } return; } } static struct config_group *target_fabric_make_np(struct config_group *group , char const *name ) { struct se_portal_group *se_tpg ; struct config_group const *__mptr ; struct target_fabric_configfs *tf ; struct se_tpg_np *se_tpg_np ; void *tmp ; void *tmp___0 ; bool tmp___1 ; { __mptr = (struct config_group const *)group; se_tpg = (struct se_portal_group *)__mptr + 0xfffffffffffffd00UL; tf = (se_tpg->se_tpg_wwn)->wwn_tf; if ((unsigned long )(tf->tf_ops)->fabric_make_np == (unsigned long )((struct se_tpg_np *(*/* const */)(struct se_portal_group * , struct config_group * , char const * ))0)) { printk("\vtf->tf_ops.fabric_make_np is NULL\n"); tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } else { } se_tpg_np = (*((tf->tf_ops)->fabric_make_np))(se_tpg, group, name); if ((unsigned long )se_tpg_np == (unsigned long )((struct se_tpg_np *)0)) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { tmp___1 = IS_ERR((void const *)se_tpg_np); if ((int )tmp___1) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { } } se_tpg_np->tpg_np_parent = se_tpg; config_group_init_type_name(& se_tpg_np->tpg_np_group, name, & tf->tf_tpg_np_base_cit); return (& se_tpg_np->tpg_np_group); } } static void target_fabric_drop_np(struct config_group *group , struct config_item *item ) { { config_item_put(item); return; } } static struct configfs_group_operations target_fabric_np_group_ops = {0, & target_fabric_make_np, 0, 0, & target_fabric_drop_np}; static void target_fabric_setup_tpg_np_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_np_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_fabric_np_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = 
"target_fabric_setup_tpg_np_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 607U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_np"); } else { } return; } } static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(struct se_lun *lun , char *page ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_show_tg_pt_gp_info(lun, page); return (tmp); } } static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(struct se_lun *lun , char const *page , size_t count ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_store_tg_pt_gp_info(lun, page, count); return (tmp); } } static struct target_fabric_port_attribute target_fabric_port_alua_tg_pt_gp = {{"alua_tg_pt_gp", & __this_module, 420U}, & target_fabric_port_show_attr_alua_tg_pt_gp, & target_fabric_port_store_attr_alua_tg_pt_gp}; static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(struct se_lun *lun , char *page ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_show_offline_bit(lun, page); return (tmp); } } static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(struct se_lun *lun , char const *page , size_t count ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_store_offline_bit(lun, page, count); return (tmp); } } static struct target_fabric_port_attribute target_fabric_port_alua_tg_pt_offline = {{"alua_tg_pt_offline", & __this_module, 420U}, & target_fabric_port_show_attr_alua_tg_pt_offline, & target_fabric_port_store_attr_alua_tg_pt_offline}; static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(struct se_lun *lun , char *page ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_show_secondary_status(lun, page); return (tmp); } } static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(struct se_lun *lun , char const *page , size_t count ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_store_secondary_status(lun, page, count); return (tmp); } } static struct target_fabric_port_attribute target_fabric_port_alua_tg_pt_status = {{"alua_tg_pt_status", & __this_module, 420U}, & target_fabric_port_show_attr_alua_tg_pt_status, & target_fabric_port_store_attr_alua_tg_pt_status}; static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(struct se_lun *lun , char *page ) { ssize_t tmp ; { if ((unsigned long )lun 
== (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_show_secondary_write_metadata(lun, page); return (tmp); } } static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(struct se_lun *lun , char const *page , size_t count ) { ssize_t tmp ; { if ((unsigned long )lun == (unsigned long )((struct se_lun *)0) || (unsigned long )lun->lun_se_dev == (unsigned long )((struct se_device *)0)) { return (-19L); } else { } tmp = core_alua_store_secondary_write_metadata(lun, page, count); return (tmp); } } static struct target_fabric_port_attribute target_fabric_port_alua_tg_pt_write_md = {{"alua_tg_pt_write_md", & __this_module, 420U}, & target_fabric_port_show_attr_alua_tg_pt_write_md, & target_fabric_port_store_attr_alua_tg_pt_write_md}; static struct configfs_attribute *target_fabric_port_attrs[5U] = { & target_fabric_port_alua_tg_pt_gp.attr, & target_fabric_port_alua_tg_pt_offline.attr, & target_fabric_port_alua_tg_pt_status.attr, & target_fabric_port_alua_tg_pt_write_md.attr, (struct configfs_attribute *)0}; static struct se_lun *to_target_fabric_port(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_lun *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; } else { tmp___0 = (struct se_lun *)0; } return (tmp___0); } } static ssize_t target_fabric_port_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_lun *se_lun ; struct se_lun *tmp ; struct target_fabric_port_attribute *target_fabric_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_port(item); se_lun = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_port_attr = (struct target_fabric_port_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_port_attr->show != (unsigned long )((ssize_t (*)(struct se_lun * , char * ))0)) { ret = (*(target_fabric_port_attr->show))(se_lun, page); } else { } return (ret); } } static ssize_t target_fabric_port_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_lun *se_lun ; struct se_lun *tmp ; struct target_fabric_port_attribute *target_fabric_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_port(item); se_lun = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_port_attr = (struct target_fabric_port_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_port_attr->store != (unsigned long )((ssize_t (*)(struct se_lun * , char const * , size_t ))0)) { ret = (*(target_fabric_port_attr->store))(se_lun, page, count); } else { } return (ret); } } static int target_fabric_port_link(struct config_item *lun_ci , struct config_item *se_dev_ci ) { struct config_item *tpg_ci ; struct se_lun *lun ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *se_tpg ; struct se_device *dev ; struct config_group const *__mptr___0 ; struct config_group *tmp___0 ; struct target_fabric_configfs *tf ; int ret ; struct config_group const *__mptr___1 ; struct config_group *tmp___1 ; { tmp = to_config_group(lun_ci); __mptr = (struct config_group const *)tmp; lun = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; tmp___0 = 
to_config_group(se_dev_ci); __mptr___0 = (struct config_group const *)tmp___0; dev = (struct se_device *)__mptr___0 + 0xfffffffffffff7c0UL; if (dev->dev_link_magic != 4277001967U) { printk("\vBad dev->dev_link_magic, not a valid se_dev_ci pointer: %p to struct se_device: %p\n", se_dev_ci, dev); return (-14); } else { } if ((dev->dev_flags & 1U) == 0U) { printk("\vse_device not configured yet, cannot port link\n"); return (-19); } else { } tpg_ci = & ((lun_ci->ci_parent)->ci_group)->cg_item; tmp___1 = to_config_group(tpg_ci); __mptr___1 = (struct config_group const *)tmp___1; se_tpg = (struct se_portal_group *)__mptr___1 + 0xfffffffffffffe18UL; tf = (se_tpg->se_tpg_wwn)->wwn_tf; if ((unsigned long )lun->lun_se_dev != (unsigned long )((struct se_device *)0)) { printk("\vPort Symlink already exists\n"); return (-17); } else { } ret = core_dev_add_lun(se_tpg, dev, lun); if (ret != 0) { printk("\vcore_dev_add_lun() failed: %d\n", ret); goto out; } else { } if ((unsigned long )(tf->tf_ops)->fabric_post_link != (unsigned long )((int (*/* const */)(struct se_portal_group * , struct se_lun * ))0)) { (*((tf->tf_ops)->fabric_post_link))(se_tpg, lun); } else { } return (0); out: ; return (ret); } } static int target_fabric_port_unlink(struct config_item *lun_ci , struct config_item *se_dev_ci ) { struct se_lun *lun ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *se_tpg ; struct target_fabric_configfs *tf ; { tmp = to_config_group(lun_ci); __mptr = (struct config_group const *)tmp; lun = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; se_tpg = lun->lun_tpg; tf = (se_tpg->se_tpg_wwn)->wwn_tf; if ((unsigned long )(tf->tf_ops)->fabric_pre_unlink != (unsigned long )((void (*/* const */)(struct se_portal_group * , struct se_lun * ))0)) { (*((tf->tf_ops)->fabric_pre_unlink))(se_tpg, lun); } else { } core_dev_del_lun(se_tpg, lun); return (0); } } static void target_fabric_port_release(struct config_item *item ) { struct se_lun *lun ; struct config_group const *__mptr ; struct config_group *tmp ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lun = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; kfree_call_rcu(& lun->callback_head, (void (*)(struct callback_head * ))1176); return; } } static struct configfs_item_operations target_fabric_port_item_ops = {& target_fabric_port_release, & target_fabric_port_attr_show, & target_fabric_port_attr_store, & target_fabric_port_link, & target_fabric_port_unlink}; static void target_fabric_setup_tpg_port_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_port_cit; cit->ct_item_ops = & target_fabric_port_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct configfs_attribute **)(& target_fabric_port_attrs); cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_port_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 831U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_port"); } else { } return; } } static struct config_group 
*target_core_port_stat_mkdir(struct config_group *group , char const *name ) { void *tmp ; { tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } } static void target_core_port_stat_rmdir(struct config_group *group , struct config_item *item ) { { return; } } static struct configfs_group_operations target_fabric_port_stat_group_ops = {0, & target_core_port_stat_mkdir, 0, 0, & target_core_port_stat_rmdir}; static void target_fabric_setup_tpg_port_stat_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_port_stat_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_fabric_port_stat_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_port_stat_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 856U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_port_stat"); } else { } return; } } static struct config_group *target_fabric_make_lun(struct config_group *group , char const *name ) { struct se_lun *lun ; struct se_portal_group *se_tpg ; struct config_group const *__mptr ; struct target_fabric_configfs *tf ; struct config_group *lun_cg ; struct config_group *port_stat_grp ; unsigned long long unpacked_lun ; int errno ; void *tmp ; char *tmp___0 ; void *tmp___1 ; void *tmp___2 ; bool tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; { __mptr = (struct config_group const *)group; se_tpg = (struct se_portal_group *)__mptr + 0xfffffffffffffd70UL; tf = (se_tpg->se_tpg_wwn)->wwn_tf; lun_cg = (struct config_group *)0; port_stat_grp = (struct config_group *)0; tmp___0 = strstr(name, "lun_"); if ((unsigned long )((char const *)tmp___0) != (unsigned long )name) { printk("\vUnable to locate \'_\" in \"lun_$LUN_NUMBER\"\n"); tmp = ERR_PTR(-22L); return ((struct config_group *)tmp); } else { } errno = kstrtoull(name + 4UL, 0U, & unpacked_lun); if (errno != 0) { tmp___1 = ERR_PTR((long )errno); return ((struct config_group *)tmp___1); } else { } lun = core_tpg_alloc_lun(se_tpg, unpacked_lun); tmp___3 = IS_ERR((void const *)lun); if ((int )tmp___3) { tmp___2 = ERR_CAST((void const *)lun); return ((struct config_group *)tmp___2); } else { } lun_cg = & lun->lun_group; tmp___4 = kmalloc(16UL, 208U); lun_cg->default_groups = (struct config_group **)tmp___4; if ((unsigned long )lun_cg->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate lun_cg->default_groups\n"); kfree((void const *)lun); tmp___5 = ERR_PTR(-12L); return ((struct config_group *)tmp___5); } else { } config_group_init_type_name(& lun->lun_group, name, & tf->tf_tpg_port_cit); config_group_init_type_name(& lun->port_stat_grps.stat_group, "statistics", & tf->tf_tpg_port_stat_cit); *(lun_cg->default_groups) = & lun->port_stat_grps.stat_group; *(lun_cg->default_groups + 1UL) = (struct config_group *)0; port_stat_grp = & lun->port_stat_grps.stat_group; tmp___6 = kzalloc(32UL, 208U); port_stat_grp->default_groups = (struct config_group **)tmp___6; if ((unsigned 
long )port_stat_grp->default_groups == (unsigned long )((struct config_group **)0)) { printk("\vUnable to allocate port_stat_grp->default_groups\n"); kfree((void const *)lun_cg->default_groups); kfree((void const *)lun); tmp___7 = ERR_PTR(-12L); return ((struct config_group *)tmp___7); } else { } target_stat_setup_port_default_groups(lun); return (& lun->lun_group); } } static void target_fabric_drop_lun(struct config_group *group , struct config_item *item ) { struct se_lun *lun ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_item *df_item ; struct config_group *lun_cg ; struct config_group *port_stat_grp ; int i ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; lun = (struct se_lun *)__mptr + 0xfffffffffffffe50UL; port_stat_grp = & lun->port_stat_grps.stat_group; i = 0; goto ldv_62492; ldv_62491: df_item = & (*(port_stat_grp->default_groups + (unsigned long )i))->cg_item; *(port_stat_grp->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62492: ; if ((unsigned long )*(port_stat_grp->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62491; } else { } kfree((void const *)port_stat_grp->default_groups); lun_cg = & lun->lun_group; i = 0; goto ldv_62495; ldv_62494: df_item = & (*(lun_cg->default_groups + (unsigned long )i))->cg_item; *(lun_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62495: ; if ((unsigned long )*(lun_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62494; } else { } kfree((void const *)lun_cg->default_groups); config_item_put(item); return; } } static struct configfs_group_operations target_fabric_lun_group_ops = {0, & target_fabric_make_lun, 0, 0, & target_fabric_drop_lun}; static void target_fabric_setup_tpg_lun_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_lun_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = & target_fabric_lun_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_lun_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 951U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_lun"); } else { } return; } } static struct se_portal_group *to_target_fabric_tpg_attrib(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_portal_group *)__mptr + 0xfffffffffffffc20UL; } else { tmp___0 = (struct se_portal_group *)0; } return (tmp___0); } } static ssize_t target_fabric_tpg_attrib_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct 
target_fabric_tpg_attrib_attribute *target_fabric_tpg_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_attrib(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_attrib_attr = (struct target_fabric_tpg_attrib_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_tpg_attrib_attr->show != (unsigned long )((ssize_t (*)(struct se_portal_group * , char * ))0)) { ret = (*(target_fabric_tpg_attrib_attr->show))(se_portal_group, page); } else { } return (ret); } } static ssize_t target_fabric_tpg_attrib_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_attrib_attribute *target_fabric_tpg_attrib_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_attrib(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_attrib_attr = (struct target_fabric_tpg_attrib_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_tpg_attrib_attr->store != (unsigned long )((ssize_t (*)(struct se_portal_group * , char const * , size_t ))0)) { ret = (*(target_fabric_tpg_attrib_attr->store))(se_portal_group, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {0, & target_fabric_tpg_attrib_attr_show, & target_fabric_tpg_attrib_attr_store, 0, 0}; static void target_fabric_setup_tpg_attrib_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_attrib_cit; attrs = (tf->tf_ops)->tfc_tpg_attrib_attrs; cit->ct_item_ops = & target_fabric_tpg_attrib_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_attrib_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 964U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_attrib"); } else { } return; } } static struct se_portal_group *to_target_fabric_tpg_auth(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_portal_group *)__mptr + 0xfffffffffffffbb0UL; } else { tmp___0 = (struct se_portal_group *)0; } return (tmp___0); } } static ssize_t target_fabric_tpg_auth_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_auth_attribute *target_fabric_tpg_auth_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_auth(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_auth_attr = (struct 
target_fabric_tpg_auth_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_tpg_auth_attr->show != (unsigned long )((ssize_t (*)(struct se_portal_group * , char * ))0)) { ret = (*(target_fabric_tpg_auth_attr->show))(se_portal_group, page); } else { } return (ret); } } static ssize_t target_fabric_tpg_auth_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_auth_attribute *target_fabric_tpg_auth_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_auth(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_auth_attr = (struct target_fabric_tpg_auth_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_tpg_auth_attr->store != (unsigned long )((ssize_t (*)(struct se_portal_group * , char const * , size_t ))0)) { ret = (*(target_fabric_tpg_auth_attr->store))(se_portal_group, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_tpg_auth_item_ops = {0, & target_fabric_tpg_auth_attr_show, & target_fabric_tpg_auth_attr_store, 0, 0}; static void target_fabric_setup_tpg_auth_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_auth_cit; attrs = (tf->tf_ops)->tfc_tpg_auth_attrs; cit->ct_item_ops = & target_fabric_tpg_auth_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_auth_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 977U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_auth"); } else { } return; } } static struct se_portal_group *to_target_fabric_tpg_param(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_portal_group *)__mptr + 0xfffffffffffffb40UL; } else { tmp___0 = (struct se_portal_group *)0; } return (tmp___0); } } static ssize_t target_fabric_tpg_param_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_param_attribute *target_fabric_tpg_param_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_param(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_param_attr = (struct target_fabric_tpg_param_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_tpg_param_attr->show != (unsigned long )((ssize_t (*)(struct se_portal_group * , char * ))0)) { ret = (*(target_fabric_tpg_param_attr->show))(se_portal_group, page); } else { } return (ret); } } static ssize_t 
target_fabric_tpg_param_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_param_attribute *target_fabric_tpg_param_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg_param(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_param_attr = (struct target_fabric_tpg_param_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_tpg_param_attr->store != (unsigned long )((ssize_t (*)(struct se_portal_group * , char const * , size_t ))0)) { ret = (*(target_fabric_tpg_param_attr->store))(se_portal_group, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_tpg_param_item_ops = {0, & target_fabric_tpg_param_attr_show, & target_fabric_tpg_param_attr_store, 0, 0}; static void target_fabric_setup_tpg_param_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_param_cit; attrs = (tf->tf_ops)->tfc_tpg_param_attrs; cit->ct_item_ops = & target_fabric_tpg_param_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_param_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 990U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_param"); } else { } return; } } static struct se_portal_group *to_target_fabric_tpg(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_portal_group *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_portal_group *)__mptr + 0xfffffffffffffe18UL; } else { tmp___0 = (struct se_portal_group *)0; } return (tmp___0); } } static ssize_t target_fabric_tpg_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_attribute *target_fabric_tpg_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg(item); se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_attr = (struct target_fabric_tpg_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_tpg_attr->show != (unsigned long )((ssize_t (*)(struct se_portal_group * , char * ))0)) { ret = (*(target_fabric_tpg_attr->show))(se_portal_group, page); } else { } return (ret); } } static ssize_t target_fabric_tpg_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_portal_group *se_portal_group ; struct se_portal_group *tmp ; struct target_fabric_tpg_attribute *target_fabric_tpg_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_tpg(item); 
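/* Editorial annotation (comment only, no behavioural change): the show/store wrappers throughout this file follow one pattern -- recover the owning object (here the se_portal_group) from the config_item via container_of-style pointer arithmetic, cast the generic configfs_attribute to its typed wrapper, and forward to the wrapper's ->show or ->store callback when it is non-NULL; otherwise stores return -22 (-EINVAL) and shows return 0. */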
se_portal_group = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_tpg_attr = (struct target_fabric_tpg_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_tpg_attr->store != (unsigned long )((ssize_t (*)(struct se_portal_group * , char const * , size_t ))0)) { ret = (*(target_fabric_tpg_attr->store))(se_portal_group, page, count); } else { } return (ret); } } static void target_fabric_tpg_release(struct config_item *item ) { struct se_portal_group *se_tpg ; struct config_group const *__mptr ; struct config_group *tmp ; struct se_wwn *wwn ; struct target_fabric_configfs *tf ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; se_tpg = (struct se_portal_group *)__mptr + 0xfffffffffffffe18UL; wwn = se_tpg->se_tpg_wwn; tf = wwn->wwn_tf; (*((tf->tf_ops)->fabric_drop_tpg))(se_tpg); return; } } static struct configfs_item_operations target_fabric_tpg_base_item_ops = {& target_fabric_tpg_release, & target_fabric_tpg_attr_show, & target_fabric_tpg_attr_store, 0, 0}; static void target_fabric_setup_tpg_base_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_base_cit; attrs = (tf->tf_ops)->tfc_tpg_base_attrs; cit->ct_item_ops = & target_fabric_tpg_base_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_base_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1016U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg_base"); } else { } return; } } static struct config_group *target_fabric_make_tpg(struct config_group *group , char const *name ) { struct se_wwn *wwn ; struct config_group const *__mptr ; struct target_fabric_configfs *tf ; struct se_portal_group *se_tpg ; void *tmp ; void *tmp___0 ; bool tmp___1 ; { __mptr = (struct config_group const *)group; wwn = (struct se_wwn *)__mptr + 0xfffffffffffffff8UL; tf = wwn->wwn_tf; if ((unsigned long )(tf->tf_ops)->fabric_make_tpg == (unsigned long )((struct se_portal_group *(*/* const */)(struct se_wwn * , struct config_group * , char const * ))0)) { printk("\vtf->tf_ops->fabric_make_tpg is NULL\n"); tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } else { } se_tpg = (*((tf->tf_ops)->fabric_make_tpg))(wwn, group, name); if ((unsigned long )se_tpg == (unsigned long )((struct se_portal_group *)0)) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { tmp___1 = IS_ERR((void const *)se_tpg); if ((int )tmp___1) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { } } se_tpg->tpg_group.default_groups = (struct config_group **)(& se_tpg->tpg_default_groups); *(se_tpg->tpg_group.default_groups) = & se_tpg->tpg_lun_group; *(se_tpg->tpg_group.default_groups + 1UL) = & se_tpg->tpg_np_group; *(se_tpg->tpg_group.default_groups + 2UL) = & se_tpg->tpg_acl_group; *(se_tpg->tpg_group.default_groups + 3UL) = & se_tpg->tpg_attrib_group; *(se_tpg->tpg_group.default_groups + 4UL) = & se_tpg->tpg_auth_group; 
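/* Annotation: target_fabric_make_tpg() fills the new TPG's default_groups table with its child groups -- lun, np, acls, attrib and auth above, param and the NULL terminator below -- and the config_group_init_type_name() calls that follow bind each group to the matching tf_tpg_*_cit item type set up earlier in this file. */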
*(se_tpg->tpg_group.default_groups + 5UL) = & se_tpg->tpg_param_group; *(se_tpg->tpg_group.default_groups + 6UL) = (struct config_group *)0; config_group_init_type_name(& se_tpg->tpg_group, name, & tf->tf_tpg_base_cit); config_group_init_type_name(& se_tpg->tpg_lun_group, "lun", & tf->tf_tpg_lun_cit); config_group_init_type_name(& se_tpg->tpg_np_group, "np", & tf->tf_tpg_np_cit); config_group_init_type_name(& se_tpg->tpg_acl_group, "acls", & tf->tf_tpg_nacl_cit); config_group_init_type_name(& se_tpg->tpg_attrib_group, "attrib", & tf->tf_tpg_attrib_cit); config_group_init_type_name(& se_tpg->tpg_auth_group, "auth", & tf->tf_tpg_auth_cit); config_group_init_type_name(& se_tpg->tpg_param_group, "param", & tf->tf_tpg_param_cit); return (& se_tpg->tpg_group); } } static void target_fabric_drop_tpg(struct config_group *group , struct config_item *item ) { struct se_portal_group *se_tpg ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_group *tpg_cg ; struct config_item *df_item ; int i ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; se_tpg = (struct se_portal_group *)__mptr + 0xfffffffffffffe18UL; tpg_cg = & se_tpg->tpg_group; i = 0; goto ldv_62668; ldv_62667: df_item = & (*(tpg_cg->default_groups + (unsigned long )i))->cg_item; *(tpg_cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62668: ; if ((unsigned long )*(tpg_cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62667; } else { } config_item_put(item); return; } } static void target_fabric_release_wwn(struct config_item *item ) { struct se_wwn *wwn ; struct config_group const *__mptr ; struct config_group *tmp ; struct target_fabric_configfs *tf ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; wwn = (struct se_wwn *)__mptr + 0xfffffffffffffff8UL; tf = wwn->wwn_tf; (*((tf->tf_ops)->fabric_drop_wwn))(wwn); return; } } static struct configfs_item_operations target_fabric_tpg_item_ops = {& target_fabric_release_wwn, 0, 0, 0, 0}; static struct configfs_group_operations target_fabric_tpg_group_ops = {0, & target_fabric_make_tpg, 0, 0, & target_fabric_drop_tpg}; static void target_fabric_setup_tpg_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_tpg_cit; cit->ct_item_ops = & target_fabric_tpg_item_ops; cit->ct_group_ops = & target_fabric_tpg_group_ops; cit->ct_attrs = (struct configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_tpg_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1109U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"tpg"); } else { } return; } } static void target_fabric_setup_wwn_fabric_stats_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_wwn_fabric_stats_cit; cit->ct_item_ops = (struct configfs_item_operations *)0; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = (struct 
configfs_attribute **)0; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_wwn_fabric_stats_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1118U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"wwn_fabric_stats"); } else { } return; } } static struct config_group *target_fabric_make_wwn(struct config_group *group , char const *name ) { struct target_fabric_configfs *tf ; struct config_group const *__mptr ; struct se_wwn *wwn ; void *tmp ; void *tmp___0 ; bool tmp___1 ; { __mptr = (struct config_group const *)group; tf = (struct target_fabric_configfs *)__mptr + 0xffffffffffffffe8UL; if ((unsigned long )(tf->tf_ops)->fabric_make_wwn == (unsigned long )((struct se_wwn *(*/* const */)(struct target_fabric_configfs * , struct config_group * , char const * ))0)) { printk("\vtf->tf_ops.fabric_make_wwn is NULL\n"); tmp = ERR_PTR(-38L); return ((struct config_group *)tmp); } else { } wwn = (*((tf->tf_ops)->fabric_make_wwn))(tf, group, name); if ((unsigned long )wwn == (unsigned long )((struct se_wwn *)0)) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { tmp___1 = IS_ERR((void const *)wwn); if ((int )tmp___1) { tmp___0 = ERR_PTR(-22L); return ((struct config_group *)tmp___0); } else { } } wwn->wwn_tf = tf; wwn->wwn_group.default_groups = (struct config_group **)(& wwn->wwn_default_groups); *(wwn->wwn_group.default_groups) = & wwn->fabric_stat_group; *(wwn->wwn_group.default_groups + 1UL) = (struct config_group *)0; config_group_init_type_name(& wwn->wwn_group, name, & tf->tf_tpg_cit); config_group_init_type_name(& wwn->fabric_stat_group, "fabric_statistics", & tf->tf_wwn_fabric_stats_cit); return (& wwn->wwn_group); } } static void target_fabric_drop_wwn(struct config_group *group , struct config_item *item ) { struct se_wwn *wwn ; struct config_group const *__mptr ; struct config_group *tmp ; struct config_item *df_item ; struct config_group *cg ; int i ; { tmp = to_config_group(item); __mptr = (struct config_group const *)tmp; wwn = (struct se_wwn *)__mptr + 0xfffffffffffffff8UL; cg = & wwn->wwn_group; i = 0; goto ldv_62710; ldv_62709: df_item = & (*(cg->default_groups + (unsigned long )i))->cg_item; *(cg->default_groups + (unsigned long )i) = (struct config_group *)0; config_item_put(df_item); i = i + 1; ldv_62710: ; if ((unsigned long )*(cg->default_groups + (unsigned long )i) != (unsigned long )((struct config_group *)0)) { goto ldv_62709; } else { } config_item_put(item); return; } } static struct configfs_group_operations target_fabric_wwn_group_ops = {0, & target_fabric_make_wwn, 0, 0, & target_fabric_drop_wwn}; static struct target_fabric_configfs *to_target_fabric_wwn(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct target_fabric_configfs *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct target_fabric_configfs *)__mptr + 0xffffffffffffffe8UL; } else { tmp___0 = (struct target_fabric_configfs *)0; } return (tmp___0); } } static ssize_t 
target_fabric_wwn_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct target_fabric_configfs *target_fabric_configfs ; struct target_fabric_configfs *tmp ; struct target_fabric_wwn_attribute *target_fabric_wwn_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_wwn(item); target_fabric_configfs = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_wwn_attr = (struct target_fabric_wwn_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_wwn_attr->show != (unsigned long )((ssize_t (*)(struct target_fabric_configfs * , char * ))0)) { ret = (*(target_fabric_wwn_attr->show))(target_fabric_configfs, page); } else { } return (ret); } } static ssize_t target_fabric_wwn_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct target_fabric_configfs *target_fabric_configfs ; struct target_fabric_configfs *tmp ; struct target_fabric_wwn_attribute *target_fabric_wwn_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_wwn(item); target_fabric_configfs = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_wwn_attr = (struct target_fabric_wwn_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_wwn_attr->store != (unsigned long )((ssize_t (*)(struct target_fabric_configfs * , char const * , size_t ))0)) { ret = (*(target_fabric_wwn_attr->store))(target_fabric_configfs, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_wwn_item_ops = {0, & target_fabric_wwn_attr_show, & target_fabric_wwn_attr_store, 0, 0}; static void target_fabric_setup_wwn_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_wwn_cit; attrs = (tf->tf_ops)->tfc_wwn_attrs; cit->ct_item_ops = & target_fabric_wwn_item_ops; cit->ct_group_ops = & target_fabric_wwn_group_ops; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_wwn_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1189U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"wwn"); } else { } return; } } static struct target_fabric_configfs *to_target_fabric_discovery(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct target_fabric_configfs *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct target_fabric_configfs *)__mptr + 0xffffffffffffff78UL; } else { tmp___0 = (struct target_fabric_configfs *)0; } return (tmp___0); } } static ssize_t target_fabric_discovery_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct target_fabric_configfs *target_fabric_configfs ; struct target_fabric_configfs *tmp ; struct target_fabric_discovery_attribute *target_fabric_discovery_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = 
to_target_fabric_discovery(item); target_fabric_configfs = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_discovery_attr = (struct target_fabric_discovery_attribute *)__mptr; ret = 0L; if ((unsigned long )target_fabric_discovery_attr->show != (unsigned long )((ssize_t (*)(struct target_fabric_configfs * , char * ))0)) { ret = (*(target_fabric_discovery_attr->show))(target_fabric_configfs, page); } else { } return (ret); } } static ssize_t target_fabric_discovery_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct target_fabric_configfs *target_fabric_configfs ; struct target_fabric_configfs *tmp ; struct target_fabric_discovery_attribute *target_fabric_discovery_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_fabric_discovery(item); target_fabric_configfs = tmp; __mptr = (struct configfs_attribute const *)attr; target_fabric_discovery_attr = (struct target_fabric_discovery_attribute *)__mptr; ret = -22L; if ((unsigned long )target_fabric_discovery_attr->store != (unsigned long )((ssize_t (*)(struct target_fabric_configfs * , char const * , size_t ))0)) { ret = (*(target_fabric_discovery_attr->store))(target_fabric_configfs, page, count); } else { } return (ret); } } static struct configfs_item_operations target_fabric_discovery_item_ops = {0, & target_fabric_discovery_attr_show, & target_fabric_discovery_attr_store, 0, 0}; static void target_fabric_setup_discovery_cit(struct target_fabric_configfs *tf ) { struct config_item_type *cit ; struct configfs_attribute **attrs ; struct _ddebug descriptor ; long tmp ; { cit = & tf->tf_discovery_cit; attrs = (tf->tf_ops)->tfc_discovery_attrs; cit->ct_item_ops = & target_fabric_discovery_item_ops; cit->ct_group_ops = (struct configfs_group_operations *)0; cit->ct_attrs = attrs; cit->ct_owner = (tf->tf_ops)->module; descriptor.modname = "target_core_mod"; descriptor.function = "target_fabric_setup_discovery_cit"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_configfs.c"; descriptor.format = "Setup generic %s\n"; descriptor.lineno = 1203U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup generic %s\n", (char *)"discovery"); } else { } return; } } int target_fabric_setup_cits(struct target_fabric_configfs *tf ) { { target_fabric_setup_discovery_cit(tf); target_fabric_setup_wwn_cit(tf); target_fabric_setup_wwn_fabric_stats_cit(tf); target_fabric_setup_tpg_cit(tf); target_fabric_setup_tpg_base_cit(tf); target_fabric_setup_tpg_port_cit(tf); target_fabric_setup_tpg_port_stat_cit(tf); target_fabric_setup_tpg_lun_cit(tf); target_fabric_setup_tpg_np_cit(tf); target_fabric_setup_tpg_np_base_cit(tf); target_fabric_setup_tpg_attrib_cit(tf); target_fabric_setup_tpg_auth_cit(tf); target_fabric_setup_tpg_param_cit(tf); target_fabric_setup_tpg_nacl_cit(tf); target_fabric_setup_tpg_nacl_base_cit(tf); target_fabric_setup_tpg_nacl_attrib_cit(tf); target_fabric_setup_tpg_nacl_auth_cit(tf); target_fabric_setup_tpg_nacl_param_cit(tf); target_fabric_setup_tpg_nacl_stat_cit(tf); target_fabric_setup_tpg_mappedlun_cit(tf); target_fabric_setup_tpg_mappedlun_stat_cit(tf); return (0); } } extern int ldv_probe_94(void) ; extern int ldv_probe_114(void) ; extern int ldv_probe_106(void) ; 
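/* Annotation: everything from here on appears to be the LDV-style verification environment model rather than driver code. The ldv_initialize_*() functions allocate zeroed stand-in objects (via ldv_init_zalloc) for the arguments of the registered configfs callbacks, and the ldv_main_exported_*() functions use __VERIFIER_nondet_int() to nondeterministically invoke those show/store/link/unlink/release callbacks while tracking a small per-interface state variable (ldv_state_variable_N) and the global ref_cnt. */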
extern int ldv_probe_109(void) ; extern int ldv_probe_100(void) ; extern int ldv_probe_93(void) ; void ldv_initialize_configfs_item_operations_112(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_nacl_attrib_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_nacl_attrib_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_94(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_tpg_base_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_tpg_base_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_111(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_nacl_auth_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_nacl_auth_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_group_operations_92(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_tpg_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_item_operations_95(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_tpg_param_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_tpg_param_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_group_operations_113(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_mappedlun_stat_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_group_operations_105(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_np_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_group_operations_98(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_lun_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_item_operations_96(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_tpg_auth_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_tpg_auth_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_97(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_tpg_attrib_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_tpg_attrib_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_group_operations_99(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_port_stat_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_group_operations_91(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_wwn_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_configfs_item_operations_89(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_discovery_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_discovery_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_114(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(80UL); target_fabric_mappedlun_item_ops_group1 = (struct config_item *)tmp; 
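/* Annotation: the byte counts passed to ldv_init_zalloc() in these initializers (e.g. 24 for a struct configfs_attribute, 80 for a struct config_item, 112 for a struct config_group, 1192 for a struct se_lun) are the structure sizes this harness assumes for the 64-bit configuration; the zeroed objects are reused as callback arguments in the ldv_main_exported_*() drivers below. */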
tmp___0 = ldv_init_zalloc(24UL); target_fabric_mappedlun_item_ops_group0 = (struct configfs_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(80UL); target_fabric_mappedlun_item_ops_group2 = (struct config_item *)tmp___1; return; } } void ldv_initialize_configfs_item_operations_90(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_wwn_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_wwn_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_fabric_port_attribute_101(void) { void *tmp ; { tmp = ldv_init_zalloc(1192UL); target_fabric_port_alua_tg_pt_write_md_group0 = (struct se_lun *)tmp; return; } } void ldv_initialize_configfs_item_operations_110(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_nacl_param_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_nacl_param_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_group_operations_108(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_nacl_base_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_fabric_port_attribute_103(void) { void *tmp ; { tmp = ldv_init_zalloc(1192UL); target_fabric_port_alua_tg_pt_offline_group0 = (struct se_lun *)tmp; return; } } void ldv_initialize_configfs_item_operations_100(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(80UL); target_fabric_port_item_ops_group1 = (struct config_item *)tmp; tmp___0 = ldv_init_zalloc(24UL); target_fabric_port_item_ops_group0 = (struct configfs_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(80UL); target_fabric_port_item_ops_group2 = (struct config_item *)tmp___1; return; } } void ldv_initialize_configfs_group_operations_107(void) { void *tmp ; { tmp = ldv_init_zalloc(112UL); target_fabric_nacl_group_ops_group0 = (struct config_group *)tmp; return; } } void ldv_initialize_target_fabric_port_attribute_102(void) { void *tmp ; { tmp = ldv_init_zalloc(1192UL); target_fabric_port_alua_tg_pt_status_group0 = (struct se_lun *)tmp; return; } } void ldv_initialize_configfs_item_operations_106(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_np_base_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_np_base_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_fabric_port_attribute_104(void) { void *tmp ; { tmp = ldv_init_zalloc(1192UL); target_fabric_port_alua_tg_pt_gp_group0 = (struct se_lun *)tmp; return; } } void ldv_initialize_configfs_item_operations_109(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_fabric_nacl_base_item_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_fabric_nacl_base_item_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_target_fabric_mappedlun_attribute_115(void) { void *tmp ; { tmp = ldv_init_zalloc(696UL); target_fabric_mappedlun_write_protect_group0 = (struct se_lun_acl *)tmp; return; } } void ldv_main_exported_104(void) { char *ldvarg206 ; void *tmp ; size_t ldvarg205 ; char *ldvarg204 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg206 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg204 = (char *)tmp___0; ldv_memset((void *)(& ldvarg205), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if 
(ldv_state_variable_104 == 1) { target_fabric_port_store_attr_alua_tg_pt_gp(target_fabric_port_alua_tg_pt_gp_group0, (char const *)ldvarg206, ldvarg205); ldv_state_variable_104 = 1; } else { } goto ldv_62881; case 1: ; if (ldv_state_variable_104 == 1) { target_fabric_port_show_attr_alua_tg_pt_gp(target_fabric_port_alua_tg_pt_gp_group0, ldvarg204); ldv_state_variable_104 = 1; } else { } goto ldv_62881; default: ldv_stop(); } ldv_62881: ; return; } } void ldv_main_exported_90(void) { char *ldvarg6 ; void *tmp ; size_t ldvarg5 ; char *ldvarg7 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg6 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg7 = (char *)tmp___0; ldv_memset((void *)(& ldvarg5), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_90 == 1) { target_fabric_wwn_attr_show(target_fabric_wwn_item_ops_group1, target_fabric_wwn_item_ops_group0, ldvarg7); ldv_state_variable_90 = 1; } else { } goto ldv_62891; case 1: ; if (ldv_state_variable_90 == 1) { target_fabric_wwn_attr_store(target_fabric_wwn_item_ops_group1, target_fabric_wwn_item_ops_group0, (char const *)ldvarg6, ldvarg5); ldv_state_variable_90 = 1; } else { } goto ldv_62891; default: ldv_stop(); } ldv_62891: ; return; } } void ldv_main_exported_102(void) { size_t ldvarg15 ; char *ldvarg16 ; void *tmp ; char *ldvarg14 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg16 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg14 = (char *)tmp___0; ldv_memset((void *)(& ldvarg15), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_102 == 1) { target_fabric_port_store_attr_alua_tg_pt_status(target_fabric_port_alua_tg_pt_status_group0, (char const *)ldvarg16, ldvarg15); ldv_state_variable_102 = 1; } else { } goto ldv_62901; case 1: ; if (ldv_state_variable_102 == 1) { target_fabric_port_show_attr_alua_tg_pt_status(target_fabric_port_alua_tg_pt_status_group0, ldvarg14); ldv_state_variable_102 = 1; } else { } goto ldv_62901; default: ldv_stop(); } ldv_62901: ; return; } } void ldv_main_exported_91(void) { struct config_item *ldvarg309 ; void *tmp ; char *ldvarg308 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(80UL); ldvarg309 = (struct config_item *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg308 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_91 == 1) { target_fabric_drop_wwn(target_fabric_wwn_group_ops_group0, ldvarg309); ldv_state_variable_91 = 1; } else { } goto ldv_62910; case 1: ; if (ldv_state_variable_91 == 1) { target_fabric_make_wwn(target_fabric_wwn_group_ops_group0, (char const *)ldvarg308); ldv_state_variable_91 = 1; } else { } goto ldv_62910; default: ldv_stop(); } ldv_62910: ; return; } } void ldv_main_exported_107(void) { struct config_item *ldvarg316 ; void *tmp ; char *ldvarg315 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(80UL); ldvarg316 = (struct config_item *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg315 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_107 == 1) { target_fabric_drop_nodeacl(target_fabric_nacl_group_ops_group0, ldvarg316); ldv_state_variable_107 = 1; } else { } goto ldv_62919; case 1: ; if (ldv_state_variable_107 == 1) { target_fabric_make_nodeacl(target_fabric_nacl_group_ops_group0, (char const *)ldvarg315); ldv_state_variable_107 = 1; } else { } goto ldv_62919; default: ldv_stop(); } ldv_62919: ; return; } } void 
ldv_main_exported_99(void) { char *ldvarg268 ; void *tmp ; struct config_item *ldvarg269 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg268 = (char *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg269 = (struct config_item *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_99 == 1) { target_core_port_stat_rmdir(target_fabric_port_stat_group_ops_group0, ldvarg269); ldv_state_variable_99 = 1; } else { } goto ldv_62928; case 1: ; if (ldv_state_variable_99 == 1) { target_core_port_stat_mkdir(target_fabric_port_stat_group_ops_group0, (char const *)ldvarg268); ldv_state_variable_99 = 1; } else { } goto ldv_62928; default: ldv_stop(); } ldv_62928: ; return; } } void ldv_main_exported_93(void) { struct config_item *ldvarg82 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(80UL); ldvarg82 = (struct config_item *)tmp; tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_93 == 2) { target_fabric_release_wwn(ldvarg82); ldv_state_variable_93 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62936; case 1: ; if (ldv_state_variable_93 == 1) { ldv_probe_93(); ldv_state_variable_93 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62936; default: ldv_stop(); } ldv_62936: ; return; } } void ldv_main_exported_106(void) { size_t ldvarg83 ; char *ldvarg84 ; void *tmp ; char *ldvarg85 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg84 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg85 = (char *)tmp___0; ldv_memset((void *)(& ldvarg83), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_106 == 1) { target_fabric_np_base_attr_show(target_fabric_np_base_item_ops_group1, target_fabric_np_base_item_ops_group0, ldvarg85); ldv_state_variable_106 = 1; } else { } if (ldv_state_variable_106 == 2) { target_fabric_np_base_attr_show(target_fabric_np_base_item_ops_group1, target_fabric_np_base_item_ops_group0, ldvarg85); ldv_state_variable_106 = 2; } else { } goto ldv_62946; case 1: ; if (ldv_state_variable_106 == 2) { target_fabric_np_base_release(target_fabric_np_base_item_ops_group1); ldv_state_variable_106 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62946; case 2: ; if (ldv_state_variable_106 == 1) { target_fabric_np_base_attr_store(target_fabric_np_base_item_ops_group1, target_fabric_np_base_item_ops_group0, (char const *)ldvarg84, ldvarg83); ldv_state_variable_106 = 1; } else { } if (ldv_state_variable_106 == 2) { target_fabric_np_base_attr_store(target_fabric_np_base_item_ops_group1, target_fabric_np_base_item_ops_group0, (char const *)ldvarg84, ldvarg83); ldv_state_variable_106 = 2; } else { } goto ldv_62946; case 3: ; if (ldv_state_variable_106 == 1) { ldv_probe_106(); ldv_state_variable_106 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62946; default: ldv_stop(); } ldv_62946: ; return; } } void ldv_main_exported_105(void) { char *ldvarg451 ; void *tmp ; struct config_item *ldvarg452 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg451 = (char *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg452 = (struct config_item *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_105 == 1) { target_fabric_drop_np(target_fabric_np_group_ops_group0, ldvarg452); ldv_state_variable_105 = 1; } else { } goto ldv_62957; case 1: ; if (ldv_state_variable_105 == 1) { target_fabric_make_np(target_fabric_np_group_ops_group0, (char const *)ldvarg451); ldv_state_variable_105 = 1; } else { } goto ldv_62957; 
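/* Annotation: any nondeterministic value outside the modelled cases reaches the default branch, where ldv_stop() halts further exploration of this call sequence. */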
default: ldv_stop(); } ldv_62957: ; return; } } void ldv_main_exported_100(void) { char *ldvarg391 ; void *tmp ; size_t ldvarg389 ; char *ldvarg390 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg391 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg390 = (char *)tmp___0; ldv_memset((void *)(& ldvarg389), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_100 == 1) { target_fabric_port_attr_show(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group0, ldvarg391); ldv_state_variable_100 = 1; } else { } if (ldv_state_variable_100 == 2) { target_fabric_port_attr_show(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group0, ldvarg391); ldv_state_variable_100 = 2; } else { } goto ldv_62967; case 1: ; if (ldv_state_variable_100 == 2) { target_fabric_port_release(target_fabric_port_item_ops_group2); ldv_state_variable_100 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_62967; case 2: ; if (ldv_state_variable_100 == 1) { target_fabric_port_unlink(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group1); ldv_state_variable_100 = 1; } else { } if (ldv_state_variable_100 == 2) { target_fabric_port_unlink(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group1); ldv_state_variable_100 = 2; } else { } goto ldv_62967; case 3: ; if (ldv_state_variable_100 == 1) { target_fabric_port_attr_store(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group0, (char const *)ldvarg390, ldvarg389); ldv_state_variable_100 = 1; } else { } if (ldv_state_variable_100 == 2) { target_fabric_port_attr_store(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group0, (char const *)ldvarg390, ldvarg389); ldv_state_variable_100 = 2; } else { } goto ldv_62967; case 4: ; if (ldv_state_variable_100 == 1) { target_fabric_port_link(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group1); ldv_state_variable_100 = 1; } else { } if (ldv_state_variable_100 == 2) { target_fabric_port_link(target_fabric_port_item_ops_group2, target_fabric_port_item_ops_group1); ldv_state_variable_100 = 2; } else { } goto ldv_62967; case 5: ; if (ldv_state_variable_100 == 1) { ldv_probe_100(); ldv_state_variable_100 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_62967; default: ldv_stop(); } ldv_62967: ; return; } } void ldv_main_exported_96(void) { char *ldvarg223 ; void *tmp ; size_t ldvarg222 ; char *ldvarg224 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg223 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg224 = (char *)tmp___0; ldv_memset((void *)(& ldvarg222), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_96 == 1) { target_fabric_tpg_auth_attr_show(target_fabric_tpg_auth_item_ops_group1, target_fabric_tpg_auth_item_ops_group0, ldvarg224); ldv_state_variable_96 = 1; } else { } goto ldv_62981; case 1: ; if (ldv_state_variable_96 == 1) { target_fabric_tpg_auth_attr_store(target_fabric_tpg_auth_item_ops_group1, target_fabric_tpg_auth_item_ops_group0, (char const *)ldvarg223, ldvarg222); ldv_state_variable_96 = 1; } else { } goto ldv_62981; default: ldv_stop(); } ldv_62981: ; return; } } void ldv_main_exported_110(void) { char *ldvarg155 ; void *tmp ; size_t ldvarg153 ; char *ldvarg154 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg155 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg154 = (char *)tmp___0; ldv_memset((void *)(& ldvarg153), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); 
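/* Annotation: the nondeterministic choice below selects which nacl_param item_ops callback the verifier exercises (attr_show in case 0, attr_store in case 1); both branches leave ldv_state_variable_110 at 1, since these callbacks neither acquire nor release resources. */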
switch (tmp___1) { case 0: ; if (ldv_state_variable_110 == 1) { target_fabric_nacl_param_attr_show(target_fabric_nacl_param_item_ops_group1, target_fabric_nacl_param_item_ops_group0, ldvarg155); ldv_state_variable_110 = 1; } else { } goto ldv_62991; case 1: ; if (ldv_state_variable_110 == 1) { target_fabric_nacl_param_attr_store(target_fabric_nacl_param_item_ops_group1, target_fabric_nacl_param_item_ops_group0, (char const *)ldvarg154, ldvarg153); ldv_state_variable_110 = 1; } else { } goto ldv_62991; default: ldv_stop(); } ldv_62991: ; return; } } void ldv_main_exported_95(void) { char *ldvarg46 ; void *tmp ; char *ldvarg47 ; void *tmp___0 ; size_t ldvarg45 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg46 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg47 = (char *)tmp___0; ldv_memset((void *)(& ldvarg45), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_95 == 1) { target_fabric_tpg_param_attr_show(target_fabric_tpg_param_item_ops_group1, target_fabric_tpg_param_item_ops_group0, ldvarg47); ldv_state_variable_95 = 1; } else { } goto ldv_63001; case 1: ; if (ldv_state_variable_95 == 1) { target_fabric_tpg_param_attr_store(target_fabric_tpg_param_item_ops_group1, target_fabric_tpg_param_item_ops_group0, (char const *)ldvarg46, ldvarg45); ldv_state_variable_95 = 1; } else { } goto ldv_63001; default: ldv_stop(); } ldv_63001: ; return; } } void ldv_main_exported_94(void) { char *ldvarg466 ; void *tmp ; size_t ldvarg464 ; char *ldvarg465 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg466 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg465 = (char *)tmp___0; ldv_memset((void *)(& ldvarg464), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_94 == 1) { target_fabric_tpg_attr_show(target_fabric_tpg_base_item_ops_group1, target_fabric_tpg_base_item_ops_group0, ldvarg466); ldv_state_variable_94 = 1; } else { } if (ldv_state_variable_94 == 2) { target_fabric_tpg_attr_show(target_fabric_tpg_base_item_ops_group1, target_fabric_tpg_base_item_ops_group0, ldvarg466); ldv_state_variable_94 = 2; } else { } goto ldv_63011; case 1: ; if (ldv_state_variable_94 == 2) { target_fabric_tpg_release(target_fabric_tpg_base_item_ops_group1); ldv_state_variable_94 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63011; case 2: ; if (ldv_state_variable_94 == 1) { target_fabric_tpg_attr_store(target_fabric_tpg_base_item_ops_group1, target_fabric_tpg_base_item_ops_group0, (char const *)ldvarg465, ldvarg464); ldv_state_variable_94 = 1; } else { } if (ldv_state_variable_94 == 2) { target_fabric_tpg_attr_store(target_fabric_tpg_base_item_ops_group1, target_fabric_tpg_base_item_ops_group0, (char const *)ldvarg465, ldvarg464); ldv_state_variable_94 = 2; } else { } goto ldv_63011; case 3: ; if (ldv_state_variable_94 == 1) { ldv_probe_94(); ldv_state_variable_94 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63011; default: ldv_stop(); } ldv_63011: ; return; } } void ldv_main_exported_97(void) { size_t ldvarg342 ; char *ldvarg344 ; void *tmp ; char *ldvarg343 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg344 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg343 = (char *)tmp___0; ldv_memset((void *)(& ldvarg342), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_97 == 1) { target_fabric_tpg_attrib_attr_show(target_fabric_tpg_attrib_item_ops_group1, target_fabric_tpg_attrib_item_ops_group0, ldvarg344); ldv_state_variable_97 = 1; } 
else { } goto ldv_63023; case 1: ; if (ldv_state_variable_97 == 1) { target_fabric_tpg_attrib_attr_store(target_fabric_tpg_attrib_item_ops_group1, target_fabric_tpg_attrib_item_ops_group0, (char const *)ldvarg343, ldvarg342); ldv_state_variable_97 = 1; } else { } goto ldv_63023; default: ldv_stop(); } ldv_63023: ; return; } } void ldv_main_exported_114(void) { size_t ldvarg103 ; char *ldvarg105 ; void *tmp ; char *ldvarg104 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg105 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg104 = (char *)tmp___0; ldv_memset((void *)(& ldvarg103), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_114 == 1) { target_fabric_mappedlun_attr_show(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group0, ldvarg105); ldv_state_variable_114 = 1; } else { } if (ldv_state_variable_114 == 2) { target_fabric_mappedlun_attr_show(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group0, ldvarg105); ldv_state_variable_114 = 2; } else { } goto ldv_63033; case 1: ; if (ldv_state_variable_114 == 2) { target_fabric_mappedlun_release(target_fabric_mappedlun_item_ops_group2); ldv_state_variable_114 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63033; case 2: ; if (ldv_state_variable_114 == 1) { target_fabric_mappedlun_unlink(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group1); ldv_state_variable_114 = 1; } else { } if (ldv_state_variable_114 == 2) { target_fabric_mappedlun_unlink(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group1); ldv_state_variable_114 = 2; } else { } goto ldv_63033; case 3: ; if (ldv_state_variable_114 == 1) { target_fabric_mappedlun_attr_store(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group0, (char const *)ldvarg104, ldvarg103); ldv_state_variable_114 = 1; } else { } if (ldv_state_variable_114 == 2) { target_fabric_mappedlun_attr_store(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group0, (char const *)ldvarg104, ldvarg103); ldv_state_variable_114 = 2; } else { } goto ldv_63033; case 4: ; if (ldv_state_variable_114 == 1) { target_fabric_mappedlun_link(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group1); ldv_state_variable_114 = 1; } else { } if (ldv_state_variable_114 == 2) { target_fabric_mappedlun_link(target_fabric_mappedlun_item_ops_group2, target_fabric_mappedlun_item_ops_group1); ldv_state_variable_114 = 2; } else { } goto ldv_63033; case 5: ; if (ldv_state_variable_114 == 1) { ldv_probe_114(); ldv_state_variable_114 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63033; default: ldv_stop(); } ldv_63033: ; return; } } void ldv_main_exported_111(void) { char *ldvarg479 ; void *tmp ; size_t ldvarg477 ; char *ldvarg478 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg479 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg478 = (char *)tmp___0; ldv_memset((void *)(& ldvarg477), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_111 == 1) { target_fabric_nacl_auth_attr_show(target_fabric_nacl_auth_item_ops_group1, target_fabric_nacl_auth_item_ops_group0, ldvarg479); ldv_state_variable_111 = 1; } else { } goto ldv_63047; case 1: ; if (ldv_state_variable_111 == 1) { target_fabric_nacl_auth_attr_store(target_fabric_nacl_auth_item_ops_group1, target_fabric_nacl_auth_item_ops_group0, (char const *)ldvarg478, ldvarg477); 
ldv_state_variable_111 = 1; } else { } goto ldv_63047; default: ldv_stop(); } ldv_63047: ; return; } } void ldv_main_exported_108(void) { char *ldvarg283 ; void *tmp ; struct config_item *ldvarg284 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg283 = (char *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg284 = (struct config_item *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_108 == 1) { target_fabric_drop_mappedlun(target_fabric_nacl_base_group_ops_group0, ldvarg284); ldv_state_variable_108 = 1; } else { } goto ldv_63056; case 1: ; if (ldv_state_variable_108 == 1) { target_fabric_make_mappedlun(target_fabric_nacl_base_group_ops_group0, (char const *)ldvarg283); ldv_state_variable_108 = 1; } else { } goto ldv_63056; default: ldv_stop(); } ldv_63056: ; return; } } void ldv_main_exported_115(void) { size_t ldvarg286 ; char *ldvarg287 ; void *tmp ; char *ldvarg285 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg287 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg285 = (char *)tmp___0; ldv_memset((void *)(& ldvarg286), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_115 == 1) { target_fabric_mappedlun_store_write_protect(target_fabric_mappedlun_write_protect_group0, (char const *)ldvarg287, ldvarg286); ldv_state_variable_115 = 1; } else { } goto ldv_63066; case 1: ; if (ldv_state_variable_115 == 1) { target_fabric_mappedlun_show_write_protect(target_fabric_mappedlun_write_protect_group0, ldvarg285); ldv_state_variable_115 = 1; } else { } goto ldv_63066; default: ldv_stop(); } ldv_63066: ; return; } } void ldv_main_exported_112(void) { char *ldvarg178 ; void *tmp ; size_t ldvarg176 ; char *ldvarg177 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg178 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg177 = (char *)tmp___0; ldv_memset((void *)(& ldvarg176), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_112 == 1) { target_fabric_nacl_attrib_attr_show(target_fabric_nacl_attrib_item_ops_group1, target_fabric_nacl_attrib_item_ops_group0, ldvarg178); ldv_state_variable_112 = 1; } else { } goto ldv_63076; case 1: ; if (ldv_state_variable_112 == 1) { target_fabric_nacl_attrib_attr_store(target_fabric_nacl_attrib_item_ops_group1, target_fabric_nacl_attrib_item_ops_group0, (char const *)ldvarg177, ldvarg176); ldv_state_variable_112 = 1; } else { } goto ldv_63076; default: ldv_stop(); } ldv_63076: ; return; } } void ldv_main_exported_109(void) { char *ldvarg56 ; void *tmp ; size_t ldvarg55 ; char *ldvarg57 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg56 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg57 = (char *)tmp___0; ldv_memset((void *)(& ldvarg55), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_109 == 1) { target_fabric_nacl_base_attr_show(target_fabric_nacl_base_item_ops_group1, target_fabric_nacl_base_item_ops_group0, ldvarg57); ldv_state_variable_109 = 1; } else { } if (ldv_state_variable_109 == 2) { target_fabric_nacl_base_attr_show(target_fabric_nacl_base_item_ops_group1, target_fabric_nacl_base_item_ops_group0, ldvarg57); ldv_state_variable_109 = 2; } else { } goto ldv_63086; case 1: ; if (ldv_state_variable_109 == 2) { target_fabric_nacl_base_release(target_fabric_nacl_base_item_ops_group1); ldv_state_variable_109 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_63086; case 2: ; if (ldv_state_variable_109 == 
1) { target_fabric_nacl_base_attr_store(target_fabric_nacl_base_item_ops_group1, target_fabric_nacl_base_item_ops_group0, (char const *)ldvarg56, ldvarg55); ldv_state_variable_109 = 1; } else { } if (ldv_state_variable_109 == 2) { target_fabric_nacl_base_attr_store(target_fabric_nacl_base_item_ops_group1, target_fabric_nacl_base_item_ops_group0, (char const *)ldvarg56, ldvarg55); ldv_state_variable_109 = 2; } else { } goto ldv_63086; case 3: ; if (ldv_state_variable_109 == 1) { ldv_probe_109(); ldv_state_variable_109 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63086; default: ldv_stop(); } ldv_63086: ; return; } } void ldv_main_exported_92(void) { struct config_item *ldvarg289 ; void *tmp ; char *ldvarg288 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(80UL); ldvarg289 = (struct config_item *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg288 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_92 == 1) { target_fabric_drop_tpg(target_fabric_tpg_group_ops_group0, ldvarg289); ldv_state_variable_92 = 1; } else { } goto ldv_63097; case 1: ; if (ldv_state_variable_92 == 1) { target_fabric_make_tpg(target_fabric_tpg_group_ops_group0, (char const *)ldvarg288); ldv_state_variable_92 = 1; } else { } goto ldv_63097; default: ldv_stop(); } ldv_63097: ; return; } } void ldv_main_exported_98(void) { char *ldvarg237 ; void *tmp ; struct config_item *ldvarg238 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg237 = (char *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg238 = (struct config_item *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_98 == 1) { target_fabric_drop_lun(target_fabric_lun_group_ops_group0, ldvarg238); ldv_state_variable_98 = 1; } else { } goto ldv_63106; case 1: ; if (ldv_state_variable_98 == 1) { target_fabric_make_lun(target_fabric_lun_group_ops_group0, (char const *)ldvarg237); ldv_state_variable_98 = 1; } else { } goto ldv_63106; default: ldv_stop(); } ldv_63106: ; return; } } void ldv_main_exported_103(void) { size_t ldvarg291 ; char *ldvarg292 ; void *tmp ; char *ldvarg290 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg292 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg290 = (char *)tmp___0; ldv_memset((void *)(& ldvarg291), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_103 == 1) { target_fabric_port_store_attr_alua_tg_pt_offline(target_fabric_port_alua_tg_pt_offline_group0, (char const *)ldvarg292, ldvarg291); ldv_state_variable_103 = 1; } else { } goto ldv_63116; case 1: ; if (ldv_state_variable_103 == 1) { target_fabric_port_show_attr_alua_tg_pt_offline(target_fabric_port_alua_tg_pt_offline_group0, ldvarg290); ldv_state_variable_103 = 1; } else { } goto ldv_63116; default: ldv_stop(); } ldv_63116: ; return; } } void ldv_main_exported_89(void) { char *ldvarg62 ; void *tmp ; size_t ldvarg61 ; char *ldvarg63 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg62 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg63 = (char *)tmp___0; ldv_memset((void *)(& ldvarg61), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_89 == 1) { target_fabric_discovery_attr_show(target_fabric_discovery_item_ops_group1, target_fabric_discovery_item_ops_group0, ldvarg63); ldv_state_variable_89 = 1; } else { } goto ldv_63126; case 1: ; if (ldv_state_variable_89 == 1) { 
target_fabric_discovery_attr_store(target_fabric_discovery_item_ops_group1, target_fabric_discovery_item_ops_group0, (char const *)ldvarg62, ldvarg61); ldv_state_variable_89 = 1; } else { } goto ldv_63126; default: ldv_stop(); } ldv_63126: ; return; } } void ldv_main_exported_101(void) { char *ldvarg120 ; void *tmp ; char *ldvarg122 ; void *tmp___0 ; size_t ldvarg121 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg120 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg122 = (char *)tmp___0; ldv_memset((void *)(& ldvarg121), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_101 == 1) { target_fabric_port_store_attr_alua_tg_pt_write_md(target_fabric_port_alua_tg_pt_write_md_group0, (char const *)ldvarg122, ldvarg121); ldv_state_variable_101 = 1; } else { } goto ldv_63136; case 1: ; if (ldv_state_variable_101 == 1) { target_fabric_port_show_attr_alua_tg_pt_write_md(target_fabric_port_alua_tg_pt_write_md_group0, ldvarg120); ldv_state_variable_101 = 1; } else { } goto ldv_63136; default: ldv_stop(); } ldv_63136: ; return; } } void ldv_main_exported_113(void) { char *ldvarg298 ; void *tmp ; struct config_item *ldvarg299 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg298 = (char *)tmp; tmp___0 = ldv_init_zalloc(80UL); ldvarg299 = (struct config_item *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_113 == 1) { target_core_mappedlun_stat_rmdir(target_fabric_mappedlun_stat_group_ops_group0, ldvarg299); ldv_state_variable_113 = 1; } else { } goto ldv_63145; case 1: ; if (ldv_state_variable_113 == 1) { target_core_mappedlun_stat_mkdir(target_fabric_mappedlun_stat_group_ops_group0, (char const *)ldvarg298); ldv_state_variable_113 = 1; } else { } goto ldv_63145; default: ldv_stop(); } ldv_63145: ; return; } } bool ldv_queue_work_on_127(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_128(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_129(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_130(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_131(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_132(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } 
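/* The numbered ldv_mutex_lock_NNN and ldv_mutex_unlock_NNN wrappers pair every
 * real mutex_lock() or mutex_unlock() with a call into the LDV lock model for
 * the specific lock class (lock, i_mutex_of_inode, mutex_of_device, and later
 * backend_mutex) so the verifier can check locking discipline. In the
 * ldv_mutex_trylock_NNN wrappers the model's result is returned and the
 * trailing return of ldv_func_res is unreachable by construction. */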
} void ldv_mutex_unlock_133(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_134(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_135(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_136(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_137(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_138(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern int hex2bin(u8 * , char const * , size_t ) ; extern int strncasecmp(char const * , char const * , size_t ) ; int ldv_mutex_trylock_163(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_161(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_164(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_165(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_160(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_162(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_166(struct mutex *ldv_func_arg1 ) ; extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; __inline static void spin_lock_irq(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->__annonCompField17.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->__annonCompField17.rlock); return; } } bool ldv_queue_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_157(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_156(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_159(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_158(struct workqueue_struct *ldv_func_arg1 ) ; extern unsigned char const _ctype[] ; __inline static unsigned char __tolower(unsigned char c ) { { if ((int )_ctype[(int )c] & 1) { c = (unsigned int )c + 32U; } else { } return (c); } } int target_get_pr_transport_id_len(struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , int *format_code ) ; int target_get_pr_transport_id(struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , int *format_code , unsigned char *buf ) ; char const *target_parse_pr_out_transport_id(struct se_portal_group *tpg , char const *buf , u32 *out_tid_len , char **port_nexus_ptr ) ; static int sas_get_pr_transport_id(struct se_node_acl *nacl , int *format_code , unsigned char *buf ) { int ret ; struct _ddebug descriptor ; long tmp ; { ret = hex2bin(buf + 4U, (char const *)(& nacl->initiatorname) + 4U, 8UL); if (ret != 0) { descriptor.modname = "target_core_mod"; descriptor.function = "sas_get_pr_transport_id"; descriptor.filename = 
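/* The struct _ddebug descriptor plus the guarded __dynamic_pr_debug() call seen
 * here (and repeated in the other *_get_pr_transport_id helpers) is CIL's
 * expansion of a pr_debug() invocation, roughly pr_debug("%s: invalid hex
 * string\n", __func__) in the original target_core_fabric_lib.c. Likewise, the
 * _min1/_min2 ternary in srp_get_pr_transport_id below appears to be the
 * expansion of min(len / 2, 16U), clamping the SRP initiator port identifier
 * to 16 binary bytes, with leading_zero_bytes padding the front of the field. */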
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_lib.c"; descriptor.format = "%s: invalid hex string\n"; descriptor.lineno = 57U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "%s: invalid hex string\n", "sas_get_pr_transport_id"); } else { } return (ret); } else { } return (24); } } static int fc_get_pr_transport_id(struct se_node_acl *se_nacl , int *format_code , unsigned char *buf ) { unsigned char *ptr ; int i ; int ret ; u32 off ; int tmp ; u32 tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { off = 8U; ptr = (unsigned char *)(& se_nacl->initiatorname); i = 0; goto ldv_56941; ldv_56944: tmp = strncmp((char const *)ptr + (unsigned long )i, ":", 1UL); if (tmp == 0) { i = i + 1; goto ldv_56941; } else { } tmp___0 = off; off = off + 1U; ret = hex2bin(buf + (unsigned long )tmp___0, (char const *)ptr + (unsigned long )i, 1UL); if (ret < 0) { descriptor.modname = "target_core_mod"; descriptor.function = "fc_get_pr_transport_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_lib.c"; descriptor.format = "%s: invalid hex string\n"; descriptor.lineno = 85U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "%s: invalid hex string\n", "fc_get_pr_transport_id"); } else { } return (ret); } else { } i = i + 2; ldv_56941: ; if (i <= 23) { goto ldv_56944; } else { } return (24); } } static int sbp_get_pr_transport_id(struct se_node_acl *nacl , int *format_code , unsigned char *buf ) { int ret ; struct _ddebug descriptor ; long tmp ; { ret = hex2bin(buf + 8U, (char const *)(& nacl->initiatorname), 8UL); if (ret != 0) { descriptor.modname = "target_core_mod"; descriptor.function = "sbp_get_pr_transport_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_lib.c"; descriptor.format = "%s: invalid hex string\n"; descriptor.lineno = 105U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "%s: invalid hex string\n", "sbp_get_pr_transport_id"); } else { } return (ret); } else { } return (24); } } static int srp_get_pr_transport_id(struct se_node_acl *nacl , int *format_code , unsigned char *buf ) { char const *p ; unsigned int len ; unsigned int count ; unsigned int leading_zero_bytes ; int rc ; int tmp ; size_t tmp___0 ; unsigned int _min1 ; unsigned int _min2 ; struct _ddebug descriptor ; long tmp___1 ; { p = (char const *)(& nacl->initiatorname); tmp = strncasecmp(p, "0x", 2UL); if (tmp == 0) { p = p + 2UL; } else { } tmp___0 = strlen(p); len = (unsigned int )tmp___0; if ((int )len & 1) { return (-22); } else { } _min1 = len / 2U; _min2 = 16U; count = _min1 < _min2 ? 
_min1 : _min2; leading_zero_bytes = 16U - count; memset((void *)buf + 8U, 0, (size_t )leading_zero_bytes); rc = hex2bin(buf + ((unsigned long )leading_zero_bytes + 8UL), p, (size_t )count); if (rc < 0) { descriptor.modname = "target_core_mod"; descriptor.function = "srp_get_pr_transport_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_lib.c"; descriptor.format = "hex2bin failed for %s: %d\n"; descriptor.lineno = 133U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "hex2bin failed for %s: %d\n", "srp_get_pr_transport_id", rc); } else { } return (rc); } else { } return (24); } } static int iscsi_get_pr_transport_id(struct se_node_acl *se_nacl , struct t10_pr_registration *pr_reg , int *format_code , unsigned char *buf ) { u32 off ; u32 padding ; u16 len ; int tmp ; { off = 4U; padding = 0U; len = 0U; spin_lock_irq(& se_nacl->nacl_sess_lock); tmp = sprintf((char *)buf + (unsigned long )off, "%s", (char *)(& se_nacl->initiatorname)); len = (u16 )tmp; len = (u16 )((int )len + 1); if (*format_code == 1 && (int )pr_reg->isid_present_at_reg) { *buf = (unsigned int )*buf | 64U; *(buf + (unsigned long )((u32 )len + off)) = 44U; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = 105U; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = 44U; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = 48U; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = 120U; off = off + 1U; len = (unsigned int )len + 5U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[0]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[1]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[2]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[3]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[4]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = (unsigned char )pr_reg->pr_reg_isid[5]; off = off + 1U; *(buf + (unsigned long )((u32 )len + off)) = 0U; off = off + 1U; len = (unsigned int )len + 7U; } else { } spin_unlock_irq(& se_nacl->nacl_sess_lock); padding = (u32 )(- ((int )len)) & 3U; if (padding != 0U) { len = (int )((u16 )padding) + (int )len; } else { } *(buf + 2UL) = (unsigned char )((int )len >> 8); *(buf + 3UL) = (unsigned char )len; len = (unsigned int )len + 4U; return ((int )len); } } static int iscsi_get_pr_transport_id_len(struct se_node_acl *se_nacl , struct t10_pr_registration *pr_reg , int *format_code ) { u32 len ; u32 padding ; size_t tmp ; { len = 0U; padding = 0U; spin_lock_irq(& se_nacl->nacl_sess_lock); tmp = strlen((char const *)(& se_nacl->initiatorname)); len = (u32 )tmp; len = len + 1U; if ((int )pr_reg->isid_present_at_reg) { len = len + 5U; len = len + 7U; *format_code = 1; } else { *format_code = 0; } spin_unlock_irq(& se_nacl->nacl_sess_lock); padding = - len & 3U; if (padding != 0U) { len = len + padding; } else { } len = len + 4U; return ((int )len); } } static char *iscsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg , char const *buf , u32 *out_tid_len , char **port_nexus_ptr ) { char *p ; u32 tid_len ; u32 padding ; int i ; u16 add_len ; u8 
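/* format_code below carries the top two bits of byte 0 of the iSCSI
 * TransportID (the mask 192U is 0xc0): per SPC-4, 0x00 means the buffer holds
 * only the initiator name, while 0x40 means the name is followed by a
 * ",i,0x<ISID>" suffix, which this parser splits off into *port_nexus_ptr and
 * lower-cases with __tolower(). */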
format_code ; size_t tmp ; struct _ddebug descriptor ; long tmp___0 ; unsigned char tmp___1 ; { format_code = (unsigned int )((u8 )*buf) & 192U; if ((unsigned int )format_code != 0U && (unsigned int )format_code != 64U) { printk("\vIllegal format code: 0x%02x for iSCSI Initiator Transport ID\n", (int )format_code); return ((char *)0); } else { } if ((unsigned long )out_tid_len != (unsigned long )((u32 *)0U)) { add_len = (u16 )((int )((short )((int )*(buf + 2UL) << 8)) | (int )((short )*(buf + 3UL))); tmp = strlen(buf + 4UL); tid_len = (u32 )tmp; tid_len = tid_len + 4U; tid_len = tid_len + 1U; padding = - tid_len & 3U; if (padding != 0U) { tid_len = tid_len + padding; } else { } if ((u32 )((int )add_len + 4) != tid_len) { descriptor.modname = "target_core_mod"; descriptor.function = "iscsi_parse_pr_out_transport_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_fabric_lib.c"; descriptor.format = "LIO-Target Extracted add_len: %hu does not match calculated tid_len: %u, using tid_len instead\n"; descriptor.lineno = 321U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "LIO-Target Extracted add_len: %hu does not match calculated tid_len: %u, using tid_len instead\n", (int )add_len + 4, tid_len); } else { } *out_tid_len = tid_len; } else { *out_tid_len = (u32 )((int )add_len + 4); } } else { } if ((unsigned int )format_code == 64U) { p = strstr(buf + 4UL, ",i,0x"); if ((unsigned long )p == (unsigned long )((char *)0)) { printk("\vUnable to locate \",i,0x\" separator for Initiator port identifier: %s\n", buf + 4UL); return ((char *)0); } else { } *p = 0; p = p + 5UL; *port_nexus_ptr = p; i = 0; goto ldv_57001; ldv_57000: ; if (((int )_ctype[(int )((unsigned char )*p)] & 4) != 0) { p = p + 1; goto ldv_56999; } else { } tmp___1 = __tolower((int )((unsigned char )*p)); *p = (char )tmp___1; p = p + 1; ldv_56999: i = i + 1; ldv_57001: ; if (i <= 11) { goto ldv_57000; } else { } } else { } return ((char *)buf + 4U); } } int target_get_pr_transport_id_len(struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , int *format_code ) { int tmp ; { switch ((nacl->se_tpg)->proto_id) { case 0: ; case 3: ; case 4: ; case 6: ; goto ldv_57012; case 5: tmp = iscsi_get_pr_transport_id_len(nacl, pr_reg, format_code); return (tmp); default: printk("\vUnknown proto_id: 0x%02x\n", (nacl->se_tpg)->proto_id); return (-22); } ldv_57012: *format_code = 0; return (24); } } int target_get_pr_transport_id(struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , int *format_code , unsigned char *buf ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { switch ((nacl->se_tpg)->proto_id) { case 6: tmp = sas_get_pr_transport_id(nacl, format_code, buf); return (tmp); case 3: tmp___0 = sbp_get_pr_transport_id(nacl, format_code, buf); return (tmp___0); case 4: tmp___1 = srp_get_pr_transport_id(nacl, format_code, buf); return (tmp___1); case 0: tmp___2 = fc_get_pr_transport_id(nacl, format_code, buf); return (tmp___2); case 5: tmp___3 = iscsi_get_pr_transport_id(nacl, pr_reg, format_code, buf); return (tmp___3); default: printk("\vUnknown proto_id: 0x%02x\n", (nacl->se_tpg)->proto_id); return (-22); } } } char const *target_parse_pr_out_transport_id(struct se_portal_group *tpg , char const *buf , u32 *out_tid_len , char 
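/* target_get_pr_transport_id_len(), target_get_pr_transport_id() and this
 * function dispatch on se_portal_group->proto_id, which follows the SPC
 * protocol identifier values: 0 = FCP, 3 = SBP (IEEE 1394), 4 = SRP,
 * 5 = iSCSI, 6 = SAS. All fixed-length formats report a 24-byte TransportID;
 * only iSCSI needs the variable-length helpers above. */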
**port_nexus_ptr ) { u32 offset ; char *tmp ; { switch (tpg->proto_id) { case 6: offset = 4U; goto ldv_57035; case 3: ; case 4: ; case 0: offset = 8U; goto ldv_57035; case 5: tmp = iscsi_parse_pr_out_transport_id(tpg, buf, out_tid_len, port_nexus_ptr); return ((char const *)tmp); default: printk("\vUnknown proto_id: 0x%02x\n", tpg->proto_id); return ((char const *)0); } ldv_57035: *port_nexus_ptr = (char *)0; *out_tid_len = 24U; return (buf + (unsigned long )offset); } } bool ldv_queue_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_156(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_157(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_158(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_159(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_160(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_161(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_162(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_163(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_164(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_165(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_166(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void *ERR_PTR(long error ) ; int ldv_mutex_trylock_191(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_189(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_192(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_193(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_unlock_196(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_197(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_199(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_201(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_202(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_188(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_190(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_194(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_195(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_198(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_200(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_backend_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_backend_mutex(struct mutex *lock ) ; bool ldv_queue_work_on_183(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_185(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_184(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_187(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_186(struct workqueue_struct *ldv_func_arg1 ) ; extern bool try_module_get(struct module * ) ; extern void module_put(struct module * ) ; int transport_backend_register(struct target_backend_ops const *ops ) ; void target_backend_unregister(struct target_backend_ops const *ops ) ; static struct list_head backend_list = {& backend_list, & backend_list}; static struct mutex backend_mutex = {{1}, {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "backend_mutex.wait_lock", 0, 0UL}}}}, {& backend_mutex.wait_list, & backend_mutex.wait_list}, 0, (void *)(& backend_mutex), {0, {0, 0}, "backend_mutex", 0, 0UL}}; static u32 hba_id_counter ; static spinlock_t hba_lock = {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "hba_lock", 0, 0UL}}}}; static struct list_head hba_list = {& hba_list, & hba_list}; int transport_backend_register(struct target_backend_ops const *ops ) { struct target_backend *tb ; struct target_backend *old ; void *tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; struct _ddebug descriptor ; long tmp___1 ; { tmp = kzalloc(216UL, 208U); tb = (struct target_backend *)tmp; if ((unsigned long )tb == (unsigned long )((struct target_backend *)0)) { return (-12); } else { } tb->ops = ops; ldv_mutex_lock_195(& backend_mutex); __mptr = (struct list_head const *)backend_list.next; old = (struct target_backend *)__mptr; goto ldv_57943; ldv_57942: tmp___0 = strcmp((char const *)(& (old->ops)->name), (char const *)(& ops->name)); if (tmp___0 == 0) { printk("\vbackend %s already registered.\n", (char const *)(& ops->name)); ldv_mutex_unlock_196(& backend_mutex); kfree((void const *)tb); return (-17); } else { } __mptr___0 = (struct list_head const *)old->list.next; old = (struct target_backend *)__mptr___0; ldv_57943: ; if ((unsigned long )(& old->list) != (unsigned long )(& backend_list)) { goto ldv_57942; } else { } target_setup_backend_cits(tb); list_add_tail(& tb->list, & backend_list); ldv_mutex_unlock_197(& backend_mutex); descriptor.modname = "target_core_mod"; descriptor.function = "transport_backend_register"; descriptor.filename = 
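/* transport_backend_register() appends a named backend to backend_list under
 * backend_mutex after rejecting duplicate names; core_get_backend() looks a
 * backend up by name and pins its module with try_module_get(). In
 * core_alloc_hba() further down, kzalloc(400UL, 208U) appears to be the
 * expansion of kzalloc(sizeof(*hba), GFP_KERNEL) (0xd0 is GFP_KERNEL here),
 * and the new se_hba is attached via the backend's attach_hba() op and linked
 * into hba_list under hba_lock. */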
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_hba.c"; descriptor.format = "TCM: Registered subsystem plugin: %s struct module: %p\n"; descriptor.lineno = 74U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "TCM: Registered subsystem plugin: %s struct module: %p\n", (char const *)(& ops->name), ops->owner); } else { } return (0); } } static char const __kstrtab_transport_backend_register[27U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_transport_backend_register ; struct kernel_symbol const __ksymtab_transport_backend_register = {(unsigned long )(& transport_backend_register), (char const *)(& __kstrtab_transport_backend_register)}; void target_backend_unregister(struct target_backend_ops const *ops ) { struct target_backend *tb ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { ldv_mutex_lock_198(& backend_mutex); __mptr = (struct list_head const *)backend_list.next; tb = (struct target_backend *)__mptr; goto ldv_57964; ldv_57963: ; if ((unsigned long )tb->ops == (unsigned long )ops) { list_del(& tb->list); kfree((void const *)tb); goto ldv_57962; } else { } __mptr___0 = (struct list_head const *)tb->list.next; tb = (struct target_backend *)__mptr___0; ldv_57964: ; if ((unsigned long )(& tb->list) != (unsigned long )(& backend_list)) { goto ldv_57963; } else { } ldv_57962: ldv_mutex_unlock_199(& backend_mutex); return; } } static char const __kstrtab_target_backend_unregister[26U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'b', 'a', 'c', 'k', 'e', 'n', 'd', '_', 'u', 'n', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_target_backend_unregister ; struct kernel_symbol const __ksymtab_target_backend_unregister = {(unsigned long )(& target_backend_unregister), (char const *)(& __kstrtab_target_backend_unregister)}; static struct target_backend *core_get_backend(char const *name ) { struct target_backend *tb ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; bool tmp___0 ; int tmp___1 ; { ldv_mutex_lock_200(& backend_mutex); __mptr = (struct list_head const *)backend_list.next; tb = (struct target_backend *)__mptr; goto ldv_57982; ldv_57981: tmp = strcmp((char const *)(& (tb->ops)->name), name); if (tmp == 0) { goto found; } else { } __mptr___0 = (struct list_head const *)tb->list.next; tb = (struct target_backend *)__mptr___0; ldv_57982: ; if ((unsigned long )(& tb->list) != (unsigned long )(& backend_list)) { goto ldv_57981; } else { } ldv_mutex_unlock_201(& backend_mutex); return ((struct target_backend *)0); found: ; if ((unsigned long )(tb->ops)->owner != (unsigned long )((struct module */* const */)0)) { tmp___0 = try_module_get((tb->ops)->owner); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tb = (struct target_backend *)0; } else { } } else { } ldv_mutex_unlock_202(& backend_mutex); return (tb); } } struct se_hba *core_alloc_hba(char const *plugin_name , u32 plugin_dep_id , u32 hba_flags ) { struct se_hba *hba ; int ret ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; u32 tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; void *tmp___3 ; { ret = 0; 
tmp = kzalloc(400UL, 208U); hba = (struct se_hba *)tmp; if ((unsigned long )hba == (unsigned long )((struct se_hba *)0)) { printk("\vUnable to allocate struct se_hba\n"); tmp___0 = ERR_PTR(-12L); return ((struct se_hba *)tmp___0); } else { } spinlock_check(& hba->device_lock); __raw_spin_lock_init(& hba->device_lock.__annonCompField17.rlock, "&(&hba->device_lock)->rlock", & __key); __mutex_init(& hba->hba_access_mutex, "&hba->hba_access_mutex", & __key___0); hba->hba_index = scsi_get_new_index(0); hba->hba_flags = hba->hba_flags | hba_flags; hba->backend = core_get_backend(plugin_name); if ((unsigned long )hba->backend == (unsigned long )((struct target_backend *)0)) { ret = -22; goto out_free_hba; } else { } ret = (*(((hba->backend)->ops)->attach_hba))(hba, plugin_dep_id); if (ret < 0) { goto out_module_put; } else { } spin_lock(& hba_lock); tmp___1 = hba_id_counter; hba_id_counter = hba_id_counter + 1U; hba->hba_id = tmp___1; list_add_tail(& hba->hba_node, & hba_list); spin_unlock(& hba_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_alloc_hba"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_hba.c"; descriptor.format = "CORE_HBA[%d] - Attached HBA to Generic Target Core\n"; descriptor.lineno = 147U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor, "CORE_HBA[%d] - Attached HBA to Generic Target Core\n", hba->hba_id); } else { } return (hba); out_module_put: module_put(((hba->backend)->ops)->owner); hba->backend = (struct target_backend *)0; out_free_hba: kfree((void const *)hba); tmp___3 = ERR_PTR((long )ret); return ((struct se_hba *)tmp___3); } } int core_delete_hba(struct se_hba *hba ) { int __ret_warn_on ; long tmp ; struct _ddebug descriptor ; long tmp___0 ; { __ret_warn_on = hba->dev_count != 0U; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_hba.c", 162); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); (*(((hba->backend)->ops)->detach_hba))(hba); spin_lock(& hba_lock); list_del(& hba->hba_node); spin_unlock(& hba_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_delete_hba"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_hba.c"; descriptor.format = "CORE_HBA[%d] - Detached HBA from Generic Target Core\n"; descriptor.lineno = 171U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "CORE_HBA[%d] - Detached HBA from Generic Target Core\n", hba->hba_id); } else { } module_put(((hba->backend)->ops)->owner); hba->backend = (struct target_backend *)0; kfree((void const *)hba); return (0); } } bool ldv_queue_work_on_183(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; 
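/* The ldv_queue_work_on_NNN, ldv_queue_delayed_work_on_NNN and
 * ldv_flush_workqueue_NNN wrappers forward to the real workqueue API and then
 * notify the LDV work-item model (activate_work_2() on queueing,
 * call_and_disable_all_2() on flush) so the verifier can explore the queued
 * work functions. */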
activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_184(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_185(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_186(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_187(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_188(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_189(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_190(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_191(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_192(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_193(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_194(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_195(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_backend_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_196(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_backend_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_197(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_backend_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_198(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_backend_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_199(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_backend_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_200(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_backend_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_201(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_backend_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_202(struct mutex *ldv_func_arg1 ) { { 
ldv_mutex_unlock_backend_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } __inline static __u64 __arch_swab64(__u64 val ) { { __asm__ ("bswapq %0": "=r" (val): "0" (val)); return (val); } } __inline static __u64 __fswab64(__u64 val ) { __u64 tmp ; { tmp = __arch_swab64(val); return (tmp); } } __inline static __u64 __swab64p(__u64 const *p ) { __u64 tmp ; { tmp = __fswab64(*p); return (tmp); } } __inline static __u64 __be64_to_cpup(__be64 const *p ) { __u64 tmp ; { tmp = __swab64p(p); return (tmp); } } extern void __list_del_entry(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } } extern void __cmpxchg_wrong_size(void) ; __inline static long atomic64_read(atomic64_t const *v ) { long __var ; { __var = 0L; return ((long )*((long const volatile *)(& v->counter))); } } __inline static int atomic64_sub_and_test(long i , atomic64_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subq %2, %0; sete %1": "+m" (v->counter), "=qm" (c): "er" (i): "memory"); return ((int )((signed char )c) != 0); } } __inline static long atomic64_cmpxchg(atomic64_t *v , long old , long new ) { long __ret ; long __old ; long __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (8UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5716; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5716; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5716; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5716; default: __cmpxchg_wrong_size(); } ldv_5716: ; return (__ret); } } __inline static int atomic64_add_unless(atomic64_t *v , long a , long u ) { long c ; long old ; long tmp ; long tmp___0 ; { c = atomic64_read((atomic64_t const *)v); ldv_5745: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5744; 
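/* atomic64_add_unless() is the usual cmpxchg retry loop: re-read the counter,
 * give up (returning 0) once it equals u, otherwise try to install c + a and
 * retry on contention. The ldv_5744/ldv_5745 labels are CIL's encoding of the
 * loop body and its exit. */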
} else { } old = atomic64_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5744; } else { } c = old; goto ldv_5745; ldv_5744: ; return (c != u); } } __inline static int atomic_long_sub_and_test(long i , atomic_long_t *l ) { atomic64_t *v ; int tmp ; { v = l; tmp = atomic64_sub_and_test(i, v); return (tmp); } } int ldv_mutex_trylock_235(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_233(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_236(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_237(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_240(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_242(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_232(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_234(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_238(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_239(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_241(struct mutex *ldv_func_arg1 ) ; __inline static void __preempt_count_add___1(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6781; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6781; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6781; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6781; default: __bad_percpu_size(); } ldv_6781: ; return; } } __inline static void __preempt_count_sub___1(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6793; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6793; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6793; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6793; default: __bad_percpu_size(); } ldv_6793: ; return; } } __inline static void __rcu_read_lock___1(void) { { __preempt_count_add___1(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___1(void) { { __asm__ volatile ("": : : 
"memory"); __preempt_count_sub___1(1); return; } } __inline static void rcu_read_lock___1(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __rcu_read_lock___1(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 849, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___1(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 900, "rcu_read_unlock() used illegally while idle"); } else { } } else { } __rcu_read_unlock___1(); rcu_lock_release(& rcu_lock_map); return; } } __inline static void rcu_read_lock_sched___0(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __preempt_count_add___1(1); __asm__ volatile ("": : : "memory"); rcu_lock_acquire(& rcu_sched_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 965, "rcu_read_lock_sched() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock_sched___0(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 983, "rcu_read_unlock_sched() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_sched_lock_map); __asm__ volatile ("": : : "memory"); __preempt_count_sub___1(1); return; } } bool ldv_queue_work_on_227(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_229(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_228(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_231(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_230(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool __ref_is_percpu___0(struct percpu_ref *ref , unsigned long **percpu_countp ) { unsigned long percpu_ptr ; unsigned long _________p1 ; union __anonunion___u_192___0 __u ; long tmp ; { __read_once_size((void const volatile *)(& ref->percpu_count_ptr), (void *)(& __u.__c), 8); _________p1 = __u.__val; percpu_ptr = _________p1; tmp = ldv__builtin_expect((percpu_ptr & 3UL) != 0UL, 0L); if (tmp != 0L) { return (0); } else { } *percpu_countp = (unsigned long *)percpu_ptr; return (1); } } __inline static bool percpu_ref_tryget_live(struct percpu_ref *ref ) { unsigned long *percpu_count ; int ret ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; bool tmp ; { ret = 0; rcu_read_lock_sched___0(); tmp = __ref_is_percpu___0(ref, & percpu_count); if ((int )tmp) { 
__vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 1; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16593; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16593; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16593; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16593; default: __bad_percpu_size(); } ldv_16593: ; goto ldv_16598; case 2UL: pao_ID_____0 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16604; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16604; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16604; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16604; default: __bad_percpu_size(); } ldv_16604: ; goto ldv_16598; case 4UL: pao_ID_____1 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16614; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16614; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16614; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16614; default: __bad_percpu_size(); } ldv_16614: ; goto ldv_16598; case 8UL: pao_ID_____2 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if 
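/* These nested switch statements are the expansion of
 * this_cpu_inc(*percpu_count), the percpu_ref_tryget_live() fast path taken
 * while the ref is still in per-CPU mode. The slow path just below falls back
 * to atomic64_add_unless() on ref->count and is skipped entirely once the
 * __PERCPU_REF_DEAD bit (percpu_count_ptr with bit 2UL set) has been set. */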
(pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16624; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16624; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16624; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16624; default: __bad_percpu_size(); } ldv_16624: ; goto ldv_16598; default: __bad_size_call_parameter(); goto ldv_16598; } ldv_16598: ret = 1; } else if ((ref->percpu_count_ptr & 2UL) == 0UL) { ret = atomic64_add_unless(& ref->count, 1L, 0L); } else { } rcu_read_unlock_sched___0(); return (ret != 0); } } __inline static void percpu_ref_put_many(struct percpu_ref *ref , unsigned long nr ) { unsigned long *percpu_count ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; int tmp ; long tmp___0 ; bool tmp___1 ; { rcu_read_lock_sched___0(); tmp___1 = __ref_is_percpu___0(ref, & percpu_count); if ((int )tmp___1) { __vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 0; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16641; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16641; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16641; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16641; default: __bad_percpu_size(); } ldv_16641: ; goto ldv_16646; case 2UL: pao_ID_____0 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16652; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16652; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16652; 
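/* percpu_ref_put_many() mirrors the get side: in per-CPU mode this switch is
 * the expansion of this_cpu_sub(*percpu_count, nr); in atomic mode it calls
 * atomic_long_sub_and_test() instead and invokes ref->release(ref) when the
 * count reaches zero. percpu_ref_put() is simply put_many with nr == 1. */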
case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16652; default: __bad_percpu_size(); } ldv_16652: ; goto ldv_16646; case 4UL: pao_ID_____1 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16662; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16662; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16662; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16662; default: __bad_percpu_size(); } ldv_16662: ; goto ldv_16646; case 8UL: pao_ID_____2 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16672; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16672; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16672; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16672; default: __bad_percpu_size(); } ldv_16672: ; goto ldv_16646; default: __bad_size_call_parameter(); goto ldv_16646; } ldv_16646: ; } else { tmp = atomic_long_sub_and_test((long )nr, & ref->count); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { (*(ref->release))(ref); } else { } } rcu_read_unlock_sched___0(); return; } } __inline static void percpu_ref_put(struct percpu_ref *ref ) { { percpu_ref_put_many(ref, 1UL); return; } } extern void *kmem_cache_alloc(struct kmem_cache * , gfp_t ) ; extern void kmem_cache_free(struct kmem_cache * , void * ) ; __inline static void *kmem_cache_zalloc(struct kmem_cache *k , gfp_t flags ) { void *tmp ; { tmp = kmem_cache_alloc(k, flags | 32768U); return (tmp); } } extern void *vzalloc(unsigned long ) ; extern void vfree(void const * ) ; extern void fput(struct file * ) ; __inline static u64 get_unaligned_be64(void const *p ) { __u64 tmp ; { tmp = __be64_to_cpup((__be64 const *)p); return (tmp); } } extern struct file *filp_open(char const * , int , umode_t ) 
; extern ssize_t kernel_write(struct file * , char const * , size_t , loff_t ) ; __inline static void atomic_inc_mb(atomic_t *v ) { { __asm__ volatile ("": : : "memory"); atomic_inc(v); __asm__ volatile ("": : : "memory"); return; } } __inline static void atomic_dec_mb(atomic_t *v ) { { __asm__ volatile ("": : : "memory"); atomic_dec(v); __asm__ volatile ("": : : "memory"); return; } } void target_complete_cmd(struct se_cmd *cmd , u8 scsi_status ) ; void *transport_kmap_data_sg(struct se_cmd *cmd ) ; void transport_kunmap_data_sg(struct se_cmd *cmd ) ; int core_tmr_lun_reset(struct se_device *dev , struct se_tmr_req *tmr , struct list_head *preempt_and_abort_list , struct se_cmd *prout_cmd ) ; struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg , char const *initiatorname ) ; unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd ) ; struct kmem_cache *t10_pr_reg_cache ; sense_reason_t target_scsi2_reservation_release(struct se_cmd *cmd ) ; sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *cmd ) ; sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *cmd ) ; sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *cmd ) ; sense_reason_t target_check_reservation(struct se_cmd *cmd ) ; void target_ua_allocate_lun(struct se_node_acl *nacl , u32 unpacked_lun , u8 asc , u8 ascq ) ; void core_pr_dump_initiator_port(struct t10_pr_registration *pr_reg , char *buf , u32 size ) { { if (! pr_reg->isid_present_at_reg) { *buf = 0; } else { } snprintf(buf, (size_t )size, ",i,0x%s", (char *)(& pr_reg->pr_reg_isid)); return; } } static void __core_scsi3_complete_pro_release(struct se_device *dev , struct se_node_acl *se_nacl , struct t10_pr_registration *pr_reg , int explicit , int unreg ) ; static int is_reservation_holder(struct t10_pr_registration *pr_res_holder , struct t10_pr_registration *pr_reg ) { int pr_res_type ; { if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { pr_res_type = pr_res_holder->pr_res_type; return (((unsigned long )pr_res_holder == (unsigned long )pr_reg || pr_res_type == 7) || pr_res_type == 8); } else { } return (0); } } static sense_reason_t target_scsi2_reservation_check(struct se_cmd *cmd ) { struct se_device *dev ; struct se_session *sess ; { dev = cmd->se_dev; sess = cmd->se_sess; switch ((int )*(cmd->t_task_cdb)) { case 18: ; case 23: ; case 87: ; return (0U); default: ; goto ldv_57397; } ldv_57397: ; if ((unsigned long )dev->dev_reserved_node_acl == (unsigned long )((struct se_node_acl *)0) || (unsigned long )sess == (unsigned long )((struct se_session *)0)) { return (0U); } else { } if ((unsigned long )dev->dev_reserved_node_acl != (unsigned long )sess->se_node_acl) { return (16U); } else { } if ((dev->dev_reservation_flags & 2U) != 0U) { if (dev->dev_res_bin_isid != sess->sess_bin_isid) { return (16U); } else { } } else { } return (0U); } } static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *dev , struct se_node_acl *nacl , struct se_session *sess ) ; static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg ) ; static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd ) { struct se_session *se_sess ; struct se_device *dev ; struct t10_pr_registration *pr_reg ; struct t10_reservation *pr_tmpl ; int conflict ; int tmp ; { se_sess = cmd->se_sess; dev = cmd->se_dev; pr_tmpl = & dev->t10_pr; conflict = 0; pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg != (unsigned long 
)((struct t10_pr_registration *)0)) { if (pr_reg->pr_res_holder != 0) { core_scsi3_put_pr_reg(pr_reg); return (1); } else { } if (((pr_reg->pr_res_type == 5 || pr_reg->pr_res_type == 6) || pr_reg->pr_res_type == 7) || pr_reg->pr_res_type == 8) { core_scsi3_put_pr_reg(pr_reg); return (1); } else { } core_scsi3_put_pr_reg(pr_reg); conflict = 1; } else { spin_lock(& pr_tmpl->registration_lock); tmp = list_empty((struct list_head const *)(& pr_tmpl->registration_list)); conflict = tmp == 0; spin_unlock(& pr_tmpl->registration_lock); } if (conflict != 0) { printk("\vReceived legacy SPC-2 RESERVE/RELEASE while active SPC-3 registrations exist, returning RESERVATION_CONFLICT\n"); return (-16); } else { } return (0); } } sense_reason_t target_scsi2_reservation_release(struct se_cmd *cmd ) { struct se_device *dev ; struct se_session *sess ; struct se_portal_group *tpg ; int rc ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { dev = cmd->se_dev; sess = cmd->se_sess; if ((unsigned long )sess == (unsigned long )((struct se_session *)0) || (unsigned long )sess->se_tpg == (unsigned long )((struct se_portal_group *)0)) { goto out; } else { } rc = target_check_scsi2_reservation_conflict(cmd); if (rc == 1) { goto out; } else { } if (rc < 0) { return (16U); } else { } spin_lock(& dev->dev_reservation_lock); if ((unsigned long )dev->dev_reserved_node_acl == (unsigned long )((struct se_node_acl *)0) || (unsigned long )sess == (unsigned long )((struct se_session *)0)) { goto out_unlock; } else { } if ((unsigned long )dev->dev_reserved_node_acl != (unsigned long )sess->se_node_acl) { goto out_unlock; } else { } if (dev->dev_res_bin_isid != sess->sess_bin_isid) { goto out_unlock; } else { } dev->dev_reserved_node_acl = (struct se_node_acl *)0; dev->dev_reservation_flags = dev->dev_reservation_flags & 4294967294U; if ((dev->dev_reservation_flags & 2U) != 0U) { dev->dev_res_bin_isid = 0ULL; dev->dev_reservation_flags = dev->dev_reservation_flags & 4294967293U; } else { } tpg = sess->se_tpg; descriptor.modname = "target_core_mod"; descriptor.function = "target_scsi2_reservation_release"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SCSI-2 Released reservation for %s LUN: %llu -> MAPPED LUN: %llu for %s\n"; descriptor.lineno = 236U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SCSI-2 Released reservation for %s LUN: %llu -> MAPPED LUN: %llu for %s\n", tmp, (cmd->se_lun)->unpacked_lun, cmd->orig_fe_lun, (char *)(& (sess->se_node_acl)->initiatorname)); } else { } out_unlock: spin_unlock(& dev->dev_reservation_lock); out: target_complete_cmd(cmd, 0); return (0U); } } sense_reason_t target_scsi2_reservation_reserve(struct se_cmd *cmd ) { struct se_device *dev ; struct se_session *sess ; struct se_portal_group *tpg ; sense_reason_t ret ; int rc ; char *tmp ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { dev = cmd->se_dev; sess = cmd->se_sess; ret = 0U; if ((int )*(cmd->t_task_cdb + 1UL) & 1 && ((int )*(cmd->t_task_cdb + 1UL) & 2) != 0) { printk("\vLongIO and Obselete Bits set, returning ILLEGAL_REQUEST\n"); return (2U); } else { } if ((unsigned long )sess == (unsigned long )((struct se_session *)0) || (unsigned long )sess->se_tpg == (unsigned 
long )((struct se_portal_group *)0)) { goto out; } else { } rc = target_check_scsi2_reservation_conflict(cmd); if (rc == 1) { goto out; } else { } if (rc < 0) { return (16U); } else { } tpg = sess->se_tpg; spin_lock(& dev->dev_reservation_lock); if ((unsigned long )dev->dev_reserved_node_acl != (unsigned long )((struct se_node_acl *)0) && (unsigned long )dev->dev_reserved_node_acl != (unsigned long )sess->se_node_acl) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vSCSI-2 RESERVATION CONFLIFT for %s fabric\n", tmp); printk("\vOriginal reserver LUN: %llu %s\n", (cmd->se_lun)->unpacked_lun, (char *)(& (dev->dev_reserved_node_acl)->initiatorname)); printk("\vCurrent attempt - LUN: %llu -> MAPPED LUN: %llu from %s \n", (cmd->se_lun)->unpacked_lun, cmd->orig_fe_lun, (char *)(& (sess->se_node_acl)->initiatorname)); ret = 16U; goto out_unlock; } else { } dev->dev_reserved_node_acl = sess->se_node_acl; dev->dev_reservation_flags = dev->dev_reservation_flags | 1U; if (sess->sess_bin_isid != 0ULL) { dev->dev_res_bin_isid = sess->sess_bin_isid; dev->dev_reservation_flags = dev->dev_reservation_flags | 2U; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_scsi2_reservation_reserve"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu for %s\n"; descriptor.lineno = 299U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SCSI-2 Reserved %s LUN: %llu -> MAPPED LUN: %llu for %s\n", tmp___0, (cmd->se_lun)->unpacked_lun, cmd->orig_fe_lun, (char *)(& (sess->se_node_acl)->initiatorname)); } else { } out_unlock: spin_unlock(& dev->dev_reservation_lock); out: ; if (ret == 0U) { target_complete_cmd(cmd, 0); } else { } return (ret); } } static int core_scsi3_pr_seq_non_holder(struct se_cmd *cmd , u32 pr_reg_type , bool isid_mismatch ) { unsigned char *cdb ; struct se_session *se_sess ; struct se_node_acl *nacl ; int other_cdb ; int registered_nexus ; int ret ; int all_reg ; int reg_only ; int we ; int legacy ; struct se_dev_entry *se_deve ; struct _ddebug descriptor ; unsigned char *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; unsigned char *tmp___1 ; unsigned char *tmp___2 ; long tmp___3 ; struct _ddebug descriptor___1 ; unsigned char *tmp___4 ; long tmp___5 ; struct _ddebug descriptor___2 ; unsigned char *tmp___6 ; long tmp___7 ; struct _ddebug descriptor___3 ; unsigned char *tmp___8 ; long tmp___9 ; struct _ddebug descriptor___4 ; unsigned char *tmp___10 ; unsigned char *tmp___11 ; long tmp___12 ; { cdb = cmd->t_task_cdb; se_sess = cmd->se_sess; nacl = se_sess->se_node_acl; other_cdb = 0; registered_nexus = 0; ret = 1; all_reg = 0; reg_only = 0; we = 0; legacy = 0; if ((int )isid_mismatch) { registered_nexus = 0; } else { rcu_read_lock___1(); se_deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); if ((unsigned long )se_deve != (unsigned long )((struct se_dev_entry *)0)) { registered_nexus = constant_test_bit(1L, (unsigned long const volatile *)(& se_deve->deve_flags)); } else { } rcu_read_unlock___1(); } switch (pr_reg_type) { case 1U: we = 1; case 3U: ; goto ldv_57453; case 5U: we = 1; case 6U: reg_only = 1; goto ldv_57453; case 7U: we = 1; case 8U: all_reg = 1; 
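/* Editorial annotation: this switch classifies the SPC-3 persistent reservation type
 * for the conflict checks that follow -- types 1, 5 and 7 (the Write Exclusive
 * variants) set 'we', types 5/6 (Registrants Only) set 'reg_only', types 7/8
 * (All Registrants) set 'all_reg'; the case fall-throughs are intentional in the
 * original kernel code. */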
goto ldv_57453; default: ; return (-22); } ldv_57453: ; switch ((int )*cdb) { case 162: ; if (registered_nexus != 0) { return (0); } else { } ret = we == 0; goto ldv_57460; case 26: ; case 90: ; case 140: ; case 60: ; case 28: ; if (legacy != 0) { ret = 1; goto ldv_57460; } else { } if (registered_nexus != 0) { ret = 0; goto ldv_57460; } else { } ret = we == 0; goto ldv_57460; case 95: ; switch ((int )*(cdb + 1UL) & 31) { case 3: ; case 4: ; case 5: ret = registered_nexus == 0; goto ldv_57470; case 0: ; case 6: ret = 0; goto ldv_57470; case 7: ; case 1: ret = 1; goto ldv_57470; case 2: ret = registered_nexus == 0; goto ldv_57470; default: printk("\vUnknown PERSISTENT_RESERVE_OUT service action: 0x%02x\n", (int )*(cdb + 1UL) & 31); return (-22); } ldv_57470: ; goto ldv_57460; case 23: ; case 87: ret = 0; goto ldv_57460; case 22: ; case 86: ret = 0; goto ldv_57460; case 0: ret = legacy != 0; goto ldv_57460; case 163: ; switch ((int )*(cdb + 1UL) & 31) { case 16: ; if (registered_nexus != 0) { ret = 0; goto ldv_57484; } else { } ret = we == 0; goto ldv_57484; case 12: ; case 13: ; if (legacy != 0) { ret = 1; goto ldv_57484; } else { } if (registered_nexus != 0) { ret = 0; goto ldv_57484; } else { } ret = we == 0; goto ldv_57484; case 11: ; case 5: ; case 14: ; case 10: ; case 15: ret = 0; goto ldv_57484; default: printk("\vUnknown MI Service Action: 0x%02x\n", (int )*(cdb + 1UL) & 31); return (-22); } ldv_57484: ; goto ldv_57460; case 134: ; case 135: ; case 18: ; case 77: ; case 171: ; case 160: ; case 3: ; case 94: ret = 0; goto ldv_57460; default: other_cdb = 1; goto ldv_57460; } ldv_57460: ; if (ret == 0 && other_cdb == 0) { descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_pr_seq_non_holder"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "Allowing explicit CDB: 0x%02x for %s reservation holder\n"; descriptor.lineno = 491U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = core_scsi3_pr_dump_type((int )pr_reg_type); __dynamic_pr_debug(& descriptor, "Allowing explicit CDB: 0x%02x for %s reservation holder\n", (int )*cdb, tmp); } else { } return (ret); } else { } if (we != 0 && registered_nexus == 0) { if ((unsigned int )cmd->data_direction == 1U) { descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_scsi3_pr_seq_non_holder"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "%s Conflict for unregistered nexus %s CDB: 0x%02x to %s reservation\n"; descriptor___0.lineno = 508U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___1 = core_scsi3_pr_dump_type((int )pr_reg_type); tmp___2 = transport_dump_cmd_direction(cmd); __dynamic_pr_debug(& descriptor___0, "%s Conflict for unregistered nexus %s CDB: 0x%02x to %s reservation\n", tmp___2, (char *)(& (se_sess->se_node_acl)->initiatorname), (int )*cdb, tmp___1); } else { } return (1); } else { if (registered_nexus == 0) { descriptor___1.modname = "target_core_mod"; descriptor___1.function = "core_scsi3_pr_seq_non_holder"; descriptor___1.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___1.format = "Allowing implicit CDB: 0x%02x for %s reservation on unregistered nexus\n"; descriptor___1.lineno = 525U; descriptor___1.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = core_scsi3_pr_dump_type((int )pr_reg_type); __dynamic_pr_debug(& descriptor___1, "Allowing implicit CDB: 0x%02x for %s reservation on unregistered nexus\n", (int )*cdb, tmp___4); } else { } } else { } return (0); } } else if (reg_only != 0 || all_reg != 0) { if (registered_nexus != 0) { descriptor___2.modname = "target_core_mod"; descriptor___2.function = "core_scsi3_pr_seq_non_holder"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___2.format = "Allowing implicit CDB: 0x%02x for %s reservation\n"; descriptor___2.lineno = 539U; descriptor___2.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___7 != 0L) { tmp___6 = core_scsi3_pr_dump_type((int )pr_reg_type); __dynamic_pr_debug(& descriptor___2, "Allowing implicit CDB: 0x%02x for %s reservation\n", (int )*cdb, tmp___6); } else { } return (0); } else { } } else if (we != 0 && registered_nexus != 0) { if ((unsigned int )cmd->data_direction == 2U) { descriptor___3.modname = "target_core_mod"; descriptor___3.function = "core_scsi3_pr_seq_non_holder"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___3.format = "Allowing READ CDB: 0x%02x for %s reservation\n"; descriptor___3.lineno = 551U; descriptor___3.flags = 0U; tmp___9 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___9 != 0L) { tmp___8 = core_scsi3_pr_dump_type((int )pr_reg_type); __dynamic_pr_debug(& descriptor___3, "Allowing READ CDB: 0x%02x for %s reservation\n", (int )*cdb, tmp___8); } else { } return (0); } else { } } else { } descriptor___4.modname = "target_core_mod"; descriptor___4.function = "core_scsi3_pr_seq_non_holder"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___4.format = "%s Conflict for %sregistered nexus %s CDB: 0x%2x for %s reservation\n"; descriptor___4.lineno = 560U; descriptor___4.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___10 = core_scsi3_pr_dump_type((int )pr_reg_type); tmp___11 = transport_dump_cmd_direction(cmd); __dynamic_pr_debug(& descriptor___4, "%s Conflict for %sregistered nexus %s CDB: 0x%2x for %s reservation\n", tmp___11, registered_nexus != 0 ? 
(char *)"" : (char *)"un", (char *)(& (se_sess->se_node_acl)->initiatorname), (int )*cdb, tmp___10); } else { } return (1); } } static sense_reason_t target_scsi3_pr_reservation_check(struct se_cmd *cmd ) { struct se_device *dev ; struct se_session *sess ; u32 pr_reg_type ; bool isid_mismatch ; int tmp ; { dev = cmd->se_dev; sess = cmd->se_sess; isid_mismatch = 0; if ((unsigned long )dev->dev_pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { return (0U); } else { } pr_reg_type = (u32 )(dev->dev_pr_res_holder)->pr_res_type; cmd->pr_res_key = (dev->dev_pr_res_holder)->pr_res_key; if ((unsigned long )(dev->dev_pr_res_holder)->pr_reg_nacl != (unsigned long )sess->se_node_acl) { goto check_nonholder; } else { } if ((int )(dev->dev_pr_res_holder)->isid_present_at_reg) { if ((dev->dev_pr_res_holder)->pr_reg_bin_isid != sess->sess_bin_isid) { isid_mismatch = 1; goto check_nonholder; } else { } } else { } return (0U); check_nonholder: tmp = core_scsi3_pr_seq_non_holder(cmd, pr_reg_type, (int )isid_mismatch); if (tmp != 0) { return (16U); } else { } return (0U); } } static u32 core_scsi3_pr_generation(struct se_device *dev ) { u32 prg ; u32 tmp ; { spin_lock(& dev->dev_reservation_lock); tmp = dev->t10_pr.pr_generation; dev->t10_pr.pr_generation = dev->t10_pr.pr_generation + 1U; prg = tmp; spin_unlock(& dev->dev_reservation_lock); return (prg); } } static struct t10_pr_registration *__core_scsi3_do_alloc_registration(struct se_device *dev , struct se_node_acl *nacl , struct se_lun *lun , struct se_dev_entry *deve , u64 mapped_lun , unsigned char *isid , u64 sa_res_key , int all_tg_pt , int aptpl ) { struct t10_pr_registration *pr_reg ; void *tmp ; { tmp = kmem_cache_zalloc(t10_pr_reg_cache, 32U); pr_reg = (struct t10_pr_registration *)tmp; if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vUnable to allocate struct t10_pr_registration\n"); return ((struct t10_pr_registration *)0); } else { } INIT_LIST_HEAD(& pr_reg->pr_reg_list); INIT_LIST_HEAD(& pr_reg->pr_reg_abort_list); INIT_LIST_HEAD(& pr_reg->pr_reg_aptpl_list); INIT_LIST_HEAD(& pr_reg->pr_reg_atp_list); INIT_LIST_HEAD(& pr_reg->pr_reg_atp_mem_list); atomic_set(& pr_reg->pr_res_holders, 0); pr_reg->pr_reg_nacl = nacl; pr_reg->pr_reg_deve = deve; pr_reg->pr_res_mapped_lun = mapped_lun; pr_reg->pr_aptpl_target_lun = lun->unpacked_lun; pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; pr_reg->pr_res_key = sa_res_key; pr_reg->pr_reg_all_tg_pt = all_tg_pt; pr_reg->pr_reg_aptpl = aptpl; if ((unsigned long )isid != (unsigned long )((unsigned char *)0U)) { pr_reg->pr_reg_bin_isid = get_unaligned_be64((void const *)isid); snprintf((char *)(& pr_reg->pr_reg_isid), 16UL, "%s", isid); pr_reg->isid_present_at_reg = 1; } else { } return (pr_reg); } } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve ) ; static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve ) ; static struct t10_pr_registration *__core_scsi3_alloc_registration(struct se_device *dev , struct se_node_acl *nacl , struct se_lun *lun , struct se_dev_entry *deve , u64 mapped_lun , unsigned char *isid , u64 sa_res_key , int all_tg_pt , int aptpl ) { struct se_dev_entry *deve_tmp ; struct se_node_acl *nacl_tmp ; struct se_lun_acl *lacl_tmp ; struct se_lun *lun_tmp ; struct se_lun *next ; struct se_lun *dest_lun ; struct target_core_fabric_ops const *tfo ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_atp ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration 
*pr_reg_tmp_safe ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___1 ; struct se_lun_acl *________p1 ; struct se_lun_acl *_________p1 ; union __anonunion___u_382___0 __u ; bool __warned ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; struct se_lun *________p1___0 ; struct se_lun *_________p1___0 ; union __anonunion___u_384___0 __u___0 ; bool __warned___0 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; { tfo = (nacl->se_tpg)->se_tpg_tfo; pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, lun, deve, mapped_lun, isid, sa_res_key, all_tg_pt, aptpl); if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { return ((struct t10_pr_registration *)0); } else { } if (all_tg_pt == 0) { return (pr_reg); } else { } spin_lock(& dev->se_port_lock); __mptr = (struct list_head const *)dev->dev_sep_list.next; lun_tmp = (struct se_lun *)__mptr + 0xfffffffffffffb88UL; __mptr___0 = (struct list_head const *)lun_tmp->lun_dev_link.next; next = (struct se_lun *)__mptr___0 + 0xfffffffffffffb88UL; goto ldv_57597; ldv_57596: tmp = percpu_ref_tryget_live(& lun_tmp->lun_ref); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_57566; } else { } spin_unlock(& dev->se_port_lock); spin_lock(& lun_tmp->lun_deve_lock); __mptr___1 = (struct list_head const *)lun_tmp->lun_deve_list.next; deve_tmp = (struct se_dev_entry *)__mptr___1 + 0xfffffffffffffef0UL; goto ldv_57594; ldv_57593: ; if ((unsigned long )deve_tmp->se_lun_acl == (unsigned long )((struct se_lun_acl *)0)) { goto ldv_57571; } else { } __read_once_size((void const volatile *)(& deve_tmp->se_lun_acl), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned) { tmp___2 = lock_is_held(& lun_tmp->lun_deve_lock.__annonCompField17.__annonCompField16.dep_map); if (tmp___2 == 0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 723, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } lacl_tmp = ________p1; nacl_tmp = lacl_tmp->se_lun_nacl; if ((unsigned long )nacl == (unsigned long )nacl_tmp) { goto ldv_57571; } else { } if ((unsigned long )(nacl_tmp->se_tpg)->se_tpg_tfo != (unsigned long )tfo) { goto ldv_57571; } else { } tmp___4 = strcmp((char const *)(& nacl->initiatorname), (char const *)(& nacl_tmp->initiatorname)); if (tmp___4 != 0) { goto ldv_57571; } else { } kref_get(& deve_tmp->pr_kref); spin_unlock(& lun_tmp->lun_deve_lock); ret = core_scsi3_lunacl_depend_item(deve_tmp); if (ret < 0) { printk("\vcore_scsi3_lunacl_depend_item() failed\n"); percpu_ref_put(& lun_tmp->lun_ref); kref_put(& deve_tmp->pr_kref, & target_pr_kref_release); goto out; } else { } __read_once_size((void const volatile *)(& deve_tmp->se_lun), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___5 = debug_lockdep_rcu_enabled(); if (tmp___5 != 0 && ! 
__warned___0) { tmp___6 = atomic_read((atomic_t const *)(& deve_tmp->pr_kref.refcount)); if (tmp___6 == 0) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 768, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } dest_lun = ________p1___0; pr_reg_atp = __core_scsi3_do_alloc_registration(dev, nacl_tmp, dest_lun, deve_tmp, deve_tmp->mapped_lun, (unsigned char *)0U, sa_res_key, all_tg_pt, aptpl); if ((unsigned long )pr_reg_atp == (unsigned long )((struct t10_pr_registration *)0)) { percpu_ref_put(& lun_tmp->lun_ref); core_scsi3_lunacl_undepend_item(deve_tmp); goto out; } else { } list_add_tail(& pr_reg_atp->pr_reg_atp_mem_list, & pr_reg->pr_reg_atp_list); spin_lock(& lun_tmp->lun_deve_lock); ldv_57571: __mptr___2 = (struct list_head const *)deve_tmp->lun_link.next; deve_tmp = (struct se_dev_entry *)__mptr___2 + 0xfffffffffffffef0UL; ldv_57594: ; if ((unsigned long )(& deve_tmp->lun_link) != (unsigned long )(& lun_tmp->lun_deve_list)) { goto ldv_57593; } else { } spin_unlock(& lun_tmp->lun_deve_lock); spin_lock(& dev->se_port_lock); percpu_ref_put(& lun_tmp->lun_ref); ldv_57566: lun_tmp = next; __mptr___3 = (struct list_head const *)next->lun_dev_link.next; next = (struct se_lun *)__mptr___3 + 0xfffffffffffffb88UL; ldv_57597: ; if ((unsigned long )(& lun_tmp->lun_dev_link) != (unsigned long )(& dev->dev_sep_list)) { goto ldv_57596; } else { } spin_unlock(& dev->se_port_lock); return (pr_reg); out: __mptr___4 = (struct list_head const *)pr_reg->pr_reg_atp_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___4 + 0xfffffffffffffd50UL; __mptr___5 = (struct list_head const *)pr_reg_tmp->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___5 + 0xfffffffffffffd50UL; goto ldv_57606; ldv_57605: list_del(& pr_reg_tmp->pr_reg_atp_mem_list); core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg_tmp); pr_reg_tmp = pr_reg_tmp_safe; __mptr___6 = (struct list_head const *)pr_reg_tmp_safe->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___6 + 0xfffffffffffffd50UL; ldv_57606: ; if ((unsigned long )(& pr_reg_tmp->pr_reg_atp_mem_list) != (unsigned long )(& pr_reg->pr_reg_atp_list)) { goto ldv_57605; } else { } kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg); return ((struct t10_pr_registration *)0); } } int core_scsi3_alloc_aptpl_registration(struct t10_reservation *pr_tmpl , u64 sa_res_key , unsigned char *i_port , unsigned char *isid , u64 mapped_lun , unsigned char *t_port , u16 tpgt , u64 target_lun , int res_holder , int all_tg_pt , u8 type ) { struct t10_pr_registration *pr_reg ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; { if (((unsigned long )i_port == (unsigned long )((unsigned char *)0U) || (unsigned long )t_port == (unsigned long )((unsigned char *)0U)) || sa_res_key == 0ULL) { printk("\vIllegal parameters for APTPL registration\n"); return (-22); } else { } tmp = kmem_cache_zalloc(t10_pr_reg_cache, 208U); pr_reg = (struct t10_pr_registration *)tmp; if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vUnable to allocate struct t10_pr_registration\n"); return (-12); } else { } INIT_LIST_HEAD(& pr_reg->pr_reg_list); INIT_LIST_HEAD(& 
pr_reg->pr_reg_abort_list); INIT_LIST_HEAD(& pr_reg->pr_reg_aptpl_list); INIT_LIST_HEAD(& pr_reg->pr_reg_atp_list); INIT_LIST_HEAD(& pr_reg->pr_reg_atp_mem_list); atomic_set(& pr_reg->pr_res_holders, 0); pr_reg->pr_reg_nacl = (struct se_node_acl *)0; pr_reg->pr_reg_deve = (struct se_dev_entry *)0; pr_reg->pr_res_mapped_lun = mapped_lun; pr_reg->pr_aptpl_target_lun = target_lun; pr_reg->pr_res_key = sa_res_key; pr_reg->pr_reg_all_tg_pt = all_tg_pt; pr_reg->pr_reg_aptpl = 1; pr_reg->pr_res_scope = 0; pr_reg->pr_res_type = (int )type; if ((unsigned long )isid != (unsigned long )((unsigned char *)0U)) { pr_reg->pr_reg_bin_isid = get_unaligned_be64((void const *)isid); snprintf((char *)(& pr_reg->pr_reg_isid), 16UL, "%s", isid); pr_reg->isid_present_at_reg = 1; } else { } snprintf((char *)(& pr_reg->pr_iport), 256UL, "%s", i_port); snprintf((char *)(& pr_reg->pr_tport), 256UL, "%s", t_port); pr_reg->pr_reg_tpgt = tpgt; pr_reg->pr_res_holder = res_holder; list_add_tail(& pr_reg->pr_reg_aptpl_list, & pr_tmpl->aptpl_reg_list); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_alloc_aptpl_registration"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR APTPL Successfully added registration%s from metadata\n"; descriptor.lineno = 869U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "SPC-3 PR APTPL Successfully added registration%s from metadata\n", res_holder != 0 ? (char *)"+reservation" : (char *)""); } else { } return (0); } } static void core_scsi3_aptpl_reserve(struct se_device *dev , struct se_portal_group *tpg , struct se_node_acl *node_acl , struct t10_pr_registration *pr_reg ) { char i_buf[21U] ; struct _ddebug descriptor ; unsigned char *tmp ; char *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; char *tmp___2 ; long tmp___3 ; { memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); spin_lock(& dev->dev_reservation_lock); dev->dev_pr_res_holder = pr_reg; spin_unlock(& dev->dev_reservation_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_aptpl_reserve"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: APTPL RESERVE created new reservation holder TYPE: %s ALL_TG_PT: %d\n"; descriptor.lineno = 892U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp = core_scsi3_pr_dump_type(pr_reg->pr_res_type); tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: APTPL RESERVE created new reservation holder TYPE: %s ALL_TG_PT: %d\n", tmp___0, tmp, pr_reg->pr_reg_all_tg_pt != 0); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_scsi3_aptpl_reserve"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format 
= "SPC-3 PR [%s] RESERVE Node: %s%s\n"; descriptor___0.lineno = 895U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] RESERVE Node: %s%s\n", tmp___2, (char *)(& node_acl->initiatorname), (char *)(& i_buf)); } else { } return; } } static void __core_scsi3_add_registration(struct se_device *dev , struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , enum register_type register_type , int register_move ) ; static int __core_scsi3_check_aptpl_registration(struct se_device *dev , struct se_portal_group *tpg , struct se_lun *lun , u64 target_lun , struct se_node_acl *nacl , u64 mapped_lun ) { struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct t10_reservation *pr_tmpl ; unsigned char i_port[256U] ; unsigned char t_port[256U] ; u16 tpgt ; char *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; { pr_tmpl = & dev->t10_pr; memset((void *)(& i_port), 0, 256UL); memset((void *)(& t_port), 0, 256UL); snprintf((char *)(& i_port), 256UL, "%s", (char *)(& nacl->initiatorname)); tmp = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); snprintf((char *)(& t_port), 256UL, "%s", tmp); tpgt = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); spin_lock(& pr_tmpl->aptpl_reg_lock); __mptr = (struct list_head const *)pr_tmpl->aptpl_reg_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd70UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_aptpl_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd70UL; goto ldv_57661; ldv_57660: tmp___0 = strcmp((char const *)(& pr_reg->pr_iport), (char const *)(& i_port)); if (tmp___0 == 0 && pr_reg->pr_res_mapped_lun == mapped_lun) { tmp___1 = strcmp((char const *)(& pr_reg->pr_tport), (char const *)(& t_port)); if (tmp___1 == 0) { if ((int )pr_reg->pr_reg_tpgt == (int )tpgt) { if (pr_reg->pr_aptpl_target_lun == target_lun) { pr_reg->pr_reg_nacl = nacl; pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi; list_del(& pr_reg->pr_reg_aptpl_list); spin_unlock(& pr_tmpl->aptpl_reg_lock); __core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0); if (pr_reg->pr_res_holder != 0) { core_scsi3_aptpl_reserve(dev, tpg, nacl, pr_reg); } else { } spin_lock(& pr_tmpl->aptpl_reg_lock); pr_tmpl->pr_aptpl_active = 1; } else { } } else { } } else { } } else { } pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_aptpl_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd70UL; ldv_57661: ; if ((unsigned long )(& pr_reg->pr_reg_aptpl_list) != (unsigned long )(& pr_tmpl->aptpl_reg_list)) { goto ldv_57660; } else { } spin_unlock(& pr_tmpl->aptpl_reg_lock); return (0); } } int core_scsi3_check_aptpl_registration(struct se_device *dev , struct se_portal_group *tpg , struct se_lun *lun , struct se_node_acl *nacl , u64 mapped_lun ) { int tmp ; { if ((int )dev->dev_reservation_flags & 1) { return (0); } else { } tmp = __core_scsi3_check_aptpl_registration(dev, tpg, lun, lun->unpacked_lun, nacl, mapped_lun); return (tmp); } } static void __core_scsi3_dump_registration(struct target_core_fabric_ops const *tfo , struct se_device *dev , struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , enum register_type register_type ) { struct se_portal_group *se_tpg ; char i_buf[21U] ; struct _ddebug descriptor ; char *tmp ; 
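/* Editorial annotation: __core_scsi3_dump_registration(), whose locals are declared
 * here, is purely diagnostic -- each _ddebug 'descriptor' is an expanded pr_debug()
 * site reporting the new registration (fabric name, target port, ALL_TG_PT scope,
 * SA res key, PR generation, APTPL); no reservation state is modified by this
 * function. */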
long tmp___0 ; struct _ddebug descriptor___0 ; u16 tmp___1 ; char *tmp___2 ; char *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___1 ; char *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___2 ; char *tmp___7 ; long tmp___8 ; { se_tpg = nacl->se_tpg; memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); descriptor.modname = "target_core_mod"; descriptor.function = "__core_scsi3_dump_registration"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: REGISTER%s Initiator Node: %s%s\n"; descriptor.lineno = 1003U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: REGISTER%s Initiator Node: %s%s\n", tmp, (unsigned int )register_type != 2U ? ((unsigned int )register_type == 1U ? (char *)"_AND_IGNORE_EXISTING_KEY" : (char *)"") : (char *)"_AND_MOVE", (char *)(& nacl->initiatorname), (char *)(& i_buf)); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "__core_scsi3_dump_registration"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n"; descriptor___0.lineno = 1006U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___1 = (*(tfo->tpg_get_tag))(se_tpg); tmp___2 = (*(tfo->tpg_get_wwn))(se_tpg); tmp___3 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n", tmp___3, tmp___2, (int )tmp___1); } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "__core_scsi3_dump_registration"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___1.format = "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target Port(s)\n"; descriptor___1.lineno = 1010U; descriptor___1.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___1, "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target Port(s)\n", tmp___5, pr_reg->pr_reg_all_tg_pt != 0 ? 
(char *)"ALL" : (char *)"SINGLE", (char const *)(& (dev->transport)->name)); } else { } descriptor___2.modname = "target_core_mod"; descriptor___2.function = "__core_scsi3_dump_registration"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___2.format = "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration: 0x%08x APTPL: %d\n"; descriptor___2.lineno = 1014U; descriptor___2.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___2, "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration: 0x%08x APTPL: %d\n", tmp___7, pr_reg->pr_res_key, pr_reg->pr_res_generation, pr_reg->pr_reg_aptpl); } else { } return; } } static void __core_scsi3_add_registration(struct se_device *dev , struct se_node_acl *nacl , struct t10_pr_registration *pr_reg , enum register_type register_type , int register_move ) { struct target_core_fabric_ops const *tfo ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration *pr_reg_tmp_safe ; struct t10_reservation *pr_tmpl ; struct se_dev_entry *deve ; u32 tmp ; u32 tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct se_node_acl *nacl_tmp ; struct list_head const *__mptr___1 ; { tfo = (nacl->se_tpg)->se_tpg_tfo; pr_tmpl = & dev->t10_pr; if (register_move != 0) { tmp = dev->t10_pr.pr_generation; dev->t10_pr.pr_generation = dev->t10_pr.pr_generation + 1U; pr_reg->pr_res_generation = tmp; } else { tmp___0 = core_scsi3_pr_generation(dev); pr_reg->pr_res_generation = tmp___0; } spin_lock(& pr_tmpl->registration_lock); list_add_tail(& pr_reg->pr_reg_list, & pr_tmpl->registration_list); __core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type); spin_unlock(& pr_tmpl->registration_lock); rcu_read_lock___1(); deve = pr_reg->pr_reg_deve; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { set_bit(1L, (unsigned long volatile *)(& deve->deve_flags)); } else { } rcu_read_unlock___1(); if (pr_reg->pr_reg_all_tg_pt == 0 || register_move != 0) { return; } else { } __mptr = (struct list_head const *)pr_reg->pr_reg_atp_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd50UL; __mptr___0 = (struct list_head const *)pr_reg_tmp->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd50UL; goto ldv_57704; ldv_57703: nacl_tmp = pr_reg_tmp->pr_reg_nacl; list_del(& pr_reg_tmp->pr_reg_atp_mem_list); pr_reg_tmp->pr_res_generation = core_scsi3_pr_generation(dev); spin_lock(& pr_tmpl->registration_lock); list_add_tail(& pr_reg_tmp->pr_reg_list, & pr_tmpl->registration_list); __core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp, register_type); spin_unlock(& pr_tmpl->registration_lock); rcu_read_lock___1(); deve = pr_reg_tmp->pr_reg_deve; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { set_bit(1L, (unsigned long volatile *)(& deve->deve_flags)); } else { } rcu_read_unlock___1(); core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); pr_reg_tmp = pr_reg_tmp_safe; __mptr___1 = (struct list_head const *)pr_reg_tmp_safe->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd50UL; ldv_57704: ; if ((unsigned long )(& pr_reg_tmp->pr_reg_atp_mem_list) != (unsigned long 
)(& pr_reg->pr_reg_atp_list)) { goto ldv_57703; } else { } return; } } static int core_scsi3_alloc_registration(struct se_device *dev , struct se_node_acl *nacl , struct se_lun *lun , struct se_dev_entry *deve , u64 mapped_lun , unsigned char *isid , u64 sa_res_key , int all_tg_pt , int aptpl , enum register_type register_type , int register_move ) { struct t10_pr_registration *pr_reg ; { pr_reg = __core_scsi3_alloc_registration(dev, nacl, lun, deve, mapped_lun, isid, sa_res_key, all_tg_pt, aptpl); if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { return (-1); } else { } __core_scsi3_add_registration(dev, nacl, pr_reg, register_type, register_move); return (0); } } static struct t10_pr_registration *__core_scsi3_locate_pr_reg(struct se_device *dev , struct se_node_acl *nacl , unsigned char *isid ) { struct t10_reservation *pr_tmpl ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct se_portal_group *tpg ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; struct list_head const *__mptr___1 ; { pr_tmpl = & dev->t10_pr; spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_57737; ldv_57736: ; if ((unsigned long )pr_reg->pr_reg_nacl != (unsigned long )nacl) { goto ldv_57735; } else { } tpg = (pr_reg->pr_reg_nacl)->se_tpg; if (! pr_reg->isid_present_at_reg) { if ((unsigned long )(tpg->se_tpg_tfo)->sess_get_initiator_sid != (unsigned long )((u32 (*/* const */)(struct se_session * , unsigned char * , u32 ))0)) { if (dev->dev_attrib.enforce_pr_isids != 0) { goto ldv_57735; } else { } } else { } atomic_inc_mb(& pr_reg->pr_res_holders); spin_unlock(& pr_tmpl->registration_lock); return (pr_reg); } else { } if ((unsigned long )isid == (unsigned long )((unsigned char *)0U)) { goto ldv_57735; } else { } tmp = strcmp((char const *)isid, (char const *)(& pr_reg->pr_reg_isid)); if (tmp != 0) { goto ldv_57735; } else { } atomic_inc_mb(& pr_reg->pr_res_holders); spin_unlock(& pr_tmpl->registration_lock); return (pr_reg); ldv_57735: pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_57737: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_57736; } else { } spin_unlock(& pr_tmpl->registration_lock); return ((struct t10_pr_registration *)0); } } static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *dev , struct se_node_acl *nacl , struct se_session *sess ) { struct se_portal_group *tpg ; unsigned char buf[16U] ; unsigned char *isid_ptr ; struct t10_pr_registration *tmp ; { tpg = nacl->se_tpg; isid_ptr = (unsigned char *)0U; if ((unsigned long )(tpg->se_tpg_tfo)->sess_get_initiator_sid != (unsigned long )((u32 (*/* const */)(struct se_session * , unsigned char * , u32 ))0)) { memset((void *)(& buf), 0, 16UL); (*((tpg->se_tpg_tfo)->sess_get_initiator_sid))(sess, (unsigned char *)(& buf), 16U); isid_ptr = (unsigned char *)(& buf); } else { } tmp = __core_scsi3_locate_pr_reg(dev, nacl, isid_ptr); return (tmp); } } static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg ) { { atomic_dec_mb(& pr_reg->pr_res_holders); return; } } static 
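/* Editorial annotation: core_scsi3_check_implicit_release(), defined next, decides
 * whether an UNREGISTER implicitly releases the current reservation: it returns 1
 * (after calling __core_scsi3_complete_pro_release()) when the departing registration
 * is the reservation holder, -1 when an ALL_TG_PT=1 unregister collides with a
 * reservation held with the same key via another SCSI initiator port, and 0
 * otherwise. */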
int core_scsi3_check_implicit_release(struct se_device *dev , struct t10_pr_registration *pr_reg ) { struct se_node_acl *nacl ; struct t10_pr_registration *pr_res_holder ; int ret ; int tmp ; { nacl = pr_reg->pr_reg_nacl; ret = 0; spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { spin_unlock(& dev->dev_reservation_lock); return (ret); } else { } if ((unsigned long )pr_res_holder == (unsigned long )pr_reg) { __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1); ret = 1; } else if (pr_reg->pr_reg_all_tg_pt != 0) { tmp = strcmp((char const *)(& (pr_res_holder->pr_reg_nacl)->initiatorname), (char const *)(& (pr_reg->pr_reg_nacl)->initiatorname)); if (tmp == 0) { if (pr_res_holder->pr_res_key == pr_reg->pr_res_key) { printk("\vSPC-3 PR: Unable to perform ALL_TG_PT=1 UNREGISTER while existing reservation with matching key 0x%016Lx is present from another SCSI Initiator Port\n", pr_reg->pr_res_key); ret = -1; } else { } } else { } } else { } spin_unlock(& dev->dev_reservation_lock); return (ret); } } static void __core_scsi3_free_registration(struct se_device *dev , struct t10_pr_registration *pr_reg , struct list_head *preempt_and_abort_list , int dec_holders ) { struct target_core_fabric_ops const *tfo ; struct t10_reservation *pr_tmpl ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; char i_buf[21U] ; int tmp ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; int tmp___2 ; struct _ddebug descriptor___0 ; char *tmp___3 ; long tmp___4 ; struct _ddebug descriptor___1 ; char *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___2 ; char *tmp___7 ; long tmp___8 ; { tfo = ((pr_reg->pr_reg_nacl)->se_tpg)->se_tpg_tfo; pr_tmpl = & dev->t10_pr; nacl = pr_reg->pr_reg_nacl; memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); tmp = list_empty((struct list_head const *)(& pr_reg->pr_reg_list)); if (tmp == 0) { list_del(& pr_reg->pr_reg_list); } else { } if (dec_holders != 0) { core_scsi3_put_pr_reg(pr_reg); } else { } spin_unlock(& pr_tmpl->registration_lock); goto ldv_57771; ldv_57770: descriptor.modname = "target_core_mod"; descriptor.function = "__core_scsi3_free_registration"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] waiting for pr_res_holders\n"; descriptor.lineno = 1288U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] waiting for pr_res_holders\n", tmp___0); } else { } cpu_relax(); ldv_57771: tmp___2 = atomic_read((atomic_t const *)(& pr_reg->pr_res_holders)); if (tmp___2 != 0) { goto ldv_57770; } else { } rcu_read_lock___1(); deve = target_nacl_find_deve(nacl, pr_reg->pr_res_mapped_lun); if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { clear_bit(1L, (unsigned long volatile *)(& deve->deve_flags)); } else { } rcu_read_unlock___1(); spin_lock(& pr_tmpl->registration_lock); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "__core_scsi3_free_registration"; descriptor___0.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] Service Action: UNREGISTER Initiator Node: %s%s\n"; descriptor___0.lineno = 1302U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] Service Action: UNREGISTER Initiator Node: %s%s\n", tmp___3, (char *)(& (pr_reg->pr_reg_nacl)->initiatorname), (char *)(& i_buf)); } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "__core_scsi3_free_registration"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___1.format = "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target Port(s)\n"; descriptor___1.lineno = 1306U; descriptor___1.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___1, "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target Port(s)\n", tmp___5, pr_reg->pr_reg_all_tg_pt != 0 ? (char *)"ALL" : (char *)"SINGLE", (char const *)(& (dev->transport)->name)); } else { } descriptor___2.modname = "target_core_mod"; descriptor___2.function = "__core_scsi3_free_registration"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___2.format = "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration: 0x%08x\n"; descriptor___2.lineno = 1309U; descriptor___2.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___2, "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration: 0x%08x\n", tmp___7, pr_reg->pr_res_key, pr_reg->pr_res_generation); } else { } if ((unsigned long )preempt_and_abort_list == (unsigned long )((struct list_head *)0)) { pr_reg->pr_reg_deve = (struct se_dev_entry *)0; pr_reg->pr_reg_nacl = (struct se_node_acl *)0; kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg); return; } else { } list_add_tail(& pr_reg->pr_reg_abort_list, preempt_and_abort_list); return; } } void core_scsi3_free_pr_reg_from_nacl(struct se_device *dev , struct se_node_acl *nacl ) { struct t10_reservation *pr_tmpl ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration *pr_res_holder ; bool free_reg ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { pr_tmpl = & dev->t10_pr; free_reg = 0; spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0) && (unsigned long )pr_res_holder->pr_reg_nacl == (unsigned long )nacl) { __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1); free_reg = 1; } else { } spin_unlock(& dev->dev_reservation_lock); spin_lock(& pr_tmpl->registration_lock); if ((unsigned long )pr_res_holder != (unsigned long )((struct 
t10_pr_registration *)0) && (int )free_reg) { __core_scsi3_free_registration(dev, pr_res_holder, (struct list_head *)0, 0); } else { } __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_57793; ldv_57792: ; if ((unsigned long )pr_reg->pr_reg_nacl != (unsigned long )nacl) { goto ldv_57791; } else { } __core_scsi3_free_registration(dev, pr_reg, (struct list_head *)0, 0); ldv_57791: pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_57793: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_57792; } else { } spin_unlock(& pr_tmpl->registration_lock); return; } } void core_scsi3_free_all_registrations(struct se_device *dev ) { struct t10_reservation *pr_tmpl ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration *pr_res_holder ; struct se_node_acl *pr_res_nacl ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { pr_tmpl = & dev->t10_pr; spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { pr_res_nacl = pr_res_holder->pr_reg_nacl; __core_scsi3_complete_pro_release(dev, pr_res_nacl, pr_res_holder, 0, 0); } else { } spin_unlock(& dev->dev_reservation_lock); spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_57810; ldv_57809: __core_scsi3_free_registration(dev, pr_reg, (struct list_head *)0, 0); pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_57810: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_57809; } else { } spin_unlock(& pr_tmpl->registration_lock); spin_lock(& pr_tmpl->aptpl_reg_lock); __mptr___2 = (struct list_head const *)pr_tmpl->aptpl_reg_list.next; pr_reg = (struct t10_pr_registration *)__mptr___2 + 0xfffffffffffffd70UL; __mptr___3 = (struct list_head const *)pr_reg->pr_reg_aptpl_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___3 + 0xfffffffffffffd70UL; goto ldv_57819; ldv_57818: list_del(& pr_reg->pr_reg_aptpl_list); kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg); pr_reg = pr_reg_tmp; __mptr___4 = (struct list_head const *)pr_reg_tmp->pr_reg_aptpl_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___4 + 0xfffffffffffffd70UL; ldv_57819: ; if ((unsigned long )(& pr_reg->pr_reg_aptpl_list) != (unsigned long )(& pr_tmpl->aptpl_reg_list)) { goto ldv_57818; } else { } spin_unlock(& pr_tmpl->aptpl_reg_lock); return; } } static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg ) { int tmp ; { tmp = target_depend_item(& tpg->tpg_group.cg_item); return (tmp); } } 
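/* Editorial annotation: the core_scsi3 depend_item()/undepend_item() helpers defined
 * around here take and drop configfs item references (target_depend_item() and
 * target_undepend_item()) on the portal group, node ACL and LUN ACL so those objects
 * cannot be torn down while persistent-reservation processing still uses them; the
 * undepend variants also drop the matching pr_ref_count / pr_kref reference. */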
static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg ) { { target_undepend_item(& tpg->tpg_group.cg_item); atomic_dec_mb(& tpg->tpg_pr_ref_count); return; } } static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl ) { int tmp ; { if ((int )nacl->dynamic_node_acl) { return (0); } else { } tmp = target_depend_item(& nacl->acl_group.cg_item); return (tmp); } } static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl ) { { if (! nacl->dynamic_node_acl) { target_undepend_item(& nacl->acl_group.cg_item); } else { } atomic_dec_mb(& nacl->acl_pr_ref_count); return; } } static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve ) { struct se_lun_acl *lun_acl ; struct se_node_acl *nacl ; struct se_portal_group *tpg ; struct se_lun_acl *________p1 ; struct se_lun_acl *_________p1 ; union __anonunion___u_386___0 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __read_once_size((void const volatile *)(& se_deve->se_lun_acl), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = atomic_read((atomic_t const *)(& se_deve->pr_kref.refcount)); if (tmp___0 == 0) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 1427, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } lun_acl = ________p1; if ((unsigned long )lun_acl == (unsigned long )((struct se_lun_acl *)0)) { return (0); } else { } nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; tmp___2 = target_depend_item(& lun_acl->se_lun_group.cg_item); return (tmp___2); } } static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve ) { struct se_lun_acl *lun_acl ; struct se_node_acl *nacl ; struct se_portal_group *tpg ; struct se_lun_acl *________p1 ; struct se_lun_acl *_________p1 ; union __anonunion___u_388___0 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __read_once_size((void const volatile *)(& se_deve->se_lun_acl), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = atomic_read((atomic_t const *)(& se_deve->pr_kref.refcount)); if (tmp___0 == 0) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 1446, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } lun_acl = ________p1; if ((unsigned long )lun_acl == (unsigned long )((struct se_lun_acl *)0)) { kref_put(& se_deve->pr_kref, & target_pr_kref_release); return; } else { } nacl = lun_acl->se_lun_nacl; tpg = nacl->se_tpg; target_undepend_item(& lun_acl->se_lun_group.cg_item); kref_put(& se_deve->pr_kref, & target_pr_kref_release); return; } } static sense_reason_t core_scsi3_decode_spec_i_port(struct se_cmd *cmd , struct se_portal_group *tpg , unsigned char *l_isid , u64 sa_res_key , int all_tg_pt , int aptpl ) { struct se_device *dev ; struct se_portal_group *dest_tpg ; struct se_portal_group *tmp_tpg ; struct se_session *se_sess ; struct se_node_acl *dest_node_acl ; struct se_dev_entry *dest_se_deve ; struct t10_pr_registration *dest_pr_reg ; struct t10_pr_registration *local_pr_reg ; struct t10_pr_registration *pr_reg_e ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration *pr_reg_tmp_safe ; struct list_head tid_dest_list ; struct pr_transport_id_holder *tidh_new ; struct pr_transport_id_holder *tidh ; struct pr_transport_id_holder *tidh_tmp ; unsigned char *buf ; unsigned char *ptr ; unsigned char proto_ident ; unsigned char const *i_str ; char *iport_ptr ; char i_buf[21U] ; sense_reason_t ret ; u32 tpdl ; u32 tid_len ; u32 dest_rtpi ; void *tmp ; void *tmp___0 ; struct se_lun *dest_lun ; struct se_lun *tmp_lun ; struct list_head const *__mptr ; char const *tmp___1 ; int tmp___2 ; int tmp___3 ; struct _ddebug descriptor ; char *tmp___4 ; long tmp___5 ; struct list_head const *__mptr___0 ; struct _ddebug descriptor___0 ; char *tmp___6 ; long tmp___7 ; char *tmp___8 ; int tmp___9 ; struct _ddebug descriptor___1 ; char *tmp___10 ; long tmp___11 ; void *tmp___12 ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_390___0 __u ; bool __warned ; int tmp___13 ; int tmp___14 ; int tmp___15 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct _ddebug descriptor___2 ; char *tmp___16 ; long tmp___17 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; struct list_head const *__mptr___7 ; struct list_head const *__mptr___8 ; struct list_head const *__mptr___9 ; { dev = cmd->se_dev; dest_tpg = (struct se_portal_group *)0; se_sess = cmd->se_sess; dest_node_acl = (struct se_node_acl *)0; dest_se_deve = (struct se_dev_entry *)0; tid_dest_list.next = & tid_dest_list; tid_dest_list.prev = & tid_dest_list; iport_ptr = (char *)0; tid_len = 0U; dest_rtpi = 0U; tmp = kzalloc(48UL, 208U); tidh_new = (struct pr_transport_id_holder *)tmp; if ((unsigned long )tidh_new == (unsigned long )((struct pr_transport_id_holder *)0)) { printk("\vUnable to allocate tidh_new\n"); return (10U); } else { } INIT_LIST_HEAD(& tidh_new->dest_list); tidh_new->dest_tpg = tpg; tidh_new->dest_node_acl = se_sess->se_node_acl; local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, cmd->se_lun, (struct se_dev_entry *)0, cmd->orig_fe_lun, l_isid, sa_res_key, 
all_tg_pt, aptpl); if ((unsigned long )local_pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { kfree((void const *)tidh_new); return (10U); } else { } tidh_new->dest_pr_reg = local_pr_reg; tidh_new->dest_se_deve = (struct se_dev_entry *)0; list_add_tail(& tidh_new->dest_list, & tid_dest_list); if (cmd->data_length <= 27U) { printk("\fSPC-PR: Received PR OUT parameter list length too small: %u\n", cmd->data_length); ret = 9U; goto out; } else { } tmp___0 = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp___0; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { ret = 10U; goto out; } else { } tpdl = (u32 )((int )*(buf + 24UL) << 24); tpdl = (u32 )((int )*(buf + 25UL) << 16) | tpdl; tpdl = (u32 )((int )*(buf + 26UL) << 8) | tpdl; tpdl = (u32 )*(buf + 27UL) | tpdl; if (tpdl + 28U != cmd->data_length) { printk("\vSPC-3 PR: Illegal tpdl: %u + 28 byte header does not equal CDB data_length: %u\n", tpdl, cmd->data_length); ret = 9U; goto out_unmap; } else { } ptr = buf + 28UL; goto ldv_57914; ldv_57925: proto_ident = (unsigned int )*ptr & 15U; dest_tpg = (struct se_portal_group *)0; spin_lock(& dev->se_port_lock); __mptr = (struct list_head const *)dev->dev_sep_list.next; tmp_lun = (struct se_lun *)__mptr + 0xfffffffffffffb88UL; goto ldv_57911; ldv_57910: tmp_tpg = tmp_lun->lun_tpg; if (tmp_tpg->proto_id != (int )proto_ident) { goto ldv_57906; } else { } dest_rtpi = (u32 )tmp_lun->lun_rtpi; tmp___1 = target_parse_pr_out_transport_id(tmp_tpg, (char const *)ptr, & tid_len, & iport_ptr); i_str = (unsigned char const *)tmp___1; if ((unsigned long )i_str == (unsigned long )((unsigned char const *)0U)) { goto ldv_57906; } else { } atomic_inc_mb(& tmp_tpg->tpg_pr_ref_count); spin_unlock(& dev->se_port_lock); tmp___2 = core_scsi3_tpg_depend_item(tmp_tpg); if (tmp___2 != 0) { printk("\v core_scsi3_tpg_depend_item() for tmp_tpg\n"); atomic_dec_mb(& tmp_tpg->tpg_pr_ref_count); ret = 10U; goto out_unmap; } else { } ldv_mutex_lock_239(& tmp_tpg->acl_node_mutex); dest_node_acl = __core_tpg_get_initiator_node_acl(tmp_tpg, (char const *)i_str); if ((unsigned long )dest_node_acl != (unsigned long )((struct se_node_acl *)0)) { atomic_inc_mb(& dest_node_acl->acl_pr_ref_count); } else { } ldv_mutex_unlock_240(& tmp_tpg->acl_node_mutex); if ((unsigned long )dest_node_acl == (unsigned long )((struct se_node_acl *)0)) { core_scsi3_tpg_undepend_item(tmp_tpg); spin_lock(& dev->se_port_lock); goto ldv_57906; } else { } tmp___3 = core_scsi3_nodeacl_depend_item(dest_node_acl); if (tmp___3 != 0) { printk("\vconfigfs_depend_item() failed for dest_node_acl->acl_group\n"); atomic_dec_mb(& dest_node_acl->acl_pr_ref_count); core_scsi3_tpg_undepend_item(tmp_tpg); ret = 10U; goto out_unmap; } else { } dest_tpg = tmp_tpg; descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_decode_spec_i_port"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %hu\n"; descriptor.lineno = 1615U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { tmp___4 = (*((dest_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR SPEC_I_PT: Located %s Node: %s Port RTPI: %hu\n", tmp___4, (char *)(& dest_node_acl->initiatorname), dest_rtpi); } else { } spin_lock(& 
dev->se_port_lock); goto ldv_57909; ldv_57906: __mptr___0 = (struct list_head const *)tmp_lun->lun_dev_link.next; tmp_lun = (struct se_lun *)__mptr___0 + 0xfffffffffffffb88UL; ldv_57911: ; if ((unsigned long )(& tmp_lun->lun_dev_link) != (unsigned long )(& dev->dev_sep_list)) { goto ldv_57910; } else { } ldv_57909: spin_unlock(& dev->se_port_lock); if ((unsigned long )dest_tpg == (unsigned long )((struct se_portal_group *)0)) { printk("\vSPC-3 PR SPEC_I_PT: Unable to locate dest_tpg\n"); ret = 9U; goto out_unmap; } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_scsi3_decode_spec_i_port"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u tid_len: %d for %s + %s\n"; descriptor___0.lineno = 1632U; descriptor___0.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___7 != 0L) { tmp___6 = (*((dest_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u tid_len: %d for %s + %s\n", tmp___6, cmd->data_length, tpdl, tid_len, i_str, iport_ptr); } else { } if (tid_len > tpdl) { printk("\vSPC-3 PR SPEC_I_PT: Illegal tid_len: %u for Transport ID: %s\n", tid_len, ptr); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = 9U; goto out_unmap; } else { } dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, (int )((u16 )dest_rtpi)); if ((unsigned long )dest_se_deve == (unsigned long )((struct se_dev_entry *)0)) { tmp___8 = (*((dest_tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vUnable to locate %s dest_se_deve from destination RTPI: %hu\n", tmp___8, dest_rtpi); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = 9U; goto out_unmap; } else { } tmp___9 = core_scsi3_lunacl_depend_item(dest_se_deve); if (tmp___9 != 0) { printk("\vcore_scsi3_lunacl_depend_item() failed\n"); kref_put(& dest_se_deve->pr_kref, & target_pr_kref_release); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = 10U; goto out_unmap; } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "core_scsi3_decode_spec_i_port"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___1.format = "SPC-3 PR SPEC_I_PT: Located %s Node: %s dest_se_deve mapped_lun: %llu\n"; descriptor___1.lineno = 1674U; descriptor___1.flags = 0U; tmp___11 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___11 != 0L) { tmp___10 = (*((dest_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor___1, "SPC-3 PR SPEC_I_PT: Located %s Node: %s dest_se_deve mapped_lun: %llu\n", tmp___10, (char *)(& dest_node_acl->initiatorname), dest_se_deve->mapped_lun); } else { } pr_reg_e = __core_scsi3_locate_pr_reg(dev, dest_node_acl, (unsigned char *)iport_ptr); if ((unsigned long )pr_reg_e != (unsigned long )((struct t10_pr_registration *)0)) { core_scsi3_put_pr_reg(pr_reg_e); core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); 
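/*
 * A registration already exists for this Transport ID: drop the remaining
 * tpg dependency as well, advance ptr past this descriptor and continue
 * with the next Transport ID in the parameter list.
 */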
core_scsi3_tpg_undepend_item(dest_tpg); ptr = ptr + (unsigned long )tid_len; tpdl = tpdl - tid_len; tid_len = 0U; goto ldv_57914; } else { } tmp___12 = kzalloc(48UL, 208U); tidh_new = (struct pr_transport_id_holder *)tmp___12; if ((unsigned long )tidh_new == (unsigned long )((struct pr_transport_id_holder *)0)) { printk("\vUnable to allocate tidh_new\n"); core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ret = 10U; goto out_unmap; } else { } INIT_LIST_HEAD(& tidh_new->dest_list); tidh_new->dest_tpg = dest_tpg; tidh_new->dest_node_acl = dest_node_acl; tidh_new->dest_se_deve = dest_se_deve; __read_once_size((void const volatile *)(& dest_se_deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___13 = debug_lockdep_rcu_enabled(); if (tmp___13 != 0 && ! __warned) { tmp___14 = atomic_read((atomic_t const *)(& dest_se_deve->pr_kref.refcount)); if (tmp___14 == 0) { tmp___15 = rcu_read_lock_held(); if (tmp___15 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 1729, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } dest_lun = ________p1; dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_lun, dest_se_deve, dest_se_deve->mapped_lun, (unsigned char *)iport_ptr, sa_res_key, all_tg_pt, aptpl); if ((unsigned long )dest_pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); kfree((void const *)tidh_new); ret = 9U; goto out_unmap; } else { } tidh_new->dest_pr_reg = dest_pr_reg; list_add_tail(& tidh_new->dest_list, & tid_dest_list); ptr = ptr + (unsigned long )tid_len; tpdl = tpdl - tid_len; tid_len = 0U; ldv_57914: ; if (tpdl != 0U) { goto ldv_57925; } else { } transport_kunmap_data_sg(cmd); __mptr___1 = (struct list_head const *)tid_dest_list.next; tidh = (struct pr_transport_id_holder *)__mptr___1 + 0xffffffffffffffe0UL; __mptr___2 = (struct list_head const *)tidh->dest_list.next; tidh_tmp = (struct pr_transport_id_holder *)__mptr___2 + 0xffffffffffffffe0UL; goto ldv_57936; ldv_57935: dest_tpg = tidh->dest_tpg; dest_node_acl = tidh->dest_node_acl; dest_se_deve = tidh->dest_se_deve; dest_pr_reg = tidh->dest_pr_reg; list_del(& tidh->dest_list); kfree((void const *)tidh); memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(dest_pr_reg, (char *)(& i_buf), 21U); __core_scsi3_add_registration(cmd->se_dev, dest_node_acl, dest_pr_reg, 0, 0); descriptor___2.modname = "target_core_mod"; descriptor___2.function = "core_scsi3_decode_spec_i_port"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___2.format = "SPC-3 PR [%s] SPEC_I_PT: Successfully registered Transport ID for Node: %s%s Mapped LUN: %llu\n"; descriptor___2.lineno = 1786U; descriptor___2.flags = 0U; tmp___17 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___17 != 0L) { tmp___16 = (*((dest_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor___2, "SPC-3 PR [%s] SPEC_I_PT: 
Successfully registered Transport ID for Node: %s%s Mapped LUN: %llu\n", tmp___16, (char *)(& dest_node_acl->initiatorname), (char *)(& i_buf), (unsigned long )dest_se_deve != (unsigned long )((struct se_dev_entry *)0) ? dest_se_deve->mapped_lun : 0ULL); } else { } if ((unsigned long )dest_se_deve == (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57934; } else { } core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ldv_57934: tidh = tidh_tmp; __mptr___3 = (struct list_head const *)tidh_tmp->dest_list.next; tidh_tmp = (struct pr_transport_id_holder *)__mptr___3 + 0xffffffffffffffe0UL; ldv_57936: ; if ((unsigned long )(& tidh->dest_list) != (unsigned long )(& tid_dest_list)) { goto ldv_57935; } else { } return (0U); out_unmap: transport_kunmap_data_sg(cmd); out: __mptr___4 = (struct list_head const *)tid_dest_list.next; tidh = (struct pr_transport_id_holder *)__mptr___4 + 0xffffffffffffffe0UL; __mptr___5 = (struct list_head const *)tidh->dest_list.next; tidh_tmp = (struct pr_transport_id_holder *)__mptr___5 + 0xffffffffffffffe0UL; goto ldv_57955; ldv_57954: dest_tpg = tidh->dest_tpg; dest_node_acl = tidh->dest_node_acl; dest_se_deve = tidh->dest_se_deve; dest_pr_reg = tidh->dest_pr_reg; list_del(& tidh->dest_list); kfree((void const *)tidh); __mptr___6 = (struct list_head const *)dest_pr_reg->pr_reg_atp_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___6 + 0xfffffffffffffd50UL; __mptr___7 = (struct list_head const *)pr_reg_tmp->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___7 + 0xfffffffffffffd50UL; goto ldv_57951; ldv_57950: list_del(& pr_reg_tmp->pr_reg_atp_mem_list); core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve); kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg_tmp); pr_reg_tmp = pr_reg_tmp_safe; __mptr___8 = (struct list_head const *)pr_reg_tmp_safe->pr_reg_atp_mem_list.next; pr_reg_tmp_safe = (struct t10_pr_registration *)__mptr___8 + 0xfffffffffffffd50UL; ldv_57951: ; if ((unsigned long )(& pr_reg_tmp->pr_reg_atp_mem_list) != (unsigned long )(& dest_pr_reg->pr_reg_atp_list)) { goto ldv_57950; } else { } kmem_cache_free(t10_pr_reg_cache, (void *)dest_pr_reg); if ((unsigned long )dest_se_deve == (unsigned long )((struct se_dev_entry *)0)) { goto ldv_57953; } else { } core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_tpg); ldv_57953: tidh = tidh_tmp; __mptr___9 = (struct list_head const *)tidh_tmp->dest_list.next; tidh_tmp = (struct pr_transport_id_holder *)__mptr___9 + 0xffffffffffffffe0UL; ldv_57955: ; if ((unsigned long )(& tidh->dest_list) != (unsigned long )(& tid_dest_list)) { goto ldv_57954; } else { } return (ret); } } static int core_scsi3_update_aptpl_buf(struct se_device *dev , unsigned char *buf , u32 pr_aptpl_buf_len ) { struct se_portal_group *tpg ; struct t10_pr_registration *pr_reg ; unsigned char tmp[512U] ; unsigned char isid_buf[32U] ; ssize_t len ; int reg_count ; int ret ; struct list_head const *__mptr ; char *tmp___0 ; char *tmp___1 ; size_t tmp___2 ; int tmp___3 ; u16 tmp___4 ; char *tmp___5 ; char *tmp___6 ; size_t tmp___7 ; int tmp___8 ; struct list_head const *__mptr___0 ; int tmp___9 ; { len = 0L; reg_count = 0; ret = 0; spin_lock(& dev->dev_reservation_lock); spin_lock(& dev->t10_pr.registration_lock); __mptr = (struct list_head const *)dev->t10_pr.registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 
0xfffffffffffffd90UL; goto ldv_57975; ldv_57974: tmp[0] = 0U; isid_buf[0] = 0U; tpg = (pr_reg->pr_reg_nacl)->se_tpg; if ((int )pr_reg->isid_present_at_reg) { snprintf((char *)(& isid_buf), 32UL, "initiator_sid=%s\n", (char *)(& pr_reg->pr_reg_isid)); } else { } if ((unsigned long )dev->dev_pr_res_holder == (unsigned long )pr_reg) { tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); snprintf((char *)(& tmp), 512UL, "PR_REG_START: %d\ninitiator_fabric=%s\ninitiator_node=%s\n%ssa_res_key=%llu\nres_holder=1\nres_type=%02x\nres_scope=%02x\nres_all_tg_pt=%d\nmapped_lun=%llu\n", reg_count, tmp___0, (char *)(& (pr_reg->pr_reg_nacl)->initiatorname), (unsigned char *)(& isid_buf), pr_reg->pr_res_key, pr_reg->pr_res_type, pr_reg->pr_res_scope, pr_reg->pr_reg_all_tg_pt, pr_reg->pr_res_mapped_lun); } else { tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); snprintf((char *)(& tmp), 512UL, "PR_REG_START: %d\ninitiator_fabric=%s\ninitiator_node=%s\n%ssa_res_key=%llu\nres_holder=0\nres_all_tg_pt=%d\nmapped_lun=%llu\n", reg_count, tmp___1, (char *)(& (pr_reg->pr_reg_nacl)->initiatorname), (unsigned char *)(& isid_buf), pr_reg->pr_res_key, pr_reg->pr_reg_all_tg_pt, pr_reg->pr_res_mapped_lun); } tmp___2 = strlen((char const *)(& tmp)); if (tmp___2 + (unsigned long )len >= (unsigned long )pr_aptpl_buf_len) { printk("\vUnable to update renaming APTPL metadata, reallocating larger buffer\n"); ret = -90; goto out; } else { } tmp___3 = sprintf((char *)buf + (unsigned long )len, "%s", (unsigned char *)(& tmp)); len = (ssize_t )tmp___3 + len; tmp___4 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___5 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___6 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); snprintf((char *)(& tmp), 512UL, "target_fabric=%s\ntarget_node=%s\ntpgt=%hu\nport_rtpi=%hu\ntarget_lun=%llu\nPR_REG_END: %d\n", tmp___6, tmp___5, (int )tmp___4, (int )pr_reg->tg_pt_sep_rtpi, pr_reg->pr_aptpl_target_lun, reg_count); tmp___7 = strlen((char const *)(& tmp)); if (tmp___7 + (unsigned long )len >= (unsigned long )pr_aptpl_buf_len) { printk("\vUnable to update renaming APTPL metadata, reallocating larger buffer\n"); ret = -90; goto out; } else { } tmp___8 = sprintf((char *)buf + (unsigned long )len, "%s", (unsigned char *)(& tmp)); len = (ssize_t )tmp___8 + len; reg_count = reg_count + 1; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; ldv_57975: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& dev->t10_pr.registration_list)) { goto ldv_57974; } else { } if (reg_count == 0) { tmp___9 = sprintf((char *)buf + (unsigned long )len, "No Registrations or Reservations"); len = (ssize_t )tmp___9 + len; } else { } out: spin_unlock(& dev->t10_pr.registration_lock); spin_unlock(& dev->dev_reservation_lock); return (ret); } } static int __core_scsi3_write_aptpl_to_file(struct se_device *dev , unsigned char *buf ) { struct t10_wwn *wwn ; struct file *file ; int flags ; char path[512U] ; u32 pr_aptpl_buf_len ; int ret ; size_t tmp ; long tmp___0 ; bool tmp___1 ; size_t tmp___2 ; ssize_t tmp___3 ; struct _ddebug descriptor ; long tmp___4 ; { wwn = & dev->t10_wwn; flags = 578; memset((void *)(& path), 0, 512UL); tmp = strlen((char const *)(& wwn->unit_serial)); if (tmp > 511UL) { printk("\vWWN value for struct se_device does not fit into path buffer\n"); return (-90); } else { } snprintf((char *)(& path), 512UL, "/var/target/pr/aptpl_%s", (char *)(& wwn->unit_serial)); file = filp_open((char const *)(& path), 
flags, 384); tmp___1 = IS_ERR((void const *)file); if ((int )tmp___1) { printk("\vfilp_open(%s) for APTPL metadata failed\n", (char *)(& path)); tmp___0 = PTR_ERR((void const *)file); return ((int )tmp___0); } else { } tmp___2 = strlen((char const *)buf); pr_aptpl_buf_len = (u32 )tmp___2 + 1U; tmp___3 = kernel_write(file, (char const *)buf, (size_t )pr_aptpl_buf_len, 0LL); ret = (int )tmp___3; if (ret < 0) { descriptor.modname = "target_core_mod"; descriptor.function = "__core_scsi3_write_aptpl_to_file"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "Error writing APTPL metadata file: %s\n"; descriptor.lineno = 1965U; descriptor.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_pr_debug(& descriptor, "Error writing APTPL metadata file: %s\n", (char *)(& path)); } else { } } else { } fput(file); return (ret < 0 ? -5 : 0); } } static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev , bool aptpl ) { unsigned char *buf ; int rc ; int len ; char *null_buf ; struct _ddebug descriptor ; long tmp ; void *tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { len = 262144; if (! aptpl) { null_buf = (char *)"No Registrations or Reservations\n"; rc = __core_scsi3_write_aptpl_to_file(dev, (unsigned char *)null_buf); dev->t10_pr.pr_aptpl_active = 0; descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_update_and_write_aptpl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR: Set APTPL Bit Deactivated\n"; descriptor.lineno = 1985U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "SPC-3 PR: Set APTPL Bit Deactivated\n"); } else { } if (rc != 0) { return (10U); } else { } return (0U); } else { } retry: tmp___0 = vzalloc((unsigned long )len); buf = (unsigned char *)tmp___0; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (18U); } else { } rc = core_scsi3_update_aptpl_buf(dev, buf, (u32 )len); if (rc < 0) { vfree((void const *)buf); len = len * 2; goto retry; } else { } rc = __core_scsi3_write_aptpl_to_file(dev, buf); if (rc != 0) { printk("\vSPC-3 PR: Could not update APTPL\n"); vfree((void const *)buf); return (10U); } else { } dev->t10_pr.pr_aptpl_active = 1; vfree((void const *)buf); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_scsi3_update_and_write_aptpl"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR: Set APTPL Bit Activated\n"; descriptor___0.lineno = 2012U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "SPC-3 PR: Set APTPL Bit Activated\n"); } else { } return (0U); } } static sense_reason_t core_scsi3_emulate_pro_register(struct se_cmd *cmd , u64 res_key , u64 sa_res_key , bool aptpl , bool all_tg_pt , bool 
spec_i_pt , enum register_type register_type ) { struct se_session *se_sess ; struct se_device *dev ; struct se_lun *se_lun ; struct se_portal_group *se_tpg ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_p ; struct t10_pr_registration *pr_reg_tmp ; struct t10_reservation *pr_tmpl ; unsigned char isid_buf[16U] ; unsigned char *isid_ptr ; sense_reason_t ret ; int pr_holder ; int type ; int tmp ; sense_reason_t tmp___0 ; struct _ddebug descriptor ; char *tmp___1 ; long tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___3 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; { se_sess = cmd->se_sess; dev = cmd->se_dev; se_lun = cmd->se_lun; pr_tmpl = & dev->t10_pr; isid_ptr = (unsigned char *)0U; ret = 0U; pr_holder = 0; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0) || (unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { printk("\vSPC-3 PR: se_sess || struct se_lun is NULL!\n"); return (10U); } else { } se_tpg = se_sess->se_tpg; if ((unsigned long )(se_tpg->se_tpg_tfo)->sess_get_initiator_sid != (unsigned long )((u32 (*/* const */)(struct se_session * , unsigned char * , u32 ))0)) { memset((void *)(& isid_buf), 0, 16UL); (*((se_tpg->se_tpg_tfo)->sess_get_initiator_sid))(se_sess, (unsigned char *)(& isid_buf), 16U); isid_ptr = (unsigned char *)(& isid_buf); } else { } pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { if (res_key != 0ULL) { printk("\fSPC-3 PR: Reservation Key non-zero for SA REGISTER, returning CONFLICT\n"); return (16U); } else { } if (sa_res_key == 0ULL) { return (0U); } else { } if (! spec_i_pt) { tmp = core_scsi3_alloc_registration(cmd->se_dev, se_sess->se_node_acl, cmd->se_lun, (struct se_dev_entry *)0, cmd->orig_fe_lun, isid_ptr, sa_res_key, (int )all_tg_pt, (int )aptpl, register_type, 0); if (tmp != 0) { printk("\vUnable to allocate struct t10_pr_registration\n"); return (9U); } else { } } else { ret = core_scsi3_decode_spec_i_port(cmd, se_tpg, isid_ptr, sa_res_key, (int )all_tg_pt, (int )aptpl); if (ret != 0U) { return (ret); } else { } } tmp___0 = core_scsi3_update_and_write_aptpl(dev, (int )aptpl); return (tmp___0); } else { } if ((unsigned int )register_type == 0U && pr_reg->pr_res_key != res_key) { printk("\vSPC-3 PR REGISTER: Received res_key: 0x%016Lx does not match existing SA REGISTER res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); ret = 16U; goto out; } else { } if ((int )spec_i_pt) { printk("\vSPC-3 PR REGISTER: SPEC_I_PT set on a registered nexus\n"); ret = 9U; goto out; } else { } if (pr_reg->pr_reg_all_tg_pt != 0 && ! 
all_tg_pt) { printk("\vSPC-3 PR REGISTER: ALL_TG_PT=1 registration exists, but ALL_TG_PT=1 bit not present in received PROUT\n"); ret = 8U; goto out; } else { } if (sa_res_key != 0ULL) { pr_reg->pr_res_generation = core_scsi3_pr_generation(cmd->se_dev); pr_reg->pr_res_key = sa_res_key; descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_emulate_pro_register"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] REGISTER%s: Changed Reservation Key for %s to: 0x%016Lx PRgeneration: 0x%08x\n"; descriptor.lineno = 2137U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] REGISTER%s: Changed Reservation Key for %s to: 0x%016Lx PRgeneration: 0x%08x\n", tmp___1, (unsigned int )register_type == 1U ? (char *)"_AND_IGNORE_EXISTING_KEY" : (char *)"", (char *)(& (pr_reg->pr_reg_nacl)->initiatorname), pr_reg->pr_res_key, pr_reg->pr_res_generation); } else { } } else { type = pr_reg->pr_res_type; pr_holder = core_scsi3_check_implicit_release(cmd->se_dev, pr_reg); if (pr_holder < 0) { ret = 16U; goto out; } else { } spin_lock(& pr_tmpl->registration_lock); if (pr_reg->pr_reg_all_tg_pt != 0) { __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg_p = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg_p->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_58034; ldv_58033: ; if (pr_reg_p->pr_reg_all_tg_pt == 0) { goto ldv_58032; } else { } if (pr_reg_p->pr_res_key != res_key) { goto ldv_58032; } else { } if ((unsigned long )pr_reg == (unsigned long )pr_reg_p) { goto ldv_58032; } else { } tmp___3 = strcmp((char const *)(& (pr_reg->pr_reg_nacl)->initiatorname), (char const *)(& (pr_reg_p->pr_reg_nacl)->initiatorname)); if (tmp___3 != 0) { goto ldv_58032; } else { } __core_scsi3_free_registration(dev, pr_reg_p, (struct list_head *)0, 0); ldv_58032: pr_reg_p = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_58034: ; if ((unsigned long )(& pr_reg_p->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58033; } else { } } else { } __core_scsi3_free_registration(cmd->se_dev, pr_reg, (struct list_head *)0, 1); pr_reg = (struct t10_pr_registration *)0; if (pr_holder != 0 && (type == 5 || type == 6)) { __mptr___2 = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg_p = (struct t10_pr_registration *)__mptr___2 + 0xfffffffffffffd90UL; goto ldv_58041; ldv_58040: target_ua_allocate_lun(pr_reg_p->pr_reg_nacl, (u32 )pr_reg_p->pr_res_mapped_lun, 42, 4); __mptr___3 = (struct list_head const *)pr_reg_p->pr_reg_list.next; pr_reg_p = (struct t10_pr_registration *)__mptr___3 + 0xfffffffffffffd90UL; ldv_58041: ; if ((unsigned long )(& pr_reg_p->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58040; } else { } } else { } spin_unlock(& pr_tmpl->registration_lock); } ret = core_scsi3_update_and_write_aptpl(dev, (int )aptpl); out: ; if ((unsigned long )pr_reg != (unsigned long )((struct t10_pr_registration *)0)) { 
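/* Drop the reference taken by core_scsi3_locate_pr_reg() before returning. */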
core_scsi3_put_pr_reg(pr_reg); } else { } return (ret); } } unsigned char *core_scsi3_pr_dump_type(int type ) { { switch (type) { case 1: ; return ((unsigned char *)"Write Exclusive Access"); case 3: ; return ((unsigned char *)"Exclusive Access"); case 5: ; return ((unsigned char *)"Write Exclusive Access, Registrants Only"); case 6: ; return ((unsigned char *)"Exclusive Access, Registrants Only"); case 7: ; return ((unsigned char *)"Write Exclusive Access, All Registrants"); case 8: ; return ((unsigned char *)"Exclusive Access, All Registrants"); default: ; goto ldv_58053; } ldv_58053: ; return ((unsigned char *)"Unknown SPC-3 PR Type"); } } static sense_reason_t core_scsi3_pro_reserve(struct se_cmd *cmd , int type , int scope , u64 res_key ) { struct se_device *dev ; struct se_session *se_sess ; struct se_lun *se_lun ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_res_holder ; struct t10_reservation *pr_tmpl ; char i_buf[21U] ; sense_reason_t ret ; struct se_node_acl *pr_res_nacl ; char *tmp ; char *tmp___0 ; int tmp___1 ; struct se_node_acl *pr_res_nacl___0 ; char *tmp___2 ; char *tmp___3 ; struct _ddebug descriptor ; unsigned char *tmp___4 ; char *tmp___5 ; long tmp___6 ; struct _ddebug descriptor___0 ; char *tmp___7 ; long tmp___8 ; { dev = cmd->se_dev; se_sess = cmd->se_sess; se_lun = cmd->se_lun; pr_tmpl = & dev->t10_pr; memset((void *)(& i_buf), 0, 21UL); if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0) || (unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { printk("\vSPC-3 PR: se_sess || struct se_lun is NULL!\n"); return (10U); } else { } pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vSPC-3 PR: Unable to locate PR_REGISTERED *pr_reg for RESERVE\n"); return (10U); } else { } if (pr_reg->pr_res_key != res_key) { printk("\vSPC-3 PR RESERVE: Received res_key: 0x%016Lx does not match existing SA REGISTER res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); ret = 16U; goto out_put_pr_reg; } else { } if (scope != 0) { printk("\vSPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); ret = 9U; goto out_put_pr_reg; } else { } spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { tmp___1 = is_reservation_holder(pr_res_holder, pr_reg); if (tmp___1 == 0) { pr_res_nacl = pr_res_holder->pr_reg_nacl; tmp = (*(((pr_res_nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); tmp___0 = (*((cmd->se_tfo)->get_fabric_name))(); printk("\vSPC-3 PR: Attempted RESERVE from [%s]: %s while reservation already held by [%s]: %s, returning RESERVATION_CONFLICT\n", tmp___0, (char *)(& (se_sess->se_node_acl)->initiatorname), tmp, (char *)(& (pr_res_holder->pr_reg_nacl)->initiatorname)); spin_unlock(& dev->dev_reservation_lock); ret = 16U; goto out_put_pr_reg; } else { } if (pr_res_holder->pr_res_type != type || pr_res_holder->pr_res_scope != scope) { pr_res_nacl___0 = pr_res_holder->pr_reg_nacl; tmp___2 = (*(((pr_res_nacl___0->se_tpg)->se_tpg_tfo)->get_fabric_name))(); tmp___3 = (*((cmd->se_tfo)->get_fabric_name))(); printk("\vSPC-3 PR: Attempted RESERVE from [%s]: %s trying to change TYPE and/or SCOPE, while reservation already held by [%s]: %s, returning RESERVATION_CONFLICT\n", tmp___3, (char *)(& (se_sess->se_node_acl)->initiatorname), tmp___2, (char *)(& (pr_res_holder->pr_reg_nacl)->initiatorname)); spin_unlock(& 
dev->dev_reservation_lock); ret = 16U; goto out_put_pr_reg; } else { } spin_unlock(& dev->dev_reservation_lock); ret = 0U; goto out_put_pr_reg; } else { } pr_reg->pr_res_scope = scope; pr_reg->pr_res_type = type; pr_reg->pr_res_holder = 1; dev->dev_pr_res_holder = pr_reg; core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_pro_reserve"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: RESERVE created new reservation holder TYPE: %s ALL_TG_PT: %d\n"; descriptor.lineno = 2381U; descriptor.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___4 = core_scsi3_pr_dump_type(type); tmp___5 = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: RESERVE created new reservation holder TYPE: %s ALL_TG_PT: %d\n", tmp___5, tmp___4, pr_reg->pr_reg_all_tg_pt != 0); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_scsi3_pro_reserve"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] RESERVE Node: %s%s\n"; descriptor___0.lineno = 2385U; descriptor___0.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___7 = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] RESERVE Node: %s%s\n", tmp___7, (char *)(& (se_sess->se_node_acl)->initiatorname), (char *)(& i_buf)); } else { } spin_unlock(& dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active != 0) { core_scsi3_update_and_write_aptpl(cmd->se_dev, 1); } else { } ret = 0U; out_put_pr_reg: core_scsi3_put_pr_reg(pr_reg); return (ret); } } static sense_reason_t core_scsi3_emulate_pro_reserve(struct se_cmd *cmd , int type , int scope , u64 res_key ) { sense_reason_t tmp ; { switch (type) { case 1: ; case 3: ; case 5: ; case 6: ; case 7: ; case 8: tmp = core_scsi3_pro_reserve(cmd, type, scope, res_key); return (tmp); default: printk("\vSPC-3 PR: Unknown Service Action RESERVE Type: 0x%02x\n", type); return (8U); } } } static void __core_scsi3_complete_pro_release(struct se_device *dev , struct se_node_acl *se_nacl , struct t10_pr_registration *pr_reg , int explicit , int unreg ) { struct target_core_fabric_ops const *tfo ; char i_buf[21U] ; int pr_res_type ; int pr_res_scope ; struct list_head const *__mptr ; int tmp ; struct _ddebug descriptor ; unsigned char *tmp___0 ; char *tmp___1 ; long tmp___2 ; struct _ddebug descriptor___0 ; char *tmp___3 ; long tmp___4 ; int tmp___5 ; int tmp___6 ; { tfo = (se_nacl->se_tpg)->se_tpg_tfo; pr_res_type = 0; pr_res_scope = 0; memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); if ((unsigned long )dev->dev_pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { pr_res_type = (dev->dev_pr_res_holder)->pr_res_type; pr_res_scope = (dev->dev_pr_res_holder)->pr_res_scope; (dev->dev_pr_res_holder)->pr_res_type = 0; (dev->dev_pr_res_holder)->pr_res_scope = 0; (dev->dev_pr_res_holder)->pr_res_holder = 0; 
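/*
 * Clear the device-level reservation holder pointer.  When this release also
 * unregisters the nexus (unreg != 0) and an All Registrants type reservation
 * (pr_res_type 7 or 8) was held, the next remaining registration is promoted
 * to reservation holder below.
 */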
dev->dev_pr_res_holder = (struct t10_pr_registration *)0; } else { } if (unreg == 0) { goto out; } else { } spin_lock(& dev->t10_pr.registration_lock); list_del_init(& pr_reg->pr_reg_list); tmp = list_empty((struct list_head const *)(& dev->t10_pr.registration_list)); if (tmp == 0 && (pr_res_type == 7 || pr_res_type == 8)) { __mptr = (struct list_head const *)dev->t10_pr.registration_list.next; dev->dev_pr_res_holder = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; (dev->dev_pr_res_holder)->pr_res_type = pr_res_type; (dev->dev_pr_res_holder)->pr_res_scope = pr_res_scope; (dev->dev_pr_res_holder)->pr_res_holder = 1; } else { } spin_unlock(& dev->t10_pr.registration_lock); out: ; if ((unsigned long )dev->dev_pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { descriptor.modname = "target_core_mod"; descriptor.function = "__core_scsi3_complete_pro_release"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: %s RELEASE cleared reservation holder TYPE: %s ALL_TG_PT: %d\n"; descriptor.lineno = 2474U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___0 = core_scsi3_pr_dump_type(pr_res_type); tmp___1 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: %s RELEASE cleared reservation holder TYPE: %s ALL_TG_PT: %d\n", tmp___1, explicit != 0 ? (char *)"explicit" : (char *)"implicit", tmp___0, pr_reg->pr_reg_all_tg_pt != 0); } else { } } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "__core_scsi3_complete_pro_release"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] RELEASE Node: %s%s\n"; descriptor___0.lineno = 2478U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { tmp___3 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] RELEASE Node: %s%s\n", tmp___3, (char *)(& se_nacl->initiatorname), (char *)(& i_buf)); } else { } tmp___6 = 0; pr_reg->pr_res_scope = tmp___6; tmp___5 = tmp___6; pr_reg->pr_res_type = tmp___5; pr_reg->pr_res_holder = tmp___5; return; } } static sense_reason_t core_scsi3_emulate_pro_release(struct se_cmd *cmd , int type , int scope , u64 res_key ) { struct se_device *dev ; struct se_session *se_sess ; struct se_lun *se_lun ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_p ; struct t10_pr_registration *pr_res_holder ; struct t10_reservation *pr_tmpl ; sense_reason_t ret ; int tmp ; struct se_node_acl *pr_res_nacl ; char *tmp___0 ; char *tmp___1 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = cmd->se_dev; se_sess = cmd->se_sess; se_lun = cmd->se_lun; pr_tmpl = & dev->t10_pr; ret = 0U; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0) || (unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { printk("\vSPC-3 PR: se_sess || struct se_lun is NULL!\n"); return (10U); } else { } pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg 
== (unsigned long )((struct t10_pr_registration *)0)) { printk("\vSPC-3 PR: Unable to locate PR_REGISTERED *pr_reg for RELEASE\n"); return (10U); } else { } spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { spin_unlock(& dev->dev_reservation_lock); goto out_put_pr_reg; } else { } tmp = is_reservation_holder(pr_res_holder, pr_reg); if (tmp == 0) { spin_unlock(& dev->dev_reservation_lock); goto out_put_pr_reg; } else { } if (pr_reg->pr_res_key != res_key) { printk("\vSPC-3 PR RELEASE: Received res_key: 0x%016Lx does not match existing SA REGISTER res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); spin_unlock(& dev->dev_reservation_lock); ret = 16U; goto out_put_pr_reg; } else { } if (pr_res_holder->pr_res_type != type || pr_res_holder->pr_res_scope != scope) { pr_res_nacl = pr_res_holder->pr_reg_nacl; tmp___0 = (*(((pr_res_nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); tmp___1 = (*((cmd->se_tfo)->get_fabric_name))(); printk("\vSPC-3 PR RELEASE: Attempted to release reservation from [%s]: %s with different TYPE and/or SCOPE while reservation already held by [%s]: %s, returning RESERVATION_CONFLICT\n", tmp___1, (char *)(& (se_sess->se_node_acl)->initiatorname), tmp___0, (char *)(& (pr_res_holder->pr_reg_nacl)->initiatorname)); spin_unlock(& dev->dev_reservation_lock); ret = 16U; goto out_put_pr_reg; } else { } __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl, pr_reg, 1, 0); spin_unlock(& dev->dev_reservation_lock); if (((type != 5 && type != 6) && type != 7) && type != 8) { goto write_aptpl; } else { } spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg_p = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; goto ldv_58127; ldv_58126: ; if ((unsigned long )pr_reg_p == (unsigned long )pr_reg) { goto ldv_58125; } else { } target_ua_allocate_lun(pr_reg_p->pr_reg_nacl, (u32 )pr_reg_p->pr_res_mapped_lun, 42, 4); ldv_58125: __mptr___0 = (struct list_head const *)pr_reg_p->pr_reg_list.next; pr_reg_p = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; ldv_58127: ; if ((unsigned long )(& pr_reg_p->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58126; } else { } spin_unlock(& pr_tmpl->registration_lock); write_aptpl: ; if (pr_tmpl->pr_aptpl_active != 0) { core_scsi3_update_and_write_aptpl(cmd->se_dev, 1); } else { } out_put_pr_reg: core_scsi3_put_pr_reg(pr_reg); return (ret); } } static sense_reason_t core_scsi3_emulate_pro_clear(struct se_cmd *cmd , u64 res_key ) { struct se_device *dev ; struct se_node_acl *pr_reg_nacl ; struct se_session *se_sess ; struct t10_reservation *pr_tmpl ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration *pr_reg_n ; struct t10_pr_registration *pr_res_holder ; u64 pr_res_mapped_lun ; int calling_it_nexus ; struct se_node_acl *pr_res_nacl ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { dev = cmd->se_dev; se_sess = cmd->se_sess; pr_tmpl = & dev->t10_pr; pr_res_mapped_lun = 0ULL; calling_it_nexus = 0; pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg_n == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vSPC-3 PR: Unable to locate PR_REGISTERED *pr_reg for CLEAR\n"); return (10U); } else { } 
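/*
 * PROUT CLEAR: the RESERVATION KEY from the parameter list must match the
 * key registered for the calling I_T nexus, otherwise the command is
 * rejected with RESERVATION CONFLICT (returned as 16U in this encoding).
 */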
if (pr_reg_n->pr_res_key != res_key) { printk("\vSPC-3 PR REGISTER: Received res_key: 0x%016Lx does not match existing SA REGISTER res_key: 0x%016Lx\n", res_key, pr_reg_n->pr_res_key); core_scsi3_put_pr_reg(pr_reg_n); return (16U); } else { } spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { pr_res_nacl = pr_res_holder->pr_reg_nacl; __core_scsi3_complete_pro_release(dev, pr_res_nacl, pr_res_holder, 0, 0); } else { } spin_unlock(& dev->dev_reservation_lock); spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_58151; ldv_58150: calling_it_nexus = (unsigned long )pr_reg_n == (unsigned long )pr_reg; pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, (struct list_head *)0, calling_it_nexus); if (calling_it_nexus == 0) { target_ua_allocate_lun(pr_reg_nacl, (u32 )pr_res_mapped_lun, 42, 3); } else { } pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_58151: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58150; } else { } spin_unlock(& pr_tmpl->registration_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_emulate_pro_clear"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: CLEAR complete\n"; descriptor.lineno = 2718U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: CLEAR complete\n", tmp); } else { } core_scsi3_update_and_write_aptpl(cmd->se_dev, 0); core_scsi3_pr_generation(dev); return (0U); } } static void __core_scsi3_complete_pro_preempt(struct se_device *dev , struct t10_pr_registration *pr_reg , struct list_head *preempt_and_abort_list , int type , int scope , enum preempt_type preempt_type ) { struct se_node_acl *nacl ; struct target_core_fabric_ops const *tfo ; char i_buf[21U] ; struct _ddebug descriptor ; unsigned char *tmp ; char *tmp___0 ; long tmp___1 ; struct _ddebug descriptor___0 ; char *tmp___2 ; long tmp___3 ; { nacl = pr_reg->pr_reg_nacl; tfo = (nacl->se_tpg)->se_tpg_tfo; memset((void *)(& i_buf), 0, 21UL); core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); if ((unsigned long )dev->dev_pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { __core_scsi3_complete_pro_release(dev, nacl, dev->dev_pr_res_holder, 0, 0); } else { } dev->dev_pr_res_holder = pr_reg; pr_reg->pr_res_holder = 1; pr_reg->pr_res_type = type; pr_reg->pr_res_scope = scope; descriptor.modname = "target_core_mod"; descriptor.function = "__core_scsi3_complete_pro_preempt"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR [%s] Service Action: PREEMPT%s created new reservation holder TYPE: %s ALL_TG_PT: %d\n"; descriptor.lineno = 2759U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp = core_scsi3_pr_dump_type(type); tmp___0 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "SPC-3 PR [%s] Service Action: PREEMPT%s created new reservation holder TYPE: %s ALL_TG_PT: %d\n", tmp___0, (unsigned int )preempt_type == 1U ? (char *)"_AND_ABORT" : (char *)"", tmp, pr_reg->pr_reg_all_tg_pt != 0); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "__core_scsi3_complete_pro_preempt"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n"; descriptor___0.lineno = 2762U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___2 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n", tmp___2, (unsigned int )preempt_type == 1U ? (char *)"_AND_ABORT" : (char *)"", (char *)(& nacl->initiatorname), (char *)(& i_buf)); } else { } if ((unsigned long )preempt_and_abort_list != (unsigned long )((struct list_head *)0)) { list_add_tail(& pr_reg->pr_reg_abort_list, preempt_and_abort_list); } else { } return; } } static void core_scsi3_release_preempt_and_abort(struct list_head *preempt_and_abort_list , struct t10_pr_registration *pr_reg_holder ) { struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)preempt_and_abort_list->next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd80UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_abort_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd80UL; goto ldv_58183; ldv_58182: list_del(& pr_reg->pr_reg_abort_list); if ((unsigned long )pr_reg_holder == (unsigned long )pr_reg) { goto ldv_58181; } else { } if (pr_reg->pr_res_holder != 0) { printk("\fpr_reg->pr_res_holder still set\n"); goto ldv_58181; } else { } pr_reg->pr_reg_deve = (struct se_dev_entry *)0; pr_reg->pr_reg_nacl = (struct se_node_acl *)0; kmem_cache_free(t10_pr_reg_cache, (void *)pr_reg); ldv_58181: pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_abort_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd80UL; ldv_58183: ; if ((unsigned long )(& pr_reg->pr_reg_abort_list) != (unsigned long )preempt_and_abort_list) { goto ldv_58182; } else { } return; } } static sense_reason_t core_scsi3_pro_preempt(struct se_cmd *cmd , int type , int scope , u64 res_key , u64 sa_res_key , enum preempt_type preempt_type ) { struct se_device *dev ; struct se_node_acl *pr_reg_nacl ; struct se_session *se_sess ; struct list_head preempt_and_abort_list ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct t10_pr_registration 
*pr_reg_n ; struct t10_pr_registration *pr_res_holder ; struct t10_reservation *pr_tmpl ; u64 pr_res_mapped_lun ; int all_reg ; int calling_it_nexus ; bool sa_res_key_unmatched ; int prh_type ; int prh_scope ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct list_head const *__mptr___5 ; struct list_head const *__mptr___6 ; struct list_head const *__mptr___7 ; { dev = cmd->se_dev; se_sess = cmd->se_sess; preempt_and_abort_list.next = & preempt_and_abort_list; preempt_and_abort_list.prev = & preempt_and_abort_list; pr_tmpl = & dev->t10_pr; pr_res_mapped_lun = 0ULL; all_reg = 0; calling_it_nexus = 0; sa_res_key_unmatched = sa_res_key != 0ULL; prh_type = 0; prh_scope = 0; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0)) { return (10U); } else { } pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg_n == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vSPC-3 PR: Unable to locate PR_REGISTERED *pr_reg for PREEMPT%s\n", (unsigned int )preempt_type == 1U ? (char *)"_AND_ABORT" : (char *)""); return (16U); } else { } if (pr_reg_n->pr_res_key != res_key) { core_scsi3_put_pr_reg(pr_reg_n); return (16U); } else { } if (scope != 0) { printk("\vSPC-3 PR: Illegal SCOPE: 0x%02x\n", scope); core_scsi3_put_pr_reg(pr_reg_n); return (9U); } else { } spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0) && (pr_res_holder->pr_res_type == 7 || pr_res_holder->pr_res_type == 8)) { all_reg = 1; } else { } if (all_reg == 0 && sa_res_key == 0ULL) { spin_unlock(& dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return (9U); } else { } if ((unsigned long )pr_res_holder == (unsigned long )((struct t10_pr_registration *)0) || pr_res_holder->pr_res_key != sa_res_key) { spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_58216; ldv_58215: ; if (all_reg == 0) { if (pr_reg->pr_res_key != sa_res_key) { goto ldv_58214; } else { } sa_res_key_unmatched = 0; calling_it_nexus = (unsigned long )pr_reg_n == (unsigned long )pr_reg; pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, (unsigned int )preempt_type == 1U ? & preempt_and_abort_list : (struct list_head *)0, calling_it_nexus); } else { if (sa_res_key != 0ULL && pr_reg->pr_res_key != sa_res_key) { goto ldv_58214; } else { } sa_res_key_unmatched = 0; calling_it_nexus = (unsigned long )pr_reg_n == (unsigned long )pr_reg; if (calling_it_nexus != 0) { goto ldv_58214; } else { } pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, (unsigned int )preempt_type == 1U ? 
& preempt_and_abort_list : (struct list_head *)0, 0); } if (calling_it_nexus == 0) { target_ua_allocate_lun(pr_reg_nacl, (u32 )pr_res_mapped_lun, 42, 5); } else { } ldv_58214: pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_58216: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58215; } else { } spin_unlock(& pr_tmpl->registration_lock); if ((int )sa_res_key_unmatched) { spin_unlock(& dev->dev_reservation_lock); core_scsi3_put_pr_reg(pr_reg_n); return (16U); } else { } if (((unsigned long )pr_res_holder != (unsigned long )((struct t10_pr_registration *)0) && all_reg != 0) && sa_res_key == 0ULL) { __core_scsi3_complete_pro_preempt(dev, pr_reg_n, (unsigned int )preempt_type == 1U ? & preempt_and_abort_list : (struct list_head *)0, type, scope, preempt_type); if ((unsigned int )preempt_type == 1U) { core_scsi3_release_preempt_and_abort(& preempt_and_abort_list, pr_reg_n); } else { } } else { } spin_unlock(& dev->dev_reservation_lock); if (pr_tmpl->pr_aptpl_active != 0) { core_scsi3_update_and_write_aptpl(cmd->se_dev, 1); } else { } core_scsi3_put_pr_reg(pr_reg_n); core_scsi3_pr_generation(cmd->se_dev); return (0U); } else { } prh_type = pr_res_holder->pr_res_type; prh_scope = pr_res_holder->pr_res_scope; if ((unsigned long )pr_reg_n != (unsigned long )pr_res_holder) { __core_scsi3_complete_pro_release(dev, pr_res_holder->pr_reg_nacl, dev->dev_pr_res_holder, 0, 0); } else { } spin_lock(& pr_tmpl->registration_lock); __mptr___2 = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr___2 + 0xfffffffffffffd90UL; __mptr___3 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___3 + 0xfffffffffffffd90UL; goto ldv_58226; ldv_58225: calling_it_nexus = (unsigned long )pr_reg_n == (unsigned long )pr_reg; if (calling_it_nexus != 0) { goto ldv_58224; } else { } if (pr_reg->pr_res_key != sa_res_key) { goto ldv_58224; } else { } pr_reg_nacl = pr_reg->pr_reg_nacl; pr_res_mapped_lun = pr_reg->pr_res_mapped_lun; __core_scsi3_free_registration(dev, pr_reg, (unsigned int )preempt_type == 1U ? & preempt_and_abort_list : (struct list_head *)0, calling_it_nexus); target_ua_allocate_lun(pr_reg_nacl, (u32 )pr_res_mapped_lun, 42, 5); ldv_58224: pr_reg = pr_reg_tmp; __mptr___4 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___4 + 0xfffffffffffffd90UL; ldv_58226: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58225; } else { } spin_unlock(& pr_tmpl->registration_lock); __core_scsi3_complete_pro_preempt(dev, pr_reg_n, (unsigned int )preempt_type == 1U ? 
& preempt_and_abort_list : (struct list_head *)0, type, scope, preempt_type); if (prh_type != type || prh_scope != scope) { spin_lock(& pr_tmpl->registration_lock); __mptr___5 = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr___5 + 0xfffffffffffffd90UL; __mptr___6 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___6 + 0xfffffffffffffd90UL; goto ldv_58236; ldv_58235: calling_it_nexus = (unsigned long )pr_reg_n == (unsigned long )pr_reg; if (calling_it_nexus != 0) { goto ldv_58234; } else { } target_ua_allocate_lun(pr_reg->pr_reg_nacl, (u32 )pr_reg->pr_res_mapped_lun, 42, 4); ldv_58234: pr_reg = pr_reg_tmp; __mptr___7 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___7 + 0xfffffffffffffd90UL; ldv_58236: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58235; } else { } spin_unlock(& pr_tmpl->registration_lock); } else { } spin_unlock(& dev->dev_reservation_lock); if ((unsigned int )preempt_type == 1U) { core_tmr_lun_reset(dev, (struct se_tmr_req *)0, & preempt_and_abort_list, cmd); core_scsi3_release_preempt_and_abort(& preempt_and_abort_list, pr_reg_n); } else { } if (pr_tmpl->pr_aptpl_active != 0) { core_scsi3_update_and_write_aptpl(cmd->se_dev, 1); } else { } core_scsi3_put_pr_reg(pr_reg_n); core_scsi3_pr_generation(cmd->se_dev); return (0U); } } static sense_reason_t core_scsi3_emulate_pro_preempt(struct se_cmd *cmd , int type , int scope , u64 res_key , u64 sa_res_key , enum preempt_type preempt_type ) { sense_reason_t tmp ; { switch (type) { case 1: ; case 3: ; case 5: ; case 6: ; case 7: ; case 8: tmp = core_scsi3_pro_preempt(cmd, type, scope, res_key, sa_res_key, preempt_type); return (tmp); default: printk("\vSPC-3 PR: Unknown Service Action PREEMPT%s Type: 0x%02x\n", (unsigned int )preempt_type == 1U ? 
(char *)"_AND_ABORT" : (char *)"", type); return (8U); } } } static sense_reason_t core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd , u64 res_key , u64 sa_res_key , int aptpl , int unreg ) { struct se_session *se_sess ; struct se_device *dev ; struct se_dev_entry *dest_se_deve ; struct se_lun *se_lun ; struct se_lun *tmp_lun ; struct se_node_acl *pr_res_nacl ; struct se_node_acl *pr_reg_nacl ; struct se_node_acl *dest_node_acl ; struct se_portal_group *se_tpg ; struct se_portal_group *dest_se_tpg ; struct target_core_fabric_ops const *dest_tf_ops ; struct target_core_fabric_ops const *tf_ops ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_res_holder ; struct t10_pr_registration *dest_pr_reg ; struct t10_reservation *pr_tmpl ; unsigned char *buf ; unsigned char const *initiator_str ; char *iport_ptr ; char i_buf[21U] ; u32 tid_len ; u32 tmp_tid_len ; int new_reg ; int type ; int scope ; int matching_iname ; sense_reason_t ret ; unsigned short rtpi ; unsigned char proto_ident ; void *tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; void *tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; char *tmp___3 ; char const *tmp___4 ; struct _ddebug descriptor___0 ; char *tmp___5 ; long tmp___6 ; int tmp___7 ; int tmp___8 ; char *tmp___9 ; int tmp___10 ; struct _ddebug descriptor___1 ; char *tmp___11 ; long tmp___12 ; char *tmp___13 ; int tmp___14 ; struct _ddebug descriptor___2 ; char *tmp___15 ; long tmp___16 ; int tmp___17 ; unsigned char *tmp___18 ; struct se_lun *dest_lun ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_392___0 __u ; bool __warned ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; u32 tmp___23 ; struct _ddebug descriptor___3 ; unsigned char *tmp___24 ; char *tmp___25 ; long tmp___26 ; struct _ddebug descriptor___4 ; char *tmp___27 ; char *tmp___28 ; long tmp___29 ; { se_sess = cmd->se_sess; dev = cmd->se_dev; dest_se_deve = (struct se_dev_entry *)0; se_lun = cmd->se_lun; dest_node_acl = (struct se_node_acl *)0; dest_se_tpg = (struct se_portal_group *)0; dest_tf_ops = (struct target_core_fabric_ops const *)0; pr_tmpl = & dev->t10_pr; iport_ptr = (char *)0; new_reg = 0; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0) || (unsigned long )se_lun == (unsigned long )((struct se_lun *)0)) { printk("\vSPC-3 PR: se_sess || struct se_lun is NULL!\n"); return (10U); } else { } memset((void *)(& i_buf), 0, 21UL); se_tpg = se_sess->se_tpg; tf_ops = se_tpg->se_tpg_tfo; pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl, se_sess); if ((unsigned long )pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { printk("\vSPC-3 PR: Unable to locate PR_REGISTERED *pr_reg for REGISTER_AND_MOVE\n"); return (10U); } else { } if (pr_reg->pr_res_key != res_key) { printk("\fSPC-3 PR REGISTER_AND_MOVE: Received res_key: 0x%016Lx does not match existing SA REGISTER res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key); ret = 16U; goto out_put_pr_reg; } else { } if (sa_res_key == 0ULL) { printk("\fSPC-3 PR REGISTER_AND_MOVE: Received zero sa_res_key\n"); ret = 9U; goto out_put_pr_reg; } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { ret = 10U; goto out_put_pr_reg; } else { } rtpi = (int )((unsigned short )*(buf + 18UL)) << 8U; rtpi = (int )((unsigned short )*(buf + 19UL)) | (int )rtpi; tid_len = (u32 )((int )*(buf + 20UL) << 24); tid_len = (u32 )((int )*(buf + 
21UL) << 16) | tid_len; tid_len = (u32 )((int )*(buf + 22UL) << 8) | tid_len; tid_len = (u32 )*(buf + 23UL) | tid_len; transport_kunmap_data_sg(cmd); buf = (unsigned char *)0U; if (tid_len + 24U != cmd->data_length) { printk("\vSPC-3 PR: Illegal tid_len: %u + 24 byte header does not equal CDB data_length: %u\n", tid_len, cmd->data_length); ret = 9U; goto out_put_pr_reg; } else { } spin_lock(& dev->se_port_lock); __mptr = (struct list_head const *)dev->dev_sep_list.next; tmp_lun = (struct se_lun *)__mptr + 0xfffffffffffffb88UL; goto ldv_58297; ldv_58296: ; if ((int )tmp_lun->lun_rtpi != (int )rtpi) { goto ldv_58294; } else { } dest_se_tpg = tmp_lun->lun_tpg; dest_tf_ops = dest_se_tpg->se_tpg_tfo; if ((unsigned long )dest_tf_ops == (unsigned long )((struct target_core_fabric_ops const *)0)) { goto ldv_58294; } else { } atomic_inc_mb(& dest_se_tpg->tpg_pr_ref_count); spin_unlock(& dev->se_port_lock); tmp___0 = core_scsi3_tpg_depend_item(dest_se_tpg); if (tmp___0 != 0) { printk("\vcore_scsi3_tpg_depend_item() failed for dest_se_tpg\n"); atomic_dec_mb(& dest_se_tpg->tpg_pr_ref_count); ret = 10U; goto out_put_pr_reg; } else { } spin_lock(& dev->se_port_lock); goto ldv_58295; ldv_58294: __mptr___0 = (struct list_head const *)tmp_lun->lun_dev_link.next; tmp_lun = (struct se_lun *)__mptr___0 + 0xfffffffffffffb88UL; ldv_58297: ; if ((unsigned long )(& tmp_lun->lun_dev_link) != (unsigned long )(& dev->dev_sep_list)) { goto ldv_58296; } else { } ldv_58295: spin_unlock(& dev->se_port_lock); if ((unsigned long )dest_se_tpg == (unsigned long )((struct se_portal_group *)0) || (unsigned long )dest_tf_ops == (unsigned long )((struct target_core_fabric_ops const *)0)) { printk("\vSPC-3 PR REGISTER_AND_MOVE: Unable to locate fabric ops from Relative Target Port Identifier: %hu\n", (int )rtpi); ret = 9U; goto out_put_pr_reg; } else { } tmp___1 = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp___1; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { ret = 10U; goto out_put_pr_reg; } else { } proto_ident = (unsigned int )*(buf + 24UL) & 15U; descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_emulate_pro_register_and_move"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor.format = "SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier: 0x%02x\n"; descriptor.lineno = 3245U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor, "SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier: 0x%02x\n", (int )proto_ident); } else { } if ((int )proto_ident != dest_se_tpg->proto_id) { tmp___3 = (*(dest_tf_ops->get_fabric_name))(); printk("\vSPC-3 PR REGISTER_AND_MOVE: Received proto_ident: 0x%02x does not match ident: 0x%02x from fabric: %s\n", (int )proto_ident, dest_se_tpg->proto_id, tmp___3); ret = 9U; goto out; } else { } tmp___4 = target_parse_pr_out_transport_id(dest_se_tpg, (char const *)buf + 24U, & tmp_tid_len, & iport_ptr); initiator_str = (unsigned char const *)tmp___4; if ((unsigned long )initiator_str == (unsigned long )((unsigned char const *)0U)) { printk("\vSPC-3 PR REGISTER_AND_MOVE: Unable to locate initiator_str from Transport ID\n"); ret = 9U; goto out; } else { } transport_kunmap_data_sg(cmd); buf = (unsigned char *)0U; descriptor___0.modname = "target_core_mod"; 
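/*
 * The preceding REGISTER_AND_MOVE parameter-list handling assembles the
 * Relative Target Port Identifier big-endian from bytes 18-19 and the
 * TransportID length from bytes 20-23 of the kmapped buffer.  An
 * illustrative, simplified sketch of that extraction (not a verbatim
 * quote of the generated statements above):
 *
 *     rtpi    = (buf[18] << 8) | buf[19];
 *     tid_len = (buf[20] << 24) | (buf[21] << 16) |
 *               (buf[22] << 8)  |  buf[23];
 *
 * The RTPI is then matched against the luns on dev->dev_sep_list to find
 * the destination portal group, which is pinned via
 * core_scsi3_tpg_depend_item() before the TransportID itself is parsed
 * with target_parse_pr_out_transport_id().
 */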
descriptor___0.function = "core_scsi3_emulate_pro_register_and_move"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___0.format = "SPC-3 PR [%s] Extracted initiator %s identifier: %s %s\n"; descriptor___0.lineno = 3271U; descriptor___0.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = (*(dest_tf_ops->get_fabric_name))(); __dynamic_pr_debug(& descriptor___0, "SPC-3 PR [%s] Extracted initiator %s identifier: %s %s\n", tmp___5, (unsigned long )iport_ptr != (unsigned long )((char *)0) ? (char *)"port" : (char *)"device", initiator_str, (unsigned long )iport_ptr != (unsigned long )((char *)0) ? iport_ptr : (char *)""); } else { } pr_reg_nacl = pr_reg->pr_reg_nacl; tmp___7 = strcmp((char const *)initiator_str, (char const *)(& pr_reg_nacl->initiatorname)); matching_iname = tmp___7 == 0; if (matching_iname == 0) { goto after_iport_check; } else { } if ((unsigned long )iport_ptr == (unsigned long )((char *)0) || ! pr_reg->isid_present_at_reg) { printk("\vSPC-3 PR REGISTER_AND_MOVE: TransportID: %s matches: %s on received I_T Nexus\n", initiator_str, (char *)(& pr_reg_nacl->initiatorname)); ret = 9U; goto out; } else { } tmp___8 = strcmp((char const *)iport_ptr, (char const *)(& pr_reg->pr_reg_isid)); if (tmp___8 == 0) { printk("\vSPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s matches: %s %s on received I_T Nexus\n", initiator_str, iport_ptr, (char *)(& pr_reg_nacl->initiatorname), (char *)(& pr_reg->pr_reg_isid)); ret = 9U; goto out; } else { } after_iport_check: ldv_mutex_lock_241(& dest_se_tpg->acl_node_mutex); dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, (char const *)initiator_str); if ((unsigned long )dest_node_acl != (unsigned long )((struct se_node_acl *)0)) { atomic_inc_mb(& dest_node_acl->acl_pr_ref_count); } else { } ldv_mutex_unlock_242(& dest_se_tpg->acl_node_mutex); if ((unsigned long )dest_node_acl == (unsigned long )((struct se_node_acl *)0)) { tmp___9 = (*(dest_tf_ops->get_fabric_name))(); printk("\vUnable to locate %s dest_node_acl for TransportID%s\n", tmp___9, initiator_str); ret = 9U; goto out; } else { } tmp___10 = core_scsi3_nodeacl_depend_item(dest_node_acl); if (tmp___10 != 0) { printk("\vcore_scsi3_nodeacl_depend_item() for dest_node_acl\n"); atomic_dec_mb(& dest_node_acl->acl_pr_ref_count); dest_node_acl = (struct se_node_acl *)0; ret = 9U; goto out; } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "core_scsi3_emulate_pro_register_and_move"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___1.format = "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl: %s from TransportID\n"; descriptor___1.lineno = 3331U; descriptor___1.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___11 = (*(dest_tf_ops->get_fabric_name))(); __dynamic_pr_debug(& descriptor___1, "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl: %s from TransportID\n", tmp___11, (char *)(& dest_node_acl->initiatorname)); } else { } dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, (int )rtpi); if ((unsigned long )dest_se_deve == (unsigned long 
)((struct se_dev_entry *)0)) { tmp___13 = (*(dest_tf_ops->get_fabric_name))(); printk("\vUnable to locate %s dest_se_deve from RTPI: %hu\n", tmp___13, (int )rtpi); ret = 9U; goto out; } else { } tmp___14 = core_scsi3_lunacl_depend_item(dest_se_deve); if (tmp___14 != 0) { printk("\vcore_scsi3_lunacl_depend_item() failed\n"); kref_put(& dest_se_deve->pr_kref, & target_pr_kref_release); dest_se_deve = (struct se_dev_entry *)0; ret = 10U; goto out; } else { } descriptor___2.modname = "target_core_mod"; descriptor___2.function = "core_scsi3_emulate_pro_register_and_move"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___2.format = "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN ACL for dest_se_deve->mapped_lun: %llu\n"; descriptor___2.lineno = 3356U; descriptor___2.flags = 0U; tmp___16 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___16 != 0L) { tmp___15 = (*(dest_tf_ops->get_fabric_name))(); __dynamic_pr_debug(& descriptor___2, "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN ACL for dest_se_deve->mapped_lun: %llu\n", tmp___15, (char *)(& dest_node_acl->initiatorname), dest_se_deve->mapped_lun); } else { } spin_lock(& dev->dev_reservation_lock); pr_res_holder = dev->dev_pr_res_holder; if ((unsigned long )pr_res_holder == (unsigned long )((struct t10_pr_registration *)0)) { printk("\fSPC-3 PR REGISTER_AND_MOVE: No reservation currently held\n"); spin_unlock(& dev->dev_reservation_lock); ret = 8U; goto out; } else { } tmp___17 = is_reservation_holder(pr_res_holder, pr_reg); if (tmp___17 == 0) { printk("\fSPC-3 PR REGISTER_AND_MOVE: Calling I_T Nexus is not reservation holder\n"); spin_unlock(& dev->dev_reservation_lock); ret = 16U; goto out; } else { } if (pr_res_holder->pr_res_type == 7 || pr_res_holder->pr_res_type == 8) { tmp___18 = core_scsi3_pr_dump_type(pr_res_holder->pr_res_type); printk("\fSPC-3 PR REGISTER_AND_MOVE: Unable to move reservation for type: %s\n", tmp___18); spin_unlock(& dev->dev_reservation_lock); ret = 16U; goto out; } else { } pr_res_nacl = pr_res_holder->pr_reg_nacl; type = pr_res_holder->pr_res_type; scope = pr_res_holder->pr_res_type; dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, (unsigned char *)iport_ptr); if ((unsigned long )dest_pr_reg == (unsigned long )((struct t10_pr_registration *)0)) { __read_once_size((void const volatile *)(& dest_se_deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___19 = debug_lockdep_rcu_enabled(); if (tmp___19 != 0 && ! 
__warned) { tmp___20 = atomic_read((atomic_t const *)(& dest_se_deve->pr_kref.refcount)); if (tmp___20 == 0) { tmp___21 = rcu_read_lock_held(); if (tmp___21 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c", 3432, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } dest_lun = ________p1; spin_unlock(& dev->dev_reservation_lock); tmp___22 = core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl, dest_lun, dest_se_deve, dest_se_deve->mapped_lun, (unsigned char *)iport_ptr, sa_res_key, 0, aptpl, 2, 1); if (tmp___22 != 0) { ret = 9U; goto out; } else { } spin_lock(& dev->dev_reservation_lock); dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, (unsigned char *)iport_ptr); new_reg = 1; } else { } __core_scsi3_complete_pro_release(dev, pr_res_nacl, dev->dev_pr_res_holder, 0, 0); dev->dev_pr_res_holder = dest_pr_reg; dest_pr_reg->pr_res_holder = 1; dest_pr_reg->pr_res_type = type; pr_reg->pr_res_scope = scope; core_pr_dump_initiator_port(pr_reg, (char *)(& i_buf), 21U); if (new_reg == 0) { tmp___23 = pr_tmpl->pr_generation; pr_tmpl->pr_generation = pr_tmpl->pr_generation + 1U; dest_pr_reg->pr_res_generation = tmp___23; } else { } spin_unlock(& dev->dev_reservation_lock); descriptor___3.modname = "target_core_mod"; descriptor___3.function = "core_scsi3_emulate_pro_register_and_move"; descriptor___3.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___3.format = "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE created new reservation holder TYPE: %s on object RTPI: %hu PRGeneration: 0x%08x\n"; descriptor___3.lineno = 3473U; descriptor___3.flags = 0U; tmp___26 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___26 != 0L) { tmp___24 = core_scsi3_pr_dump_type(type); tmp___25 = (*(dest_tf_ops->get_fabric_name))(); __dynamic_pr_debug(& descriptor___3, "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE created new reservation holder TYPE: %s on object RTPI: %hu PRGeneration: 0x%08x\n", tmp___25, tmp___24, (int )rtpi, dest_pr_reg->pr_res_generation); } else { } descriptor___4.modname = "target_core_mod"; descriptor___4.function = "core_scsi3_emulate_pro_register_and_move"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_pr.c"; descriptor___4.format = "SPC-3 PR Successfully moved reservation from %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n"; descriptor___4.lineno = 3479U; descriptor___4.flags = 0U; tmp___29 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___29 != 0L) { tmp___27 = (*(dest_tf_ops->get_fabric_name))(); tmp___28 = (*(tf_ops->get_fabric_name))(); __dynamic_pr_debug(& descriptor___4, "SPC-3 PR Successfully moved reservation from %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n", tmp___28, (char *)(& pr_reg_nacl->initiatorname), (char *)(& i_buf), tmp___27, (char *)(& dest_node_acl->initiatorname), (unsigned long )iport_ptr != (unsigned long )((char *)0) ? 
iport_ptr : (char *)""); } else { } core_scsi3_lunacl_undepend_item(dest_se_deve); core_scsi3_nodeacl_undepend_item(dest_node_acl); core_scsi3_tpg_undepend_item(dest_se_tpg); if (unreg != 0) { spin_lock(& pr_tmpl->registration_lock); __core_scsi3_free_registration(dev, pr_reg, (struct list_head *)0, 1); spin_unlock(& pr_tmpl->registration_lock); } else { core_scsi3_put_pr_reg(pr_reg); } core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl != 0); transport_kunmap_data_sg(cmd); core_scsi3_put_pr_reg(dest_pr_reg); return (0U); out: ; if ((unsigned long )buf != (unsigned long )((unsigned char *)0U)) { transport_kunmap_data_sg(cmd); } else { } if ((unsigned long )dest_se_deve != (unsigned long )((struct se_dev_entry *)0)) { core_scsi3_lunacl_undepend_item(dest_se_deve); } else { } if ((unsigned long )dest_node_acl != (unsigned long )((struct se_node_acl *)0)) { core_scsi3_nodeacl_undepend_item(dest_node_acl); } else { } core_scsi3_tpg_undepend_item(dest_se_tpg); out_put_pr_reg: core_scsi3_put_pr_reg(pr_reg); return (ret); } } static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb ) { unsigned int __v1 ; unsigned int __v2 ; { __v1 = (unsigned int )(((((int )*cdb << 24) | ((int )*(cdb + 1UL) << 16)) | ((int )*(cdb + 2UL) << 8)) | (int )*(cdb + 3UL)); __v2 = (unsigned int )(((((int )*(cdb + 4UL) << 24) | ((int )*(cdb + 5UL) << 16)) | ((int )*(cdb + 6UL) << 8)) | (int )*(cdb + 7UL)); return ((unsigned long long )__v2 | ((unsigned long long )__v1 << 32)); } } sense_reason_t target_scsi3_emulate_pr_out(struct se_cmd *cmd ) { struct se_device *dev ; unsigned char *cdb ; unsigned char *buf ; u64 res_key ; u64 sa_res_key ; int sa ; int scope ; int type ; int aptpl ; int spec_i_pt ; int all_tg_pt ; int unreg ; sense_reason_t ret ; void *tmp ; { dev = cmd->se_dev; cdb = cmd->t_task_cdb; spec_i_pt = 0; all_tg_pt = 0; unreg = 0; if ((int )(cmd->se_dev)->dev_reservation_flags & 1) { printk("\vReceived PERSISTENT_RESERVE CDB while legacy SPC-2 reservation is held, returning RESERVATION_CONFLICT\n"); return (16U); } else { } if ((unsigned long )cmd->se_sess == (unsigned long )((struct se_session *)0)) { return (10U); } else { } if (cmd->data_length <= 23U) { printk("\fSPC-PR: Received PR OUT parameter list length too small: %u\n", cmd->data_length); return (9U); } else { } sa = (int )*(cdb + 1UL) & 31; scope = (int )*(cdb + 2UL) & 240; type = (int )*(cdb + 2UL) & 15; tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } res_key = core_scsi3_extract_reservation_key(buf); sa_res_key = core_scsi3_extract_reservation_key(buf + 8UL); if (sa != 7) { spec_i_pt = (int )*(buf + 20UL) & 8; all_tg_pt = (int )*(buf + 20UL) & 4; aptpl = (int )*(buf + 20UL) & 1; } else { aptpl = (int )*(buf + 17UL) & 1; unreg = (int )*(buf + 17UL) & 2; } if (dev->dev_attrib.force_pr_aptpl != 0) { aptpl = 1; } else { } transport_kunmap_data_sg(cmd); buf = (unsigned char *)0U; if (spec_i_pt != 0 && ((int )*(cdb + 1UL) & 31) != 0) { return (9U); } else { } if ((spec_i_pt == 0 && ((int )*(cdb + 1UL) & 31) != 7) && cmd->data_length != 24U) { printk("\fSPC-PR: Received PR OUT illegal parameter list length: %u\n", cmd->data_length); return (9U); } else { } switch (sa) { case 0: ret = core_scsi3_emulate_pro_register(cmd, res_key, sa_res_key, aptpl != 0, all_tg_pt != 0, spec_i_pt != 0, 0); goto ldv_58340; case 1: ret = core_scsi3_emulate_pro_reserve(cmd, type, scope, res_key); goto ldv_58340; case 2: ret = 
core_scsi3_emulate_pro_release(cmd, type, scope, res_key); goto ldv_58340; case 3: ret = core_scsi3_emulate_pro_clear(cmd, res_key); goto ldv_58340; case 4: ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, res_key, sa_res_key, 0); goto ldv_58340; case 5: ret = core_scsi3_emulate_pro_preempt(cmd, type, scope, res_key, sa_res_key, 1); goto ldv_58340; case 6: ret = core_scsi3_emulate_pro_register(cmd, 0ULL, sa_res_key, aptpl != 0, all_tg_pt != 0, spec_i_pt != 0, 1); goto ldv_58340; case 7: ret = core_scsi3_emulate_pro_register_and_move(cmd, res_key, sa_res_key, aptpl, unreg); goto ldv_58340; default: printk("\vUnknown PERSISTENT_RESERVE_OUT service action: 0x%02x\n", (int )*(cdb + 1UL) & 31); return (8U); } ldv_58340: ; if (ret == 0U) { target_complete_cmd(cmd, 0); } else { } return (ret); } } static sense_reason_t core_scsi3_pri_read_keys(struct se_cmd *cmd ) { struct se_device *dev ; struct t10_pr_registration *pr_reg ; unsigned char *buf ; u32 add_len ; u32 off ; void *tmp ; struct list_head const *__mptr ; u32 tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; u32 tmp___7 ; struct list_head const *__mptr___0 ; { dev = cmd->se_dev; add_len = 0U; off = 8U; if (cmd->data_length <= 7U) { printk("\vPRIN SA READ_KEYS SCSI Data Length: %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } *buf = (unsigned char )(dev->t10_pr.pr_generation >> 24); *(buf + 1UL) = (unsigned char )(dev->t10_pr.pr_generation >> 16); *(buf + 2UL) = (unsigned char )(dev->t10_pr.pr_generation >> 8); *(buf + 3UL) = (unsigned char )dev->t10_pr.pr_generation; spin_lock(& dev->t10_pr.registration_lock); __mptr = (struct list_head const *)dev->t10_pr.registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; goto ldv_58363; ldv_58362: ; if (add_len + 8U > cmd->data_length - 8U) { goto ldv_58361; } else { } tmp___0 = off; off = off + 1U; *(buf + (unsigned long )tmp___0) = (unsigned char )(pr_reg->pr_res_key >> 56); tmp___1 = off; off = off + 1U; *(buf + (unsigned long )tmp___1) = (unsigned char )(pr_reg->pr_res_key >> 48); tmp___2 = off; off = off + 1U; *(buf + (unsigned long )tmp___2) = (unsigned char )(pr_reg->pr_res_key >> 40); tmp___3 = off; off = off + 1U; *(buf + (unsigned long )tmp___3) = (unsigned char )(pr_reg->pr_res_key >> 32); tmp___4 = off; off = off + 1U; *(buf + (unsigned long )tmp___4) = (unsigned char )(pr_reg->pr_res_key >> 24); tmp___5 = off; off = off + 1U; *(buf + (unsigned long )tmp___5) = (unsigned char )(pr_reg->pr_res_key >> 16); tmp___6 = off; off = off + 1U; *(buf + (unsigned long )tmp___6) = (unsigned char )(pr_reg->pr_res_key >> 8); tmp___7 = off; off = off + 1U; *(buf + (unsigned long )tmp___7) = (unsigned char )pr_reg->pr_res_key; add_len = add_len + 8U; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; ldv_58363: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& dev->t10_pr.registration_list)) { goto ldv_58362; } else { } ldv_58361: spin_unlock(& dev->t10_pr.registration_lock); *(buf + 4UL) = (unsigned char )(add_len >> 24); *(buf + 5UL) = (unsigned char )(add_len >> 16); *(buf + 6UL) = (unsigned char )(add_len >> 8); *(buf + 7UL) = (unsigned char )add_len; transport_kunmap_data_sg(cmd); return (0U); } } static sense_reason_t 
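/*
 * READ RESERVATION emulation: the definition that follows appears to build
 * the PERSISTENT RESERVE IN READ RESERVATION payload under
 * dev_reservation_lock, with PRGENERATION in bytes 0-3, an ADDITIONAL
 * LENGTH of 16 in bytes 4-7, the reservation key in bytes 8-15 (reported
 * as zero for the all-registrants types 7 and 8), and SCOPE and TYPE
 * packed into byte 21.
 */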
core_scsi3_pri_read_reservation(struct se_cmd *cmd ) { struct se_device *dev ; struct t10_pr_registration *pr_reg ; unsigned char *buf ; u64 pr_res_key ; u32 add_len ; void *tmp ; { dev = cmd->se_dev; add_len = 16U; if (cmd->data_length <= 7U) { printk("\vPRIN SA READ_RESERVATIONS SCSI Data Length: %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } *buf = (unsigned char )(dev->t10_pr.pr_generation >> 24); *(buf + 1UL) = (unsigned char )(dev->t10_pr.pr_generation >> 16); *(buf + 2UL) = (unsigned char )(dev->t10_pr.pr_generation >> 8); *(buf + 3UL) = (unsigned char )dev->t10_pr.pr_generation; spin_lock(& dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; if ((unsigned long )pr_reg != (unsigned long )((struct t10_pr_registration *)0)) { *(buf + 4UL) = (unsigned char )(add_len >> 24); *(buf + 5UL) = (unsigned char )(add_len >> 16); *(buf + 6UL) = (unsigned char )(add_len >> 8); *(buf + 7UL) = (unsigned char )add_len; if (cmd->data_length <= 21U) { goto err; } else { } if (pr_reg->pr_res_type == 7 || pr_reg->pr_res_type == 8) { pr_res_key = 0ULL; } else { pr_res_key = pr_reg->pr_res_key; } *(buf + 8UL) = (unsigned char )(pr_res_key >> 56); *(buf + 9UL) = (unsigned char )(pr_res_key >> 48); *(buf + 10UL) = (unsigned char )(pr_res_key >> 40); *(buf + 11UL) = (unsigned char )(pr_res_key >> 32); *(buf + 12UL) = (unsigned char )(pr_res_key >> 24); *(buf + 13UL) = (unsigned char )(pr_res_key >> 16); *(buf + 14UL) = (unsigned char )(pr_res_key >> 8); *(buf + 15UL) = (unsigned char )pr_res_key; *(buf + 21UL) = (unsigned char )(((int )((signed char )pr_reg->pr_res_scope) & -16) | ((int )((signed char )pr_reg->pr_res_type) & 15)); } else { } err: spin_unlock(& dev->dev_reservation_lock); transport_kunmap_data_sg(cmd); return (0U); } } static sense_reason_t core_scsi3_pri_report_capabilities(struct se_cmd *cmd ) { struct se_device *dev ; struct t10_reservation *pr_tmpl ; unsigned char *buf ; u16 add_len ; void *tmp ; { dev = cmd->se_dev; pr_tmpl = & dev->t10_pr; add_len = 8U; if (cmd->data_length <= 5U) { printk("\vPRIN SA REPORT_CAPABILITIES SCSI Data Length: %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } *buf = (unsigned char )((int )add_len >> 8); *(buf + 1UL) = (unsigned char )add_len; *(buf + 2UL) = (unsigned int )*(buf + 2UL) | 16U; *(buf + 2UL) = (unsigned int )*(buf + 2UL) | 8U; *(buf + 2UL) = (unsigned int )*(buf + 2UL) | 4U; *(buf + 2UL) = (unsigned int )*(buf + 2UL) | 1U; *(buf + 3UL) = (unsigned int )*(buf + 3UL) | 128U; *(buf + 3UL) = (unsigned int )*(buf + 3UL) | 16U; if (pr_tmpl->pr_aptpl_active != 0) { *(buf + 3UL) = (unsigned int )*(buf + 3UL) | 1U; } else { } *(buf + 4UL) = (unsigned int )*(buf + 4UL) | 128U; *(buf + 4UL) = (unsigned int )*(buf + 4UL) | 64U; *(buf + 4UL) = (unsigned int )*(buf + 4UL) | 32U; *(buf + 4UL) = (unsigned int )*(buf + 4UL) | 8U; *(buf + 4UL) = (unsigned int )*(buf + 4UL) | 2U; *(buf + 5UL) = (unsigned int )*(buf + 5UL) | 1U; transport_kunmap_data_sg(cmd); return (0U); } } static sense_reason_t core_scsi3_pri_read_full_status(struct se_cmd *cmd ) { struct se_device *dev ; struct se_node_acl *se_nacl ; struct se_portal_group *se_tpg ; struct t10_pr_registration *pr_reg ; struct t10_pr_registration *pr_reg_tmp ; struct 
t10_reservation *pr_tmpl ; unsigned char *buf ; u32 add_desc_len ; u32 add_len ; u32 off ; int format_code ; int pr_res_type ; int pr_res_scope ; int exp_desc_len ; int desc_len ; bool all_reg ; void *tmp ; struct t10_pr_registration *pr_holder ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; u32 tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; u32 tmp___7 ; u32 tmp___8 ; u32 tmp___9 ; u32 tmp___10 ; u32 tmp___11 ; u16 sep_rtpi ; u32 tmp___12 ; u32 tmp___13 ; u32 tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; u32 tmp___17 ; struct list_head const *__mptr___1 ; { dev = cmd->se_dev; pr_tmpl = & dev->t10_pr; add_desc_len = 0U; add_len = 0U; off = 8U; format_code = 0; pr_res_type = 0; pr_res_scope = 0; all_reg = 0; if (cmd->data_length <= 7U) { printk("\vPRIN SA READ_FULL_STATUS SCSI Data Length: %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } *buf = (unsigned char )(dev->t10_pr.pr_generation >> 24); *(buf + 1UL) = (unsigned char )(dev->t10_pr.pr_generation >> 16); *(buf + 2UL) = (unsigned char )(dev->t10_pr.pr_generation >> 8); *(buf + 3UL) = (unsigned char )dev->t10_pr.pr_generation; spin_lock(& dev->dev_reservation_lock); if ((unsigned long )dev->dev_pr_res_holder != (unsigned long )((struct t10_pr_registration *)0)) { pr_holder = dev->dev_pr_res_holder; if (pr_holder->pr_res_type == 7 || pr_holder->pr_res_type == 8) { all_reg = 1; pr_res_type = pr_holder->pr_res_type; pr_res_scope = pr_holder->pr_res_scope; } else { } } else { } spin_unlock(& dev->dev_reservation_lock); spin_lock(& pr_tmpl->registration_lock); __mptr = (struct list_head const *)pr_tmpl->registration_list.next; pr_reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd90UL; __mptr___0 = (struct list_head const *)pr_reg->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd90UL; goto ldv_58409; ldv_58408: se_nacl = pr_reg->pr_reg_nacl; se_tpg = (pr_reg->pr_reg_nacl)->se_tpg; add_desc_len = 0U; atomic_inc_mb(& pr_reg->pr_res_holders); spin_unlock(& pr_tmpl->registration_lock); exp_desc_len = target_get_pr_transport_id_len(se_nacl, pr_reg, & format_code); if (exp_desc_len < 0 || (u32 )exp_desc_len + add_len > cmd->data_length) { printk("\fSPC-3 PRIN READ_FULL_STATUS ran out of buffer: %d\n", cmd->data_length); spin_lock(& pr_tmpl->registration_lock); atomic_dec_mb(& pr_reg->pr_res_holders); goto ldv_58406; } else { } tmp___0 = off; off = off + 1U; *(buf + (unsigned long )tmp___0) = (unsigned char )(pr_reg->pr_res_key >> 56); tmp___1 = off; off = off + 1U; *(buf + (unsigned long )tmp___1) = (unsigned char )(pr_reg->pr_res_key >> 48); tmp___2 = off; off = off + 1U; *(buf + (unsigned long )tmp___2) = (unsigned char )(pr_reg->pr_res_key >> 40); tmp___3 = off; off = off + 1U; *(buf + (unsigned long )tmp___3) = (unsigned char )(pr_reg->pr_res_key >> 32); tmp___4 = off; off = off + 1U; *(buf + (unsigned long )tmp___4) = (unsigned char )(pr_reg->pr_res_key >> 24); tmp___5 = off; off = off + 1U; *(buf + (unsigned long )tmp___5) = (unsigned char )(pr_reg->pr_res_key >> 16); tmp___6 = off; off = off + 1U; *(buf + (unsigned long )tmp___6) = (unsigned char )(pr_reg->pr_res_key >> 8); tmp___7 = off; off = off + 1U; *(buf + (unsigned long )tmp___7) = (unsigned char )pr_reg->pr_res_key; off = off + 4U; if (pr_reg->pr_reg_all_tg_pt != 0) { *(buf + (unsigned long )off) = 2U; } 
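/*
 * ALL_TG_PT flag of this READ FULL STATUS descriptor; the bytes that
 * follow appear to carry R_HOLDER plus SCOPE and TYPE, the RELATIVE
 * TARGET PORT IDENTIFIER (only when the registration is not ALL_TG_PT),
 * and the TransportID, whose size is written back as the ADDITIONAL
 * DESCRIPTOR LENGTH of the entry.
 */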
else { } if (pr_reg->pr_res_holder != 0) { tmp___8 = off; off = off + 1U; *(buf + (unsigned long )tmp___8) = (unsigned int )*(buf + (unsigned long )tmp___8) | 1U; tmp___9 = off; off = off + 1U; *(buf + (unsigned long )tmp___9) = (unsigned char )(((int )((signed char )pr_reg->pr_res_scope) & -16) | ((int )((signed char )pr_reg->pr_res_type) & 15)); } else if ((int )all_reg) { tmp___10 = off; off = off + 1U; *(buf + (unsigned long )tmp___10) = (unsigned int )*(buf + (unsigned long )tmp___10) | 1U; tmp___11 = off; off = off + 1U; *(buf + (unsigned long )tmp___11) = (unsigned char )(((int )((signed char )pr_res_scope) & -16) | ((int )((signed char )pr_res_type) & 15)); } else { off = off + 2U; } off = off + 4U; if (pr_reg->pr_reg_all_tg_pt == 0) { sep_rtpi = pr_reg->tg_pt_sep_rtpi; tmp___12 = off; off = off + 1U; *(buf + (unsigned long )tmp___12) = (unsigned char )((int )sep_rtpi >> 8); tmp___13 = off; off = off + 1U; *(buf + (unsigned long )tmp___13) = (unsigned char )sep_rtpi; } else { off = off + 2U; } *(buf + (unsigned long )(off + 4U)) = (unsigned char )se_tpg->proto_id; desc_len = target_get_pr_transport_id(se_nacl, pr_reg, & format_code, buf + (unsigned long )(off + 4U)); spin_lock(& pr_tmpl->registration_lock); atomic_dec_mb(& pr_reg->pr_res_holders); if (desc_len < 0) { goto ldv_58406; } else { } tmp___14 = off; off = off + 1U; *(buf + (unsigned long )tmp___14) = (unsigned char )((unsigned int )desc_len >> 24); tmp___15 = off; off = off + 1U; *(buf + (unsigned long )tmp___15) = (unsigned char )(desc_len >> 16); tmp___16 = off; off = off + 1U; *(buf + (unsigned long )tmp___16) = (unsigned char )(desc_len >> 8); tmp___17 = off; off = off + 1U; *(buf + (unsigned long )tmp___17) = (unsigned char )desc_len; add_desc_len = (u32 )(desc_len + 24); off = off + (u32 )desc_len; add_len = add_len + add_desc_len; pr_reg = pr_reg_tmp; __mptr___1 = (struct list_head const *)pr_reg_tmp->pr_reg_list.next; pr_reg_tmp = (struct t10_pr_registration *)__mptr___1 + 0xfffffffffffffd90UL; ldv_58409: ; if ((unsigned long )(& pr_reg->pr_reg_list) != (unsigned long )(& pr_tmpl->registration_list)) { goto ldv_58408; } else { } ldv_58406: spin_unlock(& pr_tmpl->registration_lock); *(buf + 4UL) = (unsigned char )(add_len >> 24); *(buf + 5UL) = (unsigned char )(add_len >> 16); *(buf + 6UL) = (unsigned char )(add_len >> 8); *(buf + 7UL) = (unsigned char )add_len; transport_kunmap_data_sg(cmd); return (0U); } } sense_reason_t target_scsi3_emulate_pr_in(struct se_cmd *cmd ) { sense_reason_t ret ; { if ((int )(cmd->se_dev)->dev_reservation_flags & 1) { printk("\vReceived PERSISTENT_RESERVE CDB while legacy SPC-2 reservation is held, returning RESERVATION_CONFLICT\n"); return (16U); } else { } switch ((int )*(cmd->t_task_cdb + 1UL) & 31) { case 0: ret = core_scsi3_pri_read_keys(cmd); goto ldv_58415; case 1: ret = core_scsi3_pri_read_reservation(cmd); goto ldv_58415; case 2: ret = core_scsi3_pri_report_capabilities(cmd); goto ldv_58415; case 3: ret = core_scsi3_pri_read_full_status(cmd); goto ldv_58415; default: printk("\vUnknown PERSISTENT_RESERVE_IN service action: 0x%02x\n", (int )*(cmd->t_task_cdb + 1UL) & 31); return (8U); } ldv_58415: ; if (ret == 0U) { target_complete_cmd(cmd, 0); } else { } return (ret); } } sense_reason_t target_check_reservation(struct se_cmd *cmd ) { struct se_device *dev ; sense_reason_t ret ; { dev = cmd->se_dev; if ((unsigned long )cmd->se_sess == (unsigned long )((struct se_session *)0)) { return (0U); } else { } if ((int )(dev->se_hba)->hba_flags & 1) { return (0U); } else { } if ((int 
)(dev->transport)->transport_flags & 1) { return (0U); } else { } spin_lock(& dev->dev_reservation_lock); if ((int )dev->dev_reservation_flags & 1) { ret = target_scsi2_reservation_check(cmd); } else { ret = target_scsi3_pr_reservation_check(cmd); } spin_unlock(& dev->dev_reservation_lock); return (ret); } } bool ldv_queue_work_on_227(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_228(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_229(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_230(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_231(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_232(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_233(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_234(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_235(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_236(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_237(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_238(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_239(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_240(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_241(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void 
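/*
 * The ldv_queue_work_on_NNN, ldv_queue_delayed_work_on_NNN,
 * ldv_flush_workqueue_NNN and ldv_mutex_lock_NNN and ldv_mutex_unlock_NNN
 * definitions in this region appear to be LDV instrumentation wrappers:
 * each one forwards to the corresponding kernel primitive (queue_work_on(),
 * flush_workqueue(), mutex_lock(), mutex_unlock(), ...) while also updating
 * the verifier's model state, e.g. activate_work_2() for queued work items
 * or the per-lock-class mutex bookkeeping functions.
 */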
ldv_mutex_unlock_242(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static int atomic64_add_unless___0(atomic64_t *v , long a , long u ) { long c ; long old ; long tmp ; long tmp___0 ; { c = atomic64_read((atomic64_t const *)v); ldv_5745: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5744; } else { } old = atomic64_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5744; } else { } c = old; goto ldv_5745; ldv_5744: ; return (c != u); } } int ldv_mutex_trylock_271(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_269(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_272(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_273(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_276(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_268(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_270(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_274(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_275(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_278(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) ; void ldv_mutex_unlock_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) ; void ldv_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) ; void ldv_mutex_unlock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) ; __inline static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6724; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6724; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6724; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6724; default: __bad_percpu_size(); } ldv_6724: ; return (pfo_ret__ & 2147483647); } } __inline static int queued_spin_is_locked(struct qspinlock *lock ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& lock->val)); return (tmp); } } extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern void delayed_work_timer_fn(unsigned long ) ; bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool 
ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) ; extern bool flush_delayed_work(struct delayed_work * ) ; bool ldv_flush_delayed_work_277(struct delayed_work *ldv_func_arg1 ) ; bool ldv_flush_delayed_work_280(struct delayed_work *ldv_func_arg1 ) ; __inline static bool queue_delayed_work(struct workqueue_struct *wq , struct delayed_work *dwork , unsigned long delay ) { bool tmp ; { tmp = ldv_queue_delayed_work_on_264(8192, wq, dwork, delay); return (tmp); } } __inline static bool __ref_is_percpu___1(struct percpu_ref *ref , unsigned long **percpu_countp ) { unsigned long percpu_ptr ; unsigned long _________p1 ; union __anonunion___u_192___1 __u ; long tmp ; { __read_once_size((void const volatile *)(& ref->percpu_count_ptr), (void *)(& __u.__c), 8); _________p1 = __u.__val; percpu_ptr = _________p1; tmp = ldv__builtin_expect((percpu_ptr & 3UL) != 0UL, 0L); if (tmp != 0L) { return (0); } else { } *percpu_countp = (unsigned long *)percpu_ptr; return (1); } } __inline static bool percpu_ref_tryget_live___0(struct percpu_ref *ref ) { unsigned long *percpu_count ; int ret ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; bool tmp ; { ret = 0; rcu_read_lock_sched___0(); tmp = __ref_is_percpu___1(ref, & percpu_count); if ((int )tmp) { __vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 1; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16601; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16601; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16601; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16601; default: __bad_percpu_size(); } ldv_16601: ; goto ldv_16606; case 2UL: pao_ID_____0 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16612; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16612; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" 
(*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16612; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16612; default: __bad_percpu_size(); } ldv_16612: ; goto ldv_16606; case 4UL: pao_ID_____1 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16622; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16622; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16622; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16622; default: __bad_percpu_size(); } ldv_16622: ; goto ldv_16606; case 8UL: pao_ID_____2 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (1UL)); } goto ldv_16632; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16632; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (1UL)); } goto ldv_16632; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (1UL)); } goto ldv_16632; default: __bad_percpu_size(); } ldv_16632: ; goto ldv_16606; default: __bad_size_call_parameter(); goto ldv_16606; } ldv_16606: ret = 1; } else if ((ref->percpu_count_ptr & 2UL) == 0UL) { ret = atomic64_add_unless___0(& ref->count, 1L, 0L); } else { } rcu_read_unlock_sched___0(); return (ret != 0); } } __inline static void percpu_ref_put_many___0(struct percpu_ref *ref , unsigned long nr ) { unsigned long *percpu_count ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; int tmp ; long tmp___0 ; bool tmp___1 ; { rcu_read_lock_sched___0(); tmp___1 = __ref_is_percpu___1(ref, & percpu_count); if ((int )tmp___1) { __vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 0; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if 
(pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16649; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16649; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16649; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16649; default: __bad_percpu_size(); } ldv_16649: ; goto ldv_16654; case 2UL: pao_ID_____0 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16660; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16660; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16660; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16660; default: __bad_percpu_size(); } ldv_16660: ; goto ldv_16654; case 4UL: pao_ID_____1 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16670; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16670; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16670; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16670; default: __bad_percpu_size(); } ldv_16670: ; goto ldv_16654; case 8UL: pao_ID_____2 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16680; case 2UL: ; 
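/*
 * These unrolled switch arms appear to be the CIL expansion of the
 * per-cpu fast path that subtracts nr from the per-cpu reference counter
 * of the percpu_ref; when the ref is no longer in per-cpu mode, the else
 * branch below instead uses atomic_long_sub_and_test() on ref->count and
 * invokes ref->release() once the count reaches zero.
 */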
if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16680; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16680; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16680; default: __bad_percpu_size(); } ldv_16680: ; goto ldv_16654; default: __bad_size_call_parameter(); goto ldv_16654; } ldv_16654: ; } else { tmp = atomic_long_sub_and_test((long )nr, & ref->count); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { (*(ref->release))(ref); } else { } } rcu_read_unlock_sched___0(); return; } } __inline static void percpu_ref_put___0(struct percpu_ref *ref ) { { percpu_ref_put_many___0(ref, 1UL); return; } } void disable_work_2(struct work_struct *work ) ; void call_and_disable_work_2(struct work_struct *work ) ; void invoke_work_2(void) ; __inline static void put_unaligned_be16(u16 val , void *p ) { __u16 tmp ; { tmp = __fswab16((int )val); *((__be16 *)p) = tmp; return; } } __inline static void put_unaligned_be32(u32 val , void *p ) { __u32 tmp ; { tmp = __fswab32(val); *((__be32 *)p) = tmp; return; } } __inline static void put_unaligned_be64(u64 val , void *p ) { __u64 tmp ; { tmp = __fswab64(val); *((__be64 *)p) = tmp; return; } } extern unsigned long msleep_interruptible(unsigned int ) ; int core_alua_check_nonop_delay(struct se_cmd *cmd ) ; struct t10_alua_lu_gp *default_lu_gp ; struct kmem_cache *t10_alua_lu_gp_cache ; struct kmem_cache *t10_alua_lu_gp_mem_cache ; struct kmem_cache *t10_alua_tg_pt_gp_cache ; struct kmem_cache *t10_alua_lba_map_cache ; struct kmem_cache *t10_alua_lba_map_mem_cache ; sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *cmd ) ; sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *cmd ) ; sense_reason_t target_emulate_report_referrals(struct se_cmd *cmd ) ; void target_detach_tg_pt_gp(struct se_lun *lun ) ; void target_attach_tg_pt_gp(struct se_lun *lun , struct t10_alua_tg_pt_gp *tg_pt_gp ) ; sense_reason_t target_alua_state_check(struct se_cmd *cmd ) ; static sense_reason_t core_alua_check_transition(int state , int valid , int *primary ) ; static int core_alua_set_tg_pt_secondary_state(struct se_lun *lun , int explicit , int offline ) ; static char *core_alua_dump_state(int state ) ; static void __target_attach_tg_pt_gp(struct se_lun *lun , struct t10_alua_tg_pt_gp *tg_pt_gp ) ; static u16 alua_lu_gps_counter ; static u32 alua_lu_gps_count ; static spinlock_t lu_gps_lock = {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "lu_gps_lock", 0, 0UL}}}}; static struct list_head lu_gps_list = {& lu_gps_list, & lu_gps_list}; sense_reason_t target_emulate_report_referrals(struct se_cmd *cmd ) { struct se_device *dev ; struct t10_alua_lba_map *map ; struct t10_alua_lba_map_member *map_mem ; unsigned char *buf ; u32 rd_len ; u32 off ; void *tmp ; int tmp___0 ; struct list_head const *__mptr ; int desc_num ; int pg_num ; struct list_head const *__mptr___0 ; int alua_state ; int alua_pg_id ; struct list_head const *__mptr___1 ; 
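/* target_emulate_report_referrals(): emulation of the REPORT REFERRALS CDB.
 * After checking that the allocation length is at least 4 bytes and mapping the data
 * buffer, it walks dev->t10_alua.lba_map_list under lba_map_lock and, for each LBA map,
 * emits the first/last LBA followed by one (ALUA state, port group id) entry per
 * lba_map_mem member, writing only the bytes that fit within cmd->data_length; the
 * accumulated descriptor length is stored big-endian at offset 2 before the buffer is
 * unmapped and the command completed. */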
struct list_head const *__mptr___2 ; { dev = cmd->se_dev; rd_len = 0U; if (cmd->data_length <= 3U) { printk("\fREPORT REFERRALS allocation length %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } off = 4U; spin_lock(& dev->t10_alua.lba_map_lock); tmp___0 = list_empty((struct list_head const *)(& dev->t10_alua.lba_map_list)); if (tmp___0 != 0) { spin_unlock(& dev->t10_alua.lba_map_lock); transport_kunmap_data_sg(cmd); return (2U); } else { } __mptr = (struct list_head const *)dev->t10_alua.lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr + 0xfffffffffffffff0UL; goto ldv_57355; ldv_57354: desc_num = (int )(off + 3U); off = off + 4U; if (cmd->data_length > off) { put_unaligned_be64(map->lba_map_first_lba, (void *)buf + (unsigned long )off); } else { } off = off + 8U; if (cmd->data_length > off) { put_unaligned_be64(map->lba_map_last_lba, (void *)buf + (unsigned long )off); } else { } off = off + 8U; rd_len = rd_len + 20U; pg_num = 0; __mptr___0 = (struct list_head const *)map->lba_map_mem_list.next; map_mem = (struct t10_alua_lba_map_member *)__mptr___0; goto ldv_57352; ldv_57351: alua_state = map_mem->lba_map_mem_alua_state; alua_pg_id = map_mem->lba_map_mem_alua_pg_id; if (cmd->data_length > off) { *(buf + (unsigned long )off) = (unsigned int )((unsigned char )alua_state) & 15U; } else { } off = off + 2U; if (cmd->data_length > off) { *(buf + (unsigned long )off) = (unsigned char )(alua_pg_id >> 8); } else { } off = off + 1U; if (cmd->data_length > off) { *(buf + (unsigned long )off) = (unsigned char )alua_pg_id; } else { } off = off + 1U; rd_len = rd_len + 4U; pg_num = pg_num + 1; __mptr___1 = (struct list_head const *)map_mem->lba_map_mem_list.next; map_mem = (struct t10_alua_lba_map_member *)__mptr___1; ldv_57352: ; if ((unsigned long )(& map_mem->lba_map_mem_list) != (unsigned long )(& map->lba_map_mem_list)) { goto ldv_57351; } else { } if (cmd->data_length > (u32 )desc_num) { *(buf + (unsigned long )desc_num) = (unsigned char )pg_num; } else { } __mptr___2 = (struct list_head const *)map->lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr___2 + 0xfffffffffffffff0UL; ldv_57355: ; if ((unsigned long )(& map->lba_map_list) != (unsigned long )(& dev->t10_alua.lba_map_list)) { goto ldv_57354; } else { } spin_unlock(& dev->t10_alua.lba_map_lock); put_unaligned_be16((int )((u16 )rd_len), (void *)buf + 2U); transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, 0); return (0U); } } sense_reason_t target_emulate_report_target_port_groups(struct se_cmd *cmd ) { struct se_device *dev ; struct t10_alua_tg_pt_gp *tg_pt_gp ; struct se_lun *lun ; unsigned char *buf ; u32 rd_len ; u32 off ; int ext_hdr ; void *tmp ; struct list_head const *__mptr ; u32 tmp___0 ; u32 tmp___1 ; int tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; u32 tmp___7 ; u32 tmp___8 ; u32 tmp___9 ; struct list_head const *__mptr___0 ; u32 tmp___10 ; u32 tmp___11 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { dev = cmd->se_dev; rd_len = 0U; ext_hdr = (int )*(cmd->t_task_cdb + 1UL) & 32; if (ext_hdr != 0) { off = 8U; } else { off = 4U; } if (cmd->data_length < off) { printk("\fREPORT TARGET PORT GROUPS allocation length %u too small for %s header\n", cmd->data_length, ext_hdr != 0 ? 
(char *)"extended" : (char *)"normal"); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } spin_lock(& dev->t10_alua.tg_pt_gps_lock); __mptr = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe60UL; goto ldv_57380; ldv_57379: ; if ((tg_pt_gp->tg_pt_gp_members * 4U + off) + 8U > cmd->data_length) { rd_len = (tg_pt_gp->tg_pt_gp_members + 2U) * 4U + rd_len; goto ldv_57371; } else { } if (tg_pt_gp->tg_pt_gp_pref != 0) { *(buf + (unsigned long )off) = 128U; } else { } tmp___0 = off; off = off + 1U; tmp___1 = off; off = off + 1U; tmp___2 = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); *(buf + (unsigned long )tmp___0) = (unsigned char )((int )((signed char )*(buf + (unsigned long )tmp___1)) | (int )((signed char )tmp___2)); tmp___3 = off; off = off + 1U; tmp___4 = off; off = off + 1U; *(buf + (unsigned long )tmp___3) = (unsigned char )((int )((signed char )*(buf + (unsigned long )tmp___4)) | (int )((signed char )tg_pt_gp->tg_pt_gp_alua_supported_states)); tmp___5 = off; off = off + 1U; *(buf + (unsigned long )tmp___5) = (unsigned char )((int )tg_pt_gp->tg_pt_gp_id >> 8); tmp___6 = off; off = off + 1U; *(buf + (unsigned long )tmp___6) = (unsigned char )tg_pt_gp->tg_pt_gp_id; off = off + 1U; tmp___7 = off; off = off + 1U; *(buf + (unsigned long )tmp___7) = (unsigned char )tg_pt_gp->tg_pt_gp_alua_access_status; tmp___8 = off; off = off + 1U; *(buf + (unsigned long )tmp___8) = 0U; tmp___9 = off; off = off + 1U; *(buf + (unsigned long )tmp___9) = (unsigned char )tg_pt_gp->tg_pt_gp_members; rd_len = rd_len + 8U; spin_lock(& tg_pt_gp->tg_pt_gp_lock); __mptr___0 = (struct list_head const *)tg_pt_gp->tg_pt_gp_lun_list.next; lun = (struct se_lun *)__mptr___0 + 0xfffffffffffffed0UL; goto ldv_57377; ldv_57376: off = off + 2U; tmp___10 = off; off = off + 1U; *(buf + (unsigned long )tmp___10) = (unsigned char )((int )lun->lun_rtpi >> 8); tmp___11 = off; off = off + 1U; *(buf + (unsigned long )tmp___11) = (unsigned char )lun->lun_rtpi; rd_len = rd_len + 4U; __mptr___1 = (struct list_head const *)lun->lun_tg_pt_gp_link.next; lun = (struct se_lun *)__mptr___1 + 0xfffffffffffffed0UL; ldv_57377: ; if ((unsigned long )(& lun->lun_tg_pt_gp_link) != (unsigned long )(& tg_pt_gp->tg_pt_gp_lun_list)) { goto ldv_57376; } else { } spin_unlock(& tg_pt_gp->tg_pt_gp_lock); ldv_57371: __mptr___2 = (struct list_head const *)tg_pt_gp->tg_pt_gp_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___2 + 0xfffffffffffffe60UL; ldv_57380: ; if ((unsigned long )(& tg_pt_gp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57379; } else { } spin_unlock(& dev->t10_alua.tg_pt_gps_lock); put_unaligned_be32(rd_len, (void *)buf); if (ext_hdr != 0) { *(buf + 4UL) = 16U; spin_lock(& (cmd->se_lun)->lun_tg_pt_gp_lock); tg_pt_gp = (cmd->se_lun)->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp != (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { *(buf + 5UL) = (unsigned char )tg_pt_gp->tg_pt_gp_implicit_trans_secs; } else { } spin_unlock(& (cmd->se_lun)->lun_tg_pt_gp_lock); } else { } transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, 0); return (0U); } } sense_reason_t target_emulate_set_target_port_groups(struct se_cmd *cmd ) { struct se_device *dev ; struct se_lun *l_lun ; struct se_node_acl *nacl ; struct t10_alua_tg_pt_gp *tg_pt_gp ; struct t10_alua_tg_pt_gp *l_tg_pt_gp ; unsigned 
char *buf ; unsigned char *ptr ; sense_reason_t rc ; u32 len ; int alua_access_state ; int primary ; int valid_states ; u16 tg_pt_id ; u16 rtpi ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; bool found ; struct list_head const *__mptr ; int tmp___1 ; struct list_head const *__mptr___0 ; struct se_lun *lun ; struct list_head const *__mptr___1 ; int tmp___2 ; struct list_head const *__mptr___2 ; { dev = cmd->se_dev; l_lun = cmd->se_lun; nacl = (cmd->se_sess)->se_node_acl; tg_pt_gp = (struct t10_alua_tg_pt_gp *)0; rc = 0U; len = 4U; primary = 0; if (cmd->data_length <= 3U) { printk("\fSET TARGET PORT GROUPS parameter list length %u too small\n", cmd->data_length); return (9U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } spin_lock(& l_lun->lun_tg_pt_gp_lock); l_tg_pt_gp = l_lun->lun_tg_pt_gp; if ((unsigned long )l_tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { spin_unlock(& l_lun->lun_tg_pt_gp_lock); printk("\vUnable to access l_lun->tg_pt_gp\n"); rc = 2U; goto out; } else { } if ((l_tg_pt_gp->tg_pt_gp_alua_access_type & 32) == 0) { spin_unlock(& l_lun->lun_tg_pt_gp_lock); descriptor.modname = "target_core_mod"; descriptor.function = "target_emulate_set_target_port_groups"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "Unable to process SET_TARGET_PORT_GROUPS while TPGS_EXPLICIT_ALUA is disabled\n"; descriptor.lineno = 318U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "Unable to process SET_TARGET_PORT_GROUPS while TPGS_EXPLICIT_ALUA is disabled\n"); } else { } rc = 2U; goto out; } else { } valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; spin_unlock(& l_lun->lun_tg_pt_gp_lock); ptr = buf + 4UL; goto ldv_57421; ldv_57420: found = 0; alua_access_state = (int )*ptr & 15; rc = core_alua_check_transition(alua_access_state, valid_states, & primary); if (rc != 0U) { goto out; } else { } if (primary != 0) { tg_pt_id = get_unaligned_be16((void const *)ptr + 2U); spin_lock(& dev->t10_alua.tg_pt_gps_lock); __mptr = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe60UL; goto ldv_57410; ldv_57409: ; if (tg_pt_gp->tg_pt_gp_valid_id == 0) { goto ldv_57407; } else { } if ((int )tg_pt_gp->tg_pt_gp_id != (int )tg_pt_id) { goto ldv_57407; } else { } atomic_inc_mb(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); tmp___1 = core_alua_do_port_transition(tg_pt_gp, dev, l_lun, nacl, alua_access_state, 1); if (tmp___1 == 0) { found = 1; } else { } spin_lock(& dev->t10_alua.tg_pt_gps_lock); atomic_dec_mb(& tg_pt_gp->tg_pt_gp_ref_cnt); goto ldv_57408; ldv_57407: __mptr___0 = (struct list_head const *)tg_pt_gp->tg_pt_gp_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___0 + 0xfffffffffffffe60UL; ldv_57410: ; if ((unsigned long )(& tg_pt_gp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57409; } else { } ldv_57408: spin_unlock(& dev->t10_alua.tg_pt_gps_lock); } else { rtpi = get_unaligned_be16((void const *)ptr + 2U); spin_lock(& dev->se_port_lock); __mptr___1 = (struct list_head const *)dev->dev_sep_list.next; lun = 
(struct se_lun *)__mptr___1 + 0xfffffffffffffb88UL; goto ldv_57419; ldv_57418: ; if ((int )lun->lun_rtpi != (int )rtpi) { goto ldv_57416; } else { } spin_unlock(& dev->se_port_lock); tmp___2 = core_alua_set_tg_pt_secondary_state(lun, 1, 1); if (tmp___2 == 0) { found = 1; } else { } spin_lock(& dev->se_port_lock); goto ldv_57417; ldv_57416: __mptr___2 = (struct list_head const *)lun->lun_dev_link.next; lun = (struct se_lun *)__mptr___2 + 0xfffffffffffffb88UL; ldv_57419: ; if ((unsigned long )(& lun->lun_dev_link) != (unsigned long )(& dev->dev_sep_list)) { goto ldv_57418; } else { } ldv_57417: spin_unlock(& dev->se_port_lock); } if (! found) { rc = 9U; goto out; } else { } ptr = ptr + 4UL; len = len + 4U; ldv_57421: ; if (cmd->data_length > len) { goto ldv_57420; } else { } out: transport_kunmap_data_sg(cmd); if (rc == 0U) { target_complete_cmd(cmd, 0); } else { } return (rc); } } __inline static void set_ascq(struct se_cmd *cmd , u8 alua_ascq ) { struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { descriptor.modname = "target_core_mod"; descriptor.function = "set_ascq"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "[%s]: ALUA TG Port not available, SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n"; descriptor.lineno = 452U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "[%s]: ALUA TG Port not available, SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n", tmp, (int )alua_ascq); } else { } cmd->scsi_asc = 4U; cmd->scsi_ascq = alua_ascq; return; } } __inline static void core_alua_state_nonoptimized(struct se_cmd *cmd , unsigned char *cdb , int nonop_delay_msecs ) { { cmd->se_cmd_flags = cmd->se_cmd_flags | 32768U; cmd->alua_nonop_delay = nonop_delay_msecs; return; } } __inline static int core_alua_state_lba_dependent(struct se_cmd *cmd , struct t10_alua_tg_pt_gp *tg_pt_gp ) { struct se_device *dev ; u64 segment_size ; u64 segment_mult ; u64 sectors ; u64 lba ; struct t10_alua_lba_map *cur_map ; struct t10_alua_lba_map *map ; struct t10_alua_lba_map_member *map_mem ; struct list_head const *__mptr ; u64 start_lba ; u64 last_lba ; u64 first_lba ; u64 tmp ; uint32_t __base ; uint32_t __rem ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { dev = cmd->se_dev; if ((cmd->se_cmd_flags & 8U) == 0U) { return (0); } else { } spin_lock(& dev->t10_alua.lba_map_lock); segment_size = (u64 )dev->t10_alua.lba_map_segment_size; segment_mult = (u64 )dev->t10_alua.lba_map_segment_multiplier; sectors = (u64 )(cmd->data_length / dev->dev_attrib.block_size); lba = cmd->t_task_lba; goto ldv_57473; ldv_57472: cur_map = (struct t10_alua_lba_map *)0; __mptr = (struct list_head const *)dev->t10_alua.lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr + 0xfffffffffffffff0UL; goto ldv_57459; ldv_57458: first_lba = map->lba_map_first_lba; if (segment_mult != 0ULL) { tmp = lba; __base = (uint32_t )segment_size * (uint32_t )segment_mult; __rem = (uint32_t )(tmp % (u64 )__base); tmp = tmp / (u64 )__base; start_lba = (u64 )__rem; last_lba = (first_lba + segment_size) - 1ULL; if (start_lba >= first_lba && start_lba <= last_lba) { lba = lba + segment_size; cur_map = map; goto ldv_57457; } else { } } else { last_lba 
= map->lba_map_last_lba; if (lba >= first_lba && lba <= last_lba) { lba = last_lba + 1ULL; cur_map = map; goto ldv_57457; } else { } } __mptr___0 = (struct list_head const *)map->lba_map_list.next; map = (struct t10_alua_lba_map *)__mptr___0 + 0xfffffffffffffff0UL; ldv_57459: ; if ((unsigned long )(& map->lba_map_list) != (unsigned long )(& dev->t10_alua.lba_map_list)) { goto ldv_57458; } else { } ldv_57457: ; if ((unsigned long )cur_map == (unsigned long )((struct t10_alua_lba_map *)0)) { spin_unlock(& dev->t10_alua.lba_map_lock); set_ascq(cmd, 12); return (1); } else { } __mptr___1 = (struct list_head const *)cur_map->lba_map_mem_list.next; map_mem = (struct t10_alua_lba_map_member *)__mptr___1; goto ldv_57470; ldv_57469: ; if (map_mem->lba_map_mem_alua_pg_id != (int )tg_pt_gp->tg_pt_gp_id) { goto ldv_57464; } else { } switch (map_mem->lba_map_mem_alua_state) { case 2: spin_unlock(& dev->t10_alua.lba_map_lock); set_ascq(cmd, 11); return (1); case 3: spin_unlock(& dev->t10_alua.lba_map_lock); set_ascq(cmd, 12); return (1); default: ; goto ldv_57468; } ldv_57468: ; ldv_57464: __mptr___2 = (struct list_head const *)map_mem->lba_map_mem_list.next; map_mem = (struct t10_alua_lba_map_member *)__mptr___2; ldv_57470: ; if ((unsigned long )(& map_mem->lba_map_mem_list) != (unsigned long )(& cur_map->lba_map_mem_list)) { goto ldv_57469; } else { } ldv_57473: ; if (cmd->t_task_lba + sectors > lba) { goto ldv_57472; } else { } spin_unlock(& dev->t10_alua.lba_map_lock); return (0); } } __inline static int core_alua_state_standby(struct se_cmd *cmd , unsigned char *cdb ) { { switch ((int )*cdb) { case 18: ; case 76: ; case 77: ; case 21: ; case 26: ; case 160: ; case 28: ; case 29: ; case 37: ; return (0); case 158: ; switch ((int )*(cdb + 1UL) & 31) { case 16: ; return (0); default: set_ascq(cmd, 11); return (1); } case 163: ; switch ((int )*(cdb + 1UL) & 31) { case 10: ; return (0); default: set_ascq(cmd, 11); return (1); } case 164: ; switch ((int )*(cdb + 1UL)) { case 10: ; return (0); default: set_ascq(cmd, 11); return (1); } case 3: ; case 94: ; case 95: ; case 60: ; case 59: ; return (0); default: set_ascq(cmd, 11); return (1); } return (0); } } __inline static int core_alua_state_unavailable(struct se_cmd *cmd , unsigned char *cdb ) { { switch ((int )*cdb) { case 18: ; case 160: ; return (0); case 163: ; switch ((int )*(cdb + 1UL) & 31) { case 10: ; return (0); default: set_ascq(cmd, 12); return (1); } case 164: ; switch ((int )*(cdb + 1UL)) { case 10: ; return (0); default: set_ascq(cmd, 12); return (1); } case 3: ; case 60: ; case 59: ; return (0); default: set_ascq(cmd, 12); return (1); } return (0); } } __inline static int core_alua_state_transition(struct se_cmd *cmd , unsigned char *cdb ) { { switch ((int )*cdb) { case 18: ; case 160: ; return (0); case 163: ; switch ((int )*(cdb + 1UL) & 31) { case 10: ; return (0); default: set_ascq(cmd, 10); return (1); } case 3: ; case 60: ; case 59: ; return (0); default: set_ascq(cmd, 10); return (1); } return (0); } } sense_reason_t target_alua_state_check(struct se_cmd *cmd ) { struct se_device *dev ; unsigned char *cdb ; struct se_lun *lun ; struct t10_alua_tg_pt_gp *tg_pt_gp ; int out_alua_state ; int nonop_delay_msecs ; struct _ddebug descriptor ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { dev = cmd->se_dev; cdb = cmd->t_task_cdb; lun = cmd->se_lun; if ((int )(dev->se_hba)->hba_flags & 1) { return (0U); } else { } if ((int )(dev->transport)->transport_flags & 1) { return (0U); } else { } tmp___0 = 
atomic_read((atomic_t const *)(& lun->lun_tg_pt_secondary_offline)); if (tmp___0 != 0) { descriptor.modname = "target_core_mod"; descriptor.function = "target_alua_state_check"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "ALUA: Got secondary offline status for local target port\n"; descriptor.lineno = 700U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "ALUA: Got secondary offline status for local target port\n"); } else { } set_ascq(cmd, 18); return (15U); } else { } if ((unsigned long )lun->lun_tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { return (0U); } else { } spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; out_alua_state = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; spin_unlock(& lun->lun_tg_pt_gp_lock); if (out_alua_state == 0) { return (0U); } else { } switch (out_alua_state) { case 1: core_alua_state_nonoptimized(cmd, cdb, nonop_delay_msecs); goto ldv_57544; case 2: tmp___1 = core_alua_state_standby(cmd, cdb); if (tmp___1 != 0) { return (15U); } else { } goto ldv_57544; case 3: tmp___2 = core_alua_state_unavailable(cmd, cdb); if (tmp___2 != 0) { return (15U); } else { } goto ldv_57544; case 15: tmp___3 = core_alua_state_transition(cmd, cdb); if (tmp___3 != 0) { return (15U); } else { } goto ldv_57544; case 4: tmp___4 = core_alua_state_lba_dependent(cmd, tg_pt_gp); if (tmp___4 != 0) { return (15U); } else { } goto ldv_57544; case 14: ; default: printk("\vUnknown ALUA access state: 0x%02x\n", out_alua_state); return (8U); } ldv_57544: ; return (0U); } } static sense_reason_t core_alua_check_transition(int state , int valid , int *primary ) { char *tmp ; { switch (state) { case 0: ; if ((valid & 1) == 0) { goto not_supported; } else { } *primary = 1; goto ldv_57558; case 1: ; if ((valid & 2) == 0) { goto not_supported; } else { } *primary = 1; goto ldv_57558; case 2: ; if ((valid & 4) == 0) { goto not_supported; } else { } *primary = 1; goto ldv_57558; case 3: ; if ((valid & 8) == 0) { goto not_supported; } else { } *primary = 1; goto ldv_57558; case 4: ; if ((valid & 16) == 0) { goto not_supported; } else { } *primary = 1; goto ldv_57558; case 14: ; if ((valid & 64) == 0) { goto not_supported; } else { } *primary = 0; goto ldv_57558; case 15: ; goto not_supported; default: printk("\vUnknown ALUA access state: 0x%02x\n", state); return (9U); } ldv_57558: ; return (0U); not_supported: tmp = core_alua_dump_state(state); printk("\vALUA access state %s not supported", tmp); return (9U); } } static char *core_alua_dump_state(int state ) { { switch (state) { case 0: ; return ((char *)"Active/Optimized"); case 1: ; return ((char *)"Active/NonOptimized"); case 4: ; return ((char *)"LBA Dependent"); case 2: ; return ((char *)"Standby"); case 3: ; return ((char *)"Unavailable"); case 14: ; return ((char *)"Offline"); case 15: ; return ((char *)"Transitioning"); default: ; return ((char *)"Unknown"); } return ((char *)0); } } char *core_alua_dump_status(int status ) { { switch (status) { case 0: ; return ((char *)"None"); case 1: ; return ((char *)"Altered by Explicit STPG"); case 2: ; return ((char *)"Altered by Implicit ALUA"); default: ; return ((char *)"Unknown"); } return 
((char *)0); } } int core_alua_check_nonop_delay(struct se_cmd *cmd ) { int tmp ; { if ((cmd->se_cmd_flags & 32768U) == 0U) { return (0); } else { } tmp = preempt_count(); if (((unsigned long )tmp & 2096896UL) != 0UL) { return (0); } else { } if (cmd->alua_nonop_delay == 0) { return (0); } else { } msleep_interruptible((unsigned int )cmd->alua_nonop_delay); return (0); } } static char const __kstrtab_core_alua_check_nonop_delay[28U] = { 'c', 'o', 'r', 'e', '_', 'a', 'l', 'u', 'a', '_', 'c', 'h', 'e', 'c', 'k', '_', 'n', 'o', 'n', 'o', 'p', '_', 'd', 'e', 'l', 'a', 'y', '\000'}; struct kernel_symbol const __ksymtab_core_alua_check_nonop_delay ; struct kernel_symbol const __ksymtab_core_alua_check_nonop_delay = {(unsigned long )(& core_alua_check_nonop_delay), (char const *)(& __kstrtab_core_alua_check_nonop_delay)}; static int core_alua_write_tpg_metadata(char const *path , unsigned char *md_buf , u32 md_buf_len ) { struct file *file ; struct file *tmp ; int ret ; bool tmp___0 ; ssize_t tmp___1 ; { tmp = filp_open(path, 578, 384); file = tmp; tmp___0 = IS_ERR((void const *)file); if ((int )tmp___0) { printk("\vfilp_open(%s) for ALUA metadata failed\n", path); return (-19); } else { } tmp___1 = kernel_write(file, (char const *)md_buf, (size_t )md_buf_len, 0LL); ret = (int )tmp___1; if (ret < 0) { printk("\vError writing ALUA metadata file: %s\n", path); } else { } fput(file); return (ret < 0 ? -5 : 0); } } static int core_alua_update_tpg_primary_metadata(struct t10_alua_tg_pt_gp *tg_pt_gp ) { unsigned char *md_buf ; struct t10_wwn *wwn ; char path[512U] ; int len ; int rc ; void *tmp ; char *tmp___0 ; { wwn = & (tg_pt_gp->tg_pt_gp_dev)->t10_wwn; tmp = kzalloc(1024UL, 208U); md_buf = (unsigned char *)tmp; if ((unsigned long )md_buf == (unsigned long )((unsigned char *)0U)) { printk("\vUnable to allocate buf for ALUA metadata\n"); return (-12); } else { } memset((void *)(& path), 0, 512UL); len = snprintf((char *)md_buf, 1024UL, "tg_pt_gp_id=%hu\nalua_access_state=0x%02x\nalua_access_status=0x%02x\n", (int )tg_pt_gp->tg_pt_gp_id, tg_pt_gp->tg_pt_gp_alua_pending_state, tg_pt_gp->tg_pt_gp_alua_access_status); tmp___0 = config_item_name(& tg_pt_gp->tg_pt_gp_group.cg_item); snprintf((char *)(& path), 512UL, "/var/target/alua/tpgs_%s/%s", (char *)(& wwn->unit_serial), tmp___0); rc = core_alua_write_tpg_metadata((char const *)(& path), md_buf, (u32 )len); kfree((void const *)md_buf); return (rc); } } static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp ) { struct se_dev_entry *se_deve ; struct se_lun *lun ; struct se_lun_acl *lacl ; struct list_head const *__mptr ; bool tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct se_lun_acl *________p1 ; struct se_lun_acl *_________p1 ; union __anonunion___u_382___1 __u ; bool __warned ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { spin_lock(& tg_pt_gp->tg_pt_gp_lock); __mptr = (struct list_head const *)tg_pt_gp->tg_pt_gp_lun_list.next; lun = (struct se_lun *)__mptr + 0xfffffffffffffed0UL; goto ldv_57639; ldv_57638: tmp = percpu_ref_tryget_live___0(& lun->lun_ref); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_57619; } else { } spin_unlock(& tg_pt_gp->tg_pt_gp_lock); spin_lock(& lun->lun_deve_lock); __mptr___0 = (struct list_head const *)lun->lun_deve_list.next; se_deve = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffef0UL; goto ldv_57636; ldv_57635: __read_once_size((void const volatile *)(& se_deve->se_lun_acl), 
(void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned) { tmp___2 = lock_is_held(& lun->lun_deve_lock.__annonCompField17.__annonCompField16.dep_map); if (tmp___2 == 0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c", 973, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } lacl = ________p1; if ((tg_pt_gp->tg_pt_gp_alua_access_status == 1 && (unsigned long )tg_pt_gp->tg_pt_gp_alua_lun != (unsigned long )((struct se_lun *)0)) && (unsigned long )tg_pt_gp->tg_pt_gp_alua_lun == (unsigned long )lun) { goto ldv_57634; } else { } if (((unsigned long )lacl != (unsigned long )((struct se_lun_acl *)0) && (unsigned long )tg_pt_gp->tg_pt_gp_alua_nacl != (unsigned long )((struct se_node_acl *)0)) && (unsigned long )tg_pt_gp->tg_pt_gp_alua_nacl == (unsigned long )lacl->se_lun_nacl) { goto ldv_57634; } else { } core_scsi3_ua_allocate(se_deve, 42, 6); ldv_57634: __mptr___1 = (struct list_head const *)se_deve->lun_link.next; se_deve = (struct se_dev_entry *)__mptr___1 + 0xfffffffffffffef0UL; ldv_57636: ; if ((unsigned long )(& se_deve->lun_link) != (unsigned long )(& lun->lun_deve_list)) { goto ldv_57635; } else { } spin_unlock(& lun->lun_deve_lock); spin_lock(& tg_pt_gp->tg_pt_gp_lock); percpu_ref_put___0(& lun->lun_ref); ldv_57619: __mptr___2 = (struct list_head const *)lun->lun_tg_pt_gp_link.next; lun = (struct se_lun *)__mptr___2 + 0xfffffffffffffed0UL; ldv_57639: ; if ((unsigned long )(& lun->lun_tg_pt_gp_link) != (unsigned long )(& tg_pt_gp->tg_pt_gp_lun_list)) { goto ldv_57638; } else { } spin_unlock(& tg_pt_gp->tg_pt_gp_lock); return; } } static void core_alua_do_transition_tg_pt_work(struct work_struct *work ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; struct work_struct const *__mptr ; struct se_device *dev ; bool explicit ; struct _ddebug descriptor ; char *tmp ; char *tmp___0 ; char *tmp___1 ; long tmp___2 ; { __mptr = (struct work_struct const *)work; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe30UL; dev = tg_pt_gp->tg_pt_gp_dev; explicit = tg_pt_gp->tg_pt_gp_alua_access_status == 1; if (tg_pt_gp->tg_pt_gp_write_metadata != 0) { ldv_mutex_lock_275(& tg_pt_gp->tg_pt_gp_md_mutex); core_alua_update_tpg_primary_metadata(tg_pt_gp); ldv_mutex_unlock_276(& tg_pt_gp->tg_pt_gp_md_mutex); } else { } atomic_set(& tg_pt_gp->tg_pt_gp_alua_access_state, tg_pt_gp->tg_pt_gp_alua_pending_state); descriptor.modname = "target_core_mod"; descriptor.function = "core_alua_do_transition_tg_pt_work"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "Successful %s ALUA transition TG PT Group: %s ID: %hu from primary access state %s to %s\n"; descriptor.lineno = 1046U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp = core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state); tmp___0 = core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_previous_state); tmp___1 = config_item_name(& tg_pt_gp->tg_pt_gp_group.cg_item); __dynamic_pr_debug(& descriptor, "Successful %s ALUA 
transition TG PT Group: %s ID: %hu from primary access state %s to %s\n", (int )explicit ? (char *)"explicit" : (char *)"implicit", tmp___1, (int )tg_pt_gp->tg_pt_gp_id, tmp___0, tmp); } else { } core_alua_queue_state_change_ua(tg_pt_gp); spin_lock(& dev->t10_alua.tg_pt_gps_lock); atomic_dec(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); if ((unsigned long )tg_pt_gp->tg_pt_gp_transition_complete != (unsigned long )((struct completion *)0)) { complete(tg_pt_gp->tg_pt_gp_transition_complete); } else { } return; } } static int core_alua_do_transition_tg_pt(struct t10_alua_tg_pt_gp *tg_pt_gp , int new_state , int explicit ) { struct se_device *dev ; struct completion wait ; int tmp ; int tmp___0 ; unsigned long transition_tmo ; { dev = tg_pt_gp->tg_pt_gp_dev; init_completion(& wait); wait = wait; tmp = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); if (tmp == new_state) { return (0); } else { } if (new_state == 15) { return (-11); } else { } if (explicit == 0 && tg_pt_gp->tg_pt_gp_implicit_trans_secs != 0) { tmp___0 = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); if (tmp___0 == 15) { tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; tg_pt_gp->tg_pt_gp_transition_complete = & wait; ldv_flush_delayed_work_277(& tg_pt_gp->tg_pt_gp_transition_work); wait_for_completion(& wait); tg_pt_gp->tg_pt_gp_transition_complete = (struct completion *)0; return (0); } else { } } else { } tg_pt_gp->tg_pt_gp_alua_previous_state = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; atomic_set(& tg_pt_gp->tg_pt_gp_alua_access_state, 15); tg_pt_gp->tg_pt_gp_alua_access_status = explicit != 0 ? 1 : 2; core_alua_queue_state_change_ua(tg_pt_gp); if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0) { msleep_interruptible((unsigned int )tg_pt_gp->tg_pt_gp_trans_delay_msecs); } else { } spin_lock(& dev->t10_alua.tg_pt_gps_lock); atomic_inc(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); if (explicit == 0 && tg_pt_gp->tg_pt_gp_implicit_trans_secs != 0) { transition_tmo = (unsigned long )(tg_pt_gp->tg_pt_gp_implicit_trans_secs * 250); queue_delayed_work((tg_pt_gp->tg_pt_gp_dev)->tmr_wq, & tg_pt_gp->tg_pt_gp_transition_work, transition_tmo); } else { tg_pt_gp->tg_pt_gp_transition_complete = & wait; queue_delayed_work((tg_pt_gp->tg_pt_gp_dev)->tmr_wq, & tg_pt_gp->tg_pt_gp_transition_work, 0UL); wait_for_completion(& wait); tg_pt_gp->tg_pt_gp_transition_complete = (struct completion *)0; } return (0); } } int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *l_tg_pt_gp , struct se_device *l_dev , struct se_lun *l_lun , struct se_node_acl *l_nacl , int new_state , int explicit ) { struct se_device *dev ; struct t10_alua_lu_gp *lu_gp ; struct t10_alua_lu_gp_member *lu_gp_mem ; struct t10_alua_lu_gp_member *local_lu_gp_mem ; struct t10_alua_tg_pt_gp *tg_pt_gp ; int primary ; int valid_states ; int rc ; sense_reason_t tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct _ddebug descriptor ; char *tmp___0 ; char *tmp___1 ; long tmp___2 ; { rc = 0; valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; tmp = core_alua_check_transition(new_state, valid_states, & primary); if (tmp != 0U) { return (-22); } else { } local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; spin_lock(& local_lu_gp_mem->lu_gp_mem_lock); lu_gp = local_lu_gp_mem->lu_gp; atomic_inc(& 
lu_gp->lu_gp_ref_cnt); spin_unlock(& local_lu_gp_mem->lu_gp_mem_lock); if ((unsigned int )lu_gp->lu_gp_id == 0U) { l_tg_pt_gp->tg_pt_gp_alua_lun = l_lun; l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, new_state, explicit); atomic_dec_mb(& lu_gp->lu_gp_ref_cnt); return (rc); } else { } spin_lock(& lu_gp->lu_gp_lock); __mptr = (struct list_head const *)lu_gp->lu_gp_mem_list.next; lu_gp_mem = (struct t10_alua_lu_gp_member *)__mptr + 0xffffffffffffffa0UL; goto ldv_57689; ldv_57688: dev = lu_gp_mem->lu_gp_mem_dev; atomic_inc_mb(& lu_gp_mem->lu_gp_mem_ref_cnt); spin_unlock(& lu_gp->lu_gp_lock); spin_lock(& dev->t10_alua.tg_pt_gps_lock); __mptr___0 = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___0 + 0xfffffffffffffe60UL; goto ldv_57687; ldv_57686: ; if (tg_pt_gp->tg_pt_gp_valid_id == 0) { goto ldv_57684; } else { } if ((int )l_tg_pt_gp->tg_pt_gp_id != (int )tg_pt_gp->tg_pt_gp_id) { goto ldv_57684; } else { } if ((unsigned long )l_tg_pt_gp == (unsigned long )tg_pt_gp) { tg_pt_gp->tg_pt_gp_alua_lun = l_lun; tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; } else { tg_pt_gp->tg_pt_gp_alua_lun = (struct se_lun *)0; tg_pt_gp->tg_pt_gp_alua_nacl = (struct se_node_acl *)0; } atomic_inc_mb(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); rc = core_alua_do_transition_tg_pt(tg_pt_gp, new_state, explicit); spin_lock(& dev->t10_alua.tg_pt_gps_lock); atomic_dec_mb(& tg_pt_gp->tg_pt_gp_ref_cnt); if (rc != 0) { goto ldv_57685; } else { } ldv_57684: __mptr___1 = (struct list_head const *)tg_pt_gp->tg_pt_gp_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___1 + 0xfffffffffffffe60UL; ldv_57687: ; if ((unsigned long )(& tg_pt_gp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57686; } else { } ldv_57685: spin_unlock(& dev->t10_alua.tg_pt_gps_lock); spin_lock(& lu_gp->lu_gp_lock); atomic_dec_mb(& lu_gp_mem->lu_gp_mem_ref_cnt); __mptr___2 = (struct list_head const *)lu_gp_mem->lu_gp_mem_list.next; lu_gp_mem = (struct t10_alua_lu_gp_member *)__mptr___2 + 0xffffffffffffffa0UL; ldv_57689: ; if ((unsigned long )(& lu_gp_mem->lu_gp_mem_list) != (unsigned long )(& lu_gp->lu_gp_mem_list)) { goto ldv_57688; } else { } spin_unlock(& lu_gp->lu_gp_lock); if (rc == 0) { descriptor.modname = "target_core_mod"; descriptor.function = "core_alua_do_port_transition"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "Successfully processed LU Group: %s all ALUA TG PT Group IDs: %hu %s transition to primary state: %s\n"; descriptor.lineno = 1240U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___0 = core_alua_dump_state(new_state); tmp___1 = config_item_name(& lu_gp->lu_gp_group.cg_item); __dynamic_pr_debug(& descriptor, "Successfully processed LU Group: %s all ALUA TG PT Group IDs: %hu %s transition to primary state: %s\n", tmp___1, (int )l_tg_pt_gp->tg_pt_gp_id, explicit != 0 ? 
(char *)"explicit" : (char *)"implicit", tmp___0); } else { } } else { } atomic_dec_mb(& lu_gp->lu_gp_ref_cnt); return (rc); } } static int core_alua_update_tpg_secondary_metadata(struct se_lun *lun ) { struct se_portal_group *se_tpg ; unsigned char *md_buf ; char path[512U] ; char wwn[256U] ; int len ; int rc ; void *tmp ; char *tmp___0 ; u16 tmp___1 ; int tmp___2 ; char *tmp___3 ; { se_tpg = lun->lun_tpg; ldv_mutex_lock_278(& lun->lun_tg_pt_md_mutex); tmp = kzalloc(1024UL, 208U); md_buf = (unsigned char *)tmp; if ((unsigned long )md_buf == (unsigned long )((unsigned char *)0U)) { printk("\vUnable to allocate buf for ALUA metadata\n"); rc = -12; goto out_unlock; } else { } memset((void *)(& path), 0, 512UL); memset((void *)(& wwn), 0, 256UL); tmp___0 = (*((se_tpg->se_tpg_tfo)->tpg_get_wwn))(se_tpg); len = snprintf((char *)(& wwn), 256UL, "%s", tmp___0); if ((unsigned long )(se_tpg->se_tpg_tfo)->tpg_get_tag != (unsigned long )((u16 (*/* const */)(struct se_portal_group * ))0)) { tmp___1 = (*((se_tpg->se_tpg_tfo)->tpg_get_tag))(se_tpg); snprintf((char *)(& wwn) + (unsigned long )len, (size_t )(256 - len), "+%hu", (int )tmp___1); } else { } tmp___2 = atomic_read((atomic_t const *)(& lun->lun_tg_pt_secondary_offline)); len = snprintf((char *)md_buf, 1024UL, "alua_tg_pt_offline=%d\nalua_tg_pt_status=0x%02x\n", tmp___2, lun->lun_tg_pt_secondary_stat); tmp___3 = (*((se_tpg->se_tpg_tfo)->get_fabric_name))(); snprintf((char *)(& path), 512UL, "/var/target/alua/%s/%s/lun_%llu", tmp___3, (char *)(& wwn), lun->unpacked_lun); rc = core_alua_write_tpg_metadata((char const *)(& path), md_buf, (u32 )len); kfree((void const *)md_buf); out_unlock: ldv_mutex_unlock_279(& lun->lun_tg_pt_md_mutex); return (rc); } } static int core_alua_set_tg_pt_secondary_state(struct se_lun *lun , int explicit , int offline ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; int trans_delay_msecs ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; { spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { spin_unlock(& lun->lun_tg_pt_gp_lock); printk("\vUnable to complete secondary state transition\n"); return (-22); } else { } trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs; if (offline != 0) { atomic_set(& lun->lun_tg_pt_secondary_offline, 1); } else { atomic_set(& lun->lun_tg_pt_secondary_offline, 0); } lun->lun_tg_pt_secondary_stat = explicit != 0 ? 1 : 2; descriptor.modname = "target_core_mod"; descriptor.function = "core_alua_set_tg_pt_secondary_state"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "Successful %s ALUA transition TG PT Group: %s ID: %hu to secondary access state: %s\n"; descriptor.lineno = 1323U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = config_item_name(& tg_pt_gp->tg_pt_gp_group.cg_item); __dynamic_pr_debug(& descriptor, "Successful %s ALUA transition TG PT Group: %s ID: %hu to secondary access state: %s\n", explicit != 0 ? (char *)"explicit" : (char *)"implicit", tmp, (int )tg_pt_gp->tg_pt_gp_id, offline != 0 ? 
(char *)"OFFLINE" : (char *)"ONLINE"); } else { } spin_unlock(& lun->lun_tg_pt_gp_lock); if (trans_delay_msecs != 0) { msleep_interruptible((unsigned int )trans_delay_msecs); } else { } if (lun->lun_tg_pt_secondary_write_md != 0) { core_alua_update_tpg_secondary_metadata(lun); } else { } return (0); } } struct t10_alua_lba_map *core_alua_allocate_lba_map(struct list_head *list , u64 first_lba , u64 last_lba ) { struct t10_alua_lba_map *lba_map ; void *tmp ; void *tmp___0 ; { tmp = kmem_cache_zalloc(t10_alua_lba_map_cache, 208U); lba_map = (struct t10_alua_lba_map *)tmp; if ((unsigned long )lba_map == (unsigned long )((struct t10_alua_lba_map *)0)) { printk("\vUnable to allocate struct t10_alua_lba_map\n"); tmp___0 = ERR_PTR(-12L); return ((struct t10_alua_lba_map *)tmp___0); } else { } INIT_LIST_HEAD(& lba_map->lba_map_mem_list); lba_map->lba_map_first_lba = first_lba; lba_map->lba_map_last_lba = last_lba; list_add_tail(& lba_map->lba_map_list, list); return (lba_map); } } int core_alua_allocate_lba_map_mem(struct t10_alua_lba_map *lba_map , int pg_id , int state ) { struct t10_alua_lba_map_member *lba_map_mem ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp ; { __mptr = (struct list_head const *)lba_map->lba_map_mem_list.next; lba_map_mem = (struct t10_alua_lba_map_member *)__mptr; goto ldv_57729; ldv_57728: ; if (lba_map_mem->lba_map_mem_alua_pg_id == pg_id) { printk("\vDuplicate pg_id %d in lba_map\n", pg_id); return (-22); } else { } __mptr___0 = (struct list_head const *)lba_map_mem->lba_map_mem_list.next; lba_map_mem = (struct t10_alua_lba_map_member *)__mptr___0; ldv_57729: ; if ((unsigned long )(& lba_map_mem->lba_map_mem_list) != (unsigned long )(& lba_map->lba_map_mem_list)) { goto ldv_57728; } else { } tmp = kmem_cache_zalloc(t10_alua_lba_map_mem_cache, 208U); lba_map_mem = (struct t10_alua_lba_map_member *)tmp; if ((unsigned long )lba_map_mem == (unsigned long )((struct t10_alua_lba_map_member *)0)) { printk("\vUnable to allocate struct t10_alua_lba_map_mem\n"); return (-12); } else { } lba_map_mem->lba_map_mem_alua_state = state; lba_map_mem->lba_map_mem_alua_pg_id = pg_id; list_add_tail(& lba_map_mem->lba_map_mem_list, & lba_map->lba_map_mem_list); return (0); } } void core_alua_free_lba_map(struct list_head *lba_list ) { struct t10_alua_lba_map *lba_map ; struct t10_alua_lba_map *lba_map_tmp ; struct t10_alua_lba_map_member *lba_map_mem ; struct t10_alua_lba_map_member *lba_map_mem_tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { __mptr = (struct list_head const *)lba_list->next; lba_map = (struct t10_alua_lba_map *)__mptr + 0xfffffffffffffff0UL; __mptr___0 = (struct list_head const *)lba_map->lba_map_list.next; lba_map_tmp = (struct t10_alua_lba_map *)__mptr___0 + 0xfffffffffffffff0UL; goto ldv_57754; ldv_57753: __mptr___1 = (struct list_head const *)lba_map->lba_map_mem_list.next; lba_map_mem = (struct t10_alua_lba_map_member *)__mptr___1; __mptr___2 = (struct list_head const *)lba_map_mem->lba_map_mem_list.next; lba_map_mem_tmp = (struct t10_alua_lba_map_member *)__mptr___2; goto ldv_57751; ldv_57750: list_del(& lba_map_mem->lba_map_mem_list); kmem_cache_free(t10_alua_lba_map_mem_cache, (void *)lba_map_mem); lba_map_mem = lba_map_mem_tmp; __mptr___3 = (struct list_head const *)lba_map_mem_tmp->lba_map_mem_list.next; lba_map_mem_tmp = (struct t10_alua_lba_map_member 
*)__mptr___3; ldv_57751: ; if ((unsigned long )(& lba_map_mem->lba_map_mem_list) != (unsigned long )(& lba_map->lba_map_mem_list)) { goto ldv_57750; } else { } list_del(& lba_map->lba_map_list); kmem_cache_free(t10_alua_lba_map_cache, (void *)lba_map); lba_map = lba_map_tmp; __mptr___4 = (struct list_head const *)lba_map_tmp->lba_map_list.next; lba_map_tmp = (struct t10_alua_lba_map *)__mptr___4 + 0xfffffffffffffff0UL; ldv_57754: ; if ((unsigned long )(& lba_map->lba_map_list) != (unsigned long )lba_list) { goto ldv_57753; } else { } return; } } void core_alua_set_lba_map(struct se_device *dev , struct list_head *lba_map_list , int segment_size , int segment_mult ) { struct list_head old_lba_map_list ; struct t10_alua_tg_pt_gp *tg_pt_gp ; int activate ; int supported ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { activate = 0; INIT_LIST_HEAD(& old_lba_map_list); spin_lock(& dev->t10_alua.lba_map_lock); dev->t10_alua.lba_map_segment_size = (u32 )segment_size; dev->t10_alua.lba_map_segment_multiplier = (u32 )segment_mult; list_splice_init(& dev->t10_alua.lba_map_list, & old_lba_map_list); if ((unsigned long )lba_map_list != (unsigned long )((struct list_head *)0)) { list_splice_init(lba_map_list, & dev->t10_alua.lba_map_list); activate = 1; } else { } spin_unlock(& dev->t10_alua.lba_map_lock); spin_lock(& dev->t10_alua.tg_pt_gps_lock); __mptr = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe60UL; goto ldv_57772; ldv_57771: ; if (tg_pt_gp->tg_pt_gp_valid_id == 0) { goto ldv_57770; } else { } supported = tg_pt_gp->tg_pt_gp_alua_supported_states; if (activate != 0) { supported = supported | 16; } else { supported = supported & -17; } tg_pt_gp->tg_pt_gp_alua_supported_states = supported; ldv_57770: __mptr___0 = (struct list_head const *)tg_pt_gp->tg_pt_gp_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___0 + 0xfffffffffffffe60UL; ldv_57772: ; if ((unsigned long )(& tg_pt_gp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57771; } else { } spin_unlock(& dev->t10_alua.tg_pt_gps_lock); core_alua_free_lba_map(& old_lba_map_list); return; } } struct t10_alua_lu_gp *core_alua_allocate_lu_gp(char const *name , int def_group ) { struct t10_alua_lu_gp *lu_gp ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; u16 tmp___1 ; { tmp = kmem_cache_zalloc(t10_alua_lu_gp_cache, 208U); lu_gp = (struct t10_alua_lu_gp *)tmp; if ((unsigned long )lu_gp == (unsigned long )((struct t10_alua_lu_gp *)0)) { printk("\vUnable to allocate struct t10_alua_lu_gp\n"); tmp___0 = ERR_PTR(-12L); return ((struct t10_alua_lu_gp *)tmp___0); } else { } INIT_LIST_HEAD(& lu_gp->lu_gp_node); INIT_LIST_HEAD(& lu_gp->lu_gp_mem_list); spinlock_check(& lu_gp->lu_gp_lock); __raw_spin_lock_init(& lu_gp->lu_gp_lock.__annonCompField17.rlock, "&(&lu_gp->lu_gp_lock)->rlock", & __key); atomic_set(& lu_gp->lu_gp_ref_cnt, 0); if (def_group != 0) { tmp___1 = alua_lu_gps_counter; alua_lu_gps_counter = (u16 )((int )alua_lu_gps_counter + 1); lu_gp->lu_gp_id = tmp___1; lu_gp->lu_gp_valid_id = 1; alua_lu_gps_count = alua_lu_gps_count + 1U; } else { } return (lu_gp); } } int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp , u16 lu_gp_id ) { struct t10_alua_lu_gp *lu_gp_tmp ; u16 lu_gp_id_tmp ; u16 tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { if (lu_gp->lu_gp_valid_id != 0) { printk("\fALUA LU Group already has a valid ID, ignoring request\n"); return (-22); } else { } 
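/* core_alua_set_lu_gp_id(): assign an ID to a logical unit group under lu_gps_lock.
 * A caller-supplied non-zero ID is checked for duplicates against lu_gps_list, while an
 * ID of 0 means "take the next value of alua_lu_gps_counter", retrying via the 'again'
 * label if the generated value collides.  On success the group is added to lu_gps_list
 * and alua_lu_gps_count is incremented; once the count reaches 0xffff the request fails
 * with -28. */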
spin_lock(& lu_gps_lock); if (alua_lu_gps_count == 65535U) { printk("\vMaximum ALUA alua_lu_gps_count: 0x0000ffff reached\n"); spin_unlock(& lu_gps_lock); kmem_cache_free(t10_alua_lu_gp_cache, (void *)lu_gp); return (-28); } else { } again: ; if ((unsigned int )lu_gp_id == 0U) { tmp = alua_lu_gps_counter; alua_lu_gps_counter = (u16 )((int )alua_lu_gps_counter + 1); lu_gp_id_tmp = tmp; } else { lu_gp_id_tmp = lu_gp_id; } __mptr = (struct list_head const *)lu_gps_list.next; lu_gp_tmp = (struct t10_alua_lu_gp *)__mptr + 0xffffffffffffff38UL; goto ldv_57792; ldv_57791: ; if ((int )lu_gp_tmp->lu_gp_id == (int )lu_gp_id_tmp) { if ((unsigned int )lu_gp_id == 0U) { goto again; } else { } printk("\fALUA Logical Unit Group ID: %hu already exists, ignoring request\n", (int )lu_gp_id); spin_unlock(& lu_gps_lock); return (-22); } else { } __mptr___0 = (struct list_head const *)lu_gp_tmp->lu_gp_node.next; lu_gp_tmp = (struct t10_alua_lu_gp *)__mptr___0 + 0xffffffffffffff38UL; ldv_57792: ; if ((unsigned long )(& lu_gp_tmp->lu_gp_node) != (unsigned long )(& lu_gps_list)) { goto ldv_57791; } else { } lu_gp->lu_gp_id = lu_gp_id_tmp; lu_gp->lu_gp_valid_id = 1; list_add_tail(& lu_gp->lu_gp_node, & lu_gps_list); alua_lu_gps_count = alua_lu_gps_count + 1U; spin_unlock(& lu_gps_lock); return (0); } } static struct t10_alua_lu_gp_member *core_alua_allocate_lu_gp_mem(struct se_device *dev ) { struct t10_alua_lu_gp_member *lu_gp_mem ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; { tmp = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, 208U); lu_gp_mem = (struct t10_alua_lu_gp_member *)tmp; if ((unsigned long )lu_gp_mem == (unsigned long )((struct t10_alua_lu_gp_member *)0)) { printk("\vUnable to allocate struct t10_alua_lu_gp_member\n"); tmp___0 = ERR_PTR(-12L); return ((struct t10_alua_lu_gp_member *)tmp___0); } else { } INIT_LIST_HEAD(& lu_gp_mem->lu_gp_mem_list); spinlock_check(& lu_gp_mem->lu_gp_mem_lock); __raw_spin_lock_init(& lu_gp_mem->lu_gp_mem_lock.__annonCompField17.rlock, "&(&lu_gp_mem->lu_gp_mem_lock)->rlock", & __key); atomic_set(& lu_gp_mem->lu_gp_mem_ref_cnt, 0); lu_gp_mem->lu_gp_mem_dev = dev; dev->dev_alua_lu_gp_mem = lu_gp_mem; return (lu_gp_mem); } } void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp ) { struct t10_alua_lu_gp_member *lu_gp_mem ; struct t10_alua_lu_gp_member *lu_gp_mem_tmp ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { spin_lock(& lu_gps_lock); list_del(& lu_gp->lu_gp_node); alua_lu_gps_count = alua_lu_gps_count - 1U; spin_unlock(& lu_gps_lock); goto ldv_57805; ldv_57804: cpu_relax(); ldv_57805: tmp = atomic_read((atomic_t const *)(& lu_gp->lu_gp_ref_cnt)); if (tmp != 0) { goto ldv_57804; } else { } spin_lock(& lu_gp->lu_gp_lock); __mptr = (struct list_head const *)lu_gp->lu_gp_mem_list.next; lu_gp_mem = (struct t10_alua_lu_gp_member *)__mptr + 0xffffffffffffffa0UL; __mptr___0 = (struct list_head const *)lu_gp_mem->lu_gp_mem_list.next; lu_gp_mem_tmp = (struct t10_alua_lu_gp_member *)__mptr___0 + 0xffffffffffffffa0UL; goto ldv_57814; ldv_57813: ; if ((int )lu_gp_mem->lu_gp_assoc) { list_del(& lu_gp_mem->lu_gp_mem_list); lu_gp->lu_gp_members = lu_gp->lu_gp_members - 1U; lu_gp_mem->lu_gp_assoc = 0; } else { } spin_unlock(& lu_gp->lu_gp_lock); spin_lock(& lu_gp_mem->lu_gp_mem_lock); if ((unsigned long )lu_gp != (unsigned long )default_lu_gp) { __core_alua_attach_lu_gp_mem(lu_gp_mem, default_lu_gp); } else { lu_gp_mem->lu_gp = (struct t10_alua_lu_gp *)0; } spin_unlock(& lu_gp_mem->lu_gp_mem_lock); 
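/* core_alua_free_lu_gp(): after unhashing the group and spinning until its reference
 * count drains, each associated member is dropped from this group and, unless the group
 * being freed is default_lu_gp itself, re-attached to default_lu_gp via
 * __core_alua_attach_lu_gp_mem(); the loop re-acquires lu_gp_lock here before advancing
 * to the next member, and the group is finally returned to t10_alua_lu_gp_cache. */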
spin_lock(& lu_gp->lu_gp_lock); lu_gp_mem = lu_gp_mem_tmp; __mptr___1 = (struct list_head const *)lu_gp_mem_tmp->lu_gp_mem_list.next; lu_gp_mem_tmp = (struct t10_alua_lu_gp_member *)__mptr___1 + 0xffffffffffffffa0UL; ldv_57814: ; if ((unsigned long )(& lu_gp_mem->lu_gp_mem_list) != (unsigned long )(& lu_gp->lu_gp_mem_list)) { goto ldv_57813; } else { } spin_unlock(& lu_gp->lu_gp_lock); kmem_cache_free(t10_alua_lu_gp_cache, (void *)lu_gp); return; } } void core_alua_free_lu_gp_mem(struct se_device *dev ) { struct t10_alua_lu_gp *lu_gp ; struct t10_alua_lu_gp_member *lu_gp_mem ; int tmp ; { lu_gp_mem = dev->dev_alua_lu_gp_mem; if ((unsigned long )lu_gp_mem == (unsigned long )((struct t10_alua_lu_gp_member *)0)) { return; } else { } goto ldv_57822; ldv_57821: cpu_relax(); ldv_57822: tmp = atomic_read((atomic_t const *)(& lu_gp_mem->lu_gp_mem_ref_cnt)); if (tmp != 0) { goto ldv_57821; } else { } spin_lock(& lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if ((unsigned long )lu_gp != (unsigned long )((struct t10_alua_lu_gp *)0)) { spin_lock(& lu_gp->lu_gp_lock); if ((int )lu_gp_mem->lu_gp_assoc) { list_del(& lu_gp_mem->lu_gp_mem_list); lu_gp->lu_gp_members = lu_gp->lu_gp_members - 1U; lu_gp_mem->lu_gp_assoc = 0; } else { } spin_unlock(& lu_gp->lu_gp_lock); lu_gp_mem->lu_gp = (struct t10_alua_lu_gp *)0; } else { } spin_unlock(& lu_gp_mem->lu_gp_mem_lock); kmem_cache_free(t10_alua_lu_gp_mem_cache, (void *)lu_gp_mem); return; } } struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(char const *name ) { struct t10_alua_lu_gp *lu_gp ; struct config_item *ci ; struct list_head const *__mptr ; char *tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { spin_lock(& lu_gps_lock); __mptr = (struct list_head const *)lu_gps_list.next; lu_gp = (struct t10_alua_lu_gp *)__mptr + 0xffffffffffffff38UL; goto ldv_57835; ldv_57834: ; if (lu_gp->lu_gp_valid_id == 0) { goto ldv_57833; } else { } ci = & lu_gp->lu_gp_group.cg_item; tmp = config_item_name(ci); tmp___0 = strcmp((char const *)tmp, name); if (tmp___0 == 0) { atomic_inc(& lu_gp->lu_gp_ref_cnt); spin_unlock(& lu_gps_lock); return (lu_gp); } else { } ldv_57833: __mptr___0 = (struct list_head const *)lu_gp->lu_gp_node.next; lu_gp = (struct t10_alua_lu_gp *)__mptr___0 + 0xffffffffffffff38UL; ldv_57835: ; if ((unsigned long )(& lu_gp->lu_gp_node) != (unsigned long )(& lu_gps_list)) { goto ldv_57834; } else { } spin_unlock(& lu_gps_lock); return ((struct t10_alua_lu_gp *)0); } } void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp ) { { spin_lock(& lu_gps_lock); atomic_dec(& lu_gp->lu_gp_ref_cnt); spin_unlock(& lu_gps_lock); return; } } void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *lu_gp_mem , struct t10_alua_lu_gp *lu_gp ) { { spin_lock(& lu_gp->lu_gp_lock); lu_gp_mem->lu_gp = lu_gp; lu_gp_mem->lu_gp_assoc = 1; list_add_tail(& lu_gp_mem->lu_gp_mem_list, & lu_gp->lu_gp_mem_list); lu_gp->lu_gp_members = lu_gp->lu_gp_members + 1U; spin_unlock(& lu_gp->lu_gp_lock); return; } } void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *lu_gp_mem , struct t10_alua_lu_gp *lu_gp ) { { spin_lock(& lu_gp->lu_gp_lock); list_del(& lu_gp_mem->lu_gp_mem_list); lu_gp_mem->lu_gp = (struct t10_alua_lu_gp *)0; lu_gp_mem->lu_gp_assoc = 0; lu_gp->lu_gp_members = lu_gp->lu_gp_members - 1U; spin_unlock(& lu_gp->lu_gp_lock); return; } } struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev , char const *name , int def_group ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; void *tmp ; struct lock_class_key __key ; struct 
lock_class_key __key___0 ; struct lock_class_key __key___1 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___2 ; u16 tmp___0 ; { tmp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, 208U); tg_pt_gp = (struct t10_alua_tg_pt_gp *)tmp; if ((unsigned long )tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { printk("\vUnable to allocate struct t10_alua_tg_pt_gp\n"); return ((struct t10_alua_tg_pt_gp *)0); } else { } INIT_LIST_HEAD(& tg_pt_gp->tg_pt_gp_list); INIT_LIST_HEAD(& tg_pt_gp->tg_pt_gp_lun_list); __mutex_init(& tg_pt_gp->tg_pt_gp_md_mutex, "&tg_pt_gp->tg_pt_gp_md_mutex", & __key); spinlock_check(& tg_pt_gp->tg_pt_gp_lock); __raw_spin_lock_init(& tg_pt_gp->tg_pt_gp_lock.__annonCompField17.rlock, "&(&tg_pt_gp->tg_pt_gp_lock)->rlock", & __key___0); atomic_set(& tg_pt_gp->tg_pt_gp_ref_cnt, 0); __init_work(& tg_pt_gp->tg_pt_gp_transition_work.work, 0); __constr_expr_0.counter = 137438953408L; tg_pt_gp->tg_pt_gp_transition_work.work.data = __constr_expr_0; lockdep_init_map(& tg_pt_gp->tg_pt_gp_transition_work.work.lockdep_map, "(&(&tg_pt_gp->tg_pt_gp_transition_work)->work)", & __key___1, 0); INIT_LIST_HEAD(& tg_pt_gp->tg_pt_gp_transition_work.work.entry); tg_pt_gp->tg_pt_gp_transition_work.work.func = & core_alua_do_transition_tg_pt_work; init_timer_key(& tg_pt_gp->tg_pt_gp_transition_work.timer, 2097152U, "(&(&tg_pt_gp->tg_pt_gp_transition_work)->timer)", & __key___2); tg_pt_gp->tg_pt_gp_transition_work.timer.function = & delayed_work_timer_fn; tg_pt_gp->tg_pt_gp_transition_work.timer.data = (unsigned long )(& tg_pt_gp->tg_pt_gp_transition_work); tg_pt_gp->tg_pt_gp_dev = dev; atomic_set(& tg_pt_gp->tg_pt_gp_alua_access_state, 0); tg_pt_gp->tg_pt_gp_alua_access_type = 48; tg_pt_gp->tg_pt_gp_nonop_delay_msecs = 100; tg_pt_gp->tg_pt_gp_trans_delay_msecs = 0; tg_pt_gp->tg_pt_gp_implicit_trans_secs = 0; tg_pt_gp->tg_pt_gp_alua_supported_states = 207; if (def_group != 0) { spin_lock(& dev->t10_alua.tg_pt_gps_lock); tmp___0 = dev->t10_alua.alua_tg_pt_gps_counter; dev->t10_alua.alua_tg_pt_gps_counter = (u16 )((int )dev->t10_alua.alua_tg_pt_gps_counter + 1); tg_pt_gp->tg_pt_gp_id = tmp___0; tg_pt_gp->tg_pt_gp_valid_id = 1; dev->t10_alua.alua_tg_pt_gps_count = dev->t10_alua.alua_tg_pt_gps_count + 1U; list_add_tail(& tg_pt_gp->tg_pt_gp_list, & dev->t10_alua.tg_pt_gps_list); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); } else { } return (tg_pt_gp); } } int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *tg_pt_gp , u16 tg_pt_gp_id ) { struct se_device *dev ; struct t10_alua_tg_pt_gp *tg_pt_gp_tmp ; u16 tg_pt_gp_id_tmp ; u16 tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { dev = tg_pt_gp->tg_pt_gp_dev; if (tg_pt_gp->tg_pt_gp_valid_id != 0) { printk("\fALUA TG PT Group already has a valid ID, ignoring request\n"); return (-22); } else { } spin_lock(& dev->t10_alua.tg_pt_gps_lock); if (dev->t10_alua.alua_tg_pt_gps_count == 65535U) { printk("\vMaximum ALUA alua_tg_pt_gps_count: 0x0000ffff reached\n"); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, (void *)tg_pt_gp); return (-28); } else { } again: ; if ((unsigned int )tg_pt_gp_id == 0U) { tmp = dev->t10_alua.alua_tg_pt_gps_counter; dev->t10_alua.alua_tg_pt_gps_counter = (u16 )((int )dev->t10_alua.alua_tg_pt_gps_counter + 1); tg_pt_gp_id_tmp = tmp; } else { tg_pt_gp_id_tmp = tg_pt_gp_id; } __mptr = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp_tmp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe60UL; goto ldv_57872; ldv_57871: ; if 
((int )tg_pt_gp_tmp->tg_pt_gp_id == (int )tg_pt_gp_id_tmp) { if ((unsigned int )tg_pt_gp_id == 0U) { goto again; } else { } printk("\vALUA Target Port Group ID: %hu already exists, ignoring request\n", (int )tg_pt_gp_id); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); return (-22); } else { } __mptr___0 = (struct list_head const *)tg_pt_gp_tmp->tg_pt_gp_list.next; tg_pt_gp_tmp = (struct t10_alua_tg_pt_gp *)__mptr___0 + 0xfffffffffffffe60UL; ldv_57872: ; if ((unsigned long )(& tg_pt_gp_tmp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57871; } else { } tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp; tg_pt_gp->tg_pt_gp_valid_id = 1; list_add_tail(& tg_pt_gp->tg_pt_gp_list, & dev->t10_alua.tg_pt_gps_list); dev->t10_alua.alua_tg_pt_gps_count = dev->t10_alua.alua_tg_pt_gps_count + 1U; spin_unlock(& dev->t10_alua.tg_pt_gps_lock); return (0); } } void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *tg_pt_gp ) { struct se_device *dev ; struct se_lun *lun ; struct se_lun *next ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { dev = tg_pt_gp->tg_pt_gp_dev; spin_lock(& dev->t10_alua.tg_pt_gps_lock); list_del(& tg_pt_gp->tg_pt_gp_list); dev->t10_alua.alua_tg_pt_gps_counter = (u16 )((int )dev->t10_alua.alua_tg_pt_gps_counter - 1); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); ldv_flush_delayed_work_280(& tg_pt_gp->tg_pt_gp_transition_work); goto ldv_57881; ldv_57880: cpu_relax(); ldv_57881: tmp = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_ref_cnt)); if (tmp != 0) { goto ldv_57880; } else { } spin_lock(& tg_pt_gp->tg_pt_gp_lock); __mptr = (struct list_head const *)tg_pt_gp->tg_pt_gp_lun_list.next; lun = (struct se_lun *)__mptr + 0xfffffffffffffed0UL; __mptr___0 = (struct list_head const *)lun->lun_tg_pt_gp_link.next; next = (struct se_lun *)__mptr___0 + 0xfffffffffffffed0UL; goto ldv_57890; ldv_57889: list_del_init(& lun->lun_tg_pt_gp_link); tg_pt_gp->tg_pt_gp_members = tg_pt_gp->tg_pt_gp_members - 1U; spin_unlock(& tg_pt_gp->tg_pt_gp_lock); spin_lock(& lun->lun_tg_pt_gp_lock); if ((unsigned long )dev->t10_alua.default_tg_pt_gp != (unsigned long )tg_pt_gp) { __target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); } else { lun->lun_tg_pt_gp = (struct t10_alua_tg_pt_gp *)0; } spin_unlock(& lun->lun_tg_pt_gp_lock); spin_lock(& tg_pt_gp->tg_pt_gp_lock); lun = next; __mptr___1 = (struct list_head const *)next->lun_tg_pt_gp_link.next; next = (struct se_lun *)__mptr___1 + 0xfffffffffffffed0UL; ldv_57890: ; if ((unsigned long )(& lun->lun_tg_pt_gp_link) != (unsigned long )(& tg_pt_gp->tg_pt_gp_lun_list)) { goto ldv_57889; } else { } spin_unlock(& tg_pt_gp->tg_pt_gp_lock); kmem_cache_free(t10_alua_tg_pt_gp_cache, (void *)tg_pt_gp); return; } } static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(struct se_device *dev , char const *name ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; struct config_item *ci ; struct list_head const *__mptr ; char *tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { spin_lock(& dev->t10_alua.tg_pt_gps_lock); __mptr = (struct list_head const *)dev->t10_alua.tg_pt_gps_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr + 0xfffffffffffffe60UL; goto ldv_57904; ldv_57903: ; if (tg_pt_gp->tg_pt_gp_valid_id == 0) { goto ldv_57902; } else { } ci = & tg_pt_gp->tg_pt_gp_group.cg_item; tmp = config_item_name(ci); tmp___0 = strcmp((char const *)tmp, name); if (tmp___0 == 0) { atomic_inc(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); 
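/* Editor's note: the atomic_inc() on tg_pt_gp_ref_cnt above hands the caller a reference that is expected to be dropped with core_alua_put_tg_pt_gp_from_name(), as core_alua_store_tg_pt_gp_info() does once it has attached the looked-up group. */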
return (tg_pt_gp); } else { } ldv_57902: __mptr___0 = (struct list_head const *)tg_pt_gp->tg_pt_gp_list.next; tg_pt_gp = (struct t10_alua_tg_pt_gp *)__mptr___0 + 0xfffffffffffffe60UL; ldv_57904: ; if ((unsigned long )(& tg_pt_gp->tg_pt_gp_list) != (unsigned long )(& dev->t10_alua.tg_pt_gps_list)) { goto ldv_57903; } else { } spin_unlock(& dev->t10_alua.tg_pt_gps_lock); return ((struct t10_alua_tg_pt_gp *)0); } } static void core_alua_put_tg_pt_gp_from_name(struct t10_alua_tg_pt_gp *tg_pt_gp ) { struct se_device *dev ; { dev = tg_pt_gp->tg_pt_gp_dev; spin_lock(& dev->t10_alua.tg_pt_gps_lock); atomic_dec(& tg_pt_gp->tg_pt_gp_ref_cnt); spin_unlock(& dev->t10_alua.tg_pt_gps_lock); return; } } static void __target_attach_tg_pt_gp(struct se_lun *lun , struct t10_alua_tg_pt_gp *tg_pt_gp ) { struct se_dev_entry *se_deve ; int tmp ; long tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = queued_spin_is_locked(& lun->lun_tg_pt_gp_lock.__annonCompField17.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"), "i" (1884), "i" (12UL)); ldv_57915: ; goto ldv_57915; } else { } spin_lock(& tg_pt_gp->tg_pt_gp_lock); lun->lun_tg_pt_gp = tg_pt_gp; list_add_tail(& lun->lun_tg_pt_gp_link, & tg_pt_gp->tg_pt_gp_lun_list); tg_pt_gp->tg_pt_gp_members = tg_pt_gp->tg_pt_gp_members + 1U; spin_lock(& lun->lun_deve_lock); __mptr = (struct list_head const *)lun->lun_deve_list.next; se_deve = (struct se_dev_entry *)__mptr + 0xfffffffffffffef0UL; goto ldv_57921; ldv_57920: core_scsi3_ua_allocate(se_deve, 63, 3); __mptr___0 = (struct list_head const *)se_deve->lun_link.next; se_deve = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffef0UL; ldv_57921: ; if ((unsigned long )(& se_deve->lun_link) != (unsigned long )(& lun->lun_deve_list)) { goto ldv_57920; } else { } spin_unlock(& lun->lun_deve_lock); spin_unlock(& tg_pt_gp->tg_pt_gp_lock); return; } } void target_attach_tg_pt_gp(struct se_lun *lun , struct t10_alua_tg_pt_gp *tg_pt_gp ) { { spin_lock(& lun->lun_tg_pt_gp_lock); __target_attach_tg_pt_gp(lun, tg_pt_gp); spin_unlock(& lun->lun_tg_pt_gp_lock); return; } } static void __target_detach_tg_pt_gp(struct se_lun *lun , struct t10_alua_tg_pt_gp *tg_pt_gp ) { int tmp ; long tmp___0 ; { tmp = queued_spin_is_locked(& lun->lun_tg_pt_gp_lock.__annonCompField17.rlock.raw_lock); tmp___0 = ldv__builtin_expect(tmp == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"), "i" (1909), "i" (12UL)); ldv_57931: ; goto ldv_57931; } else { } spin_lock(& tg_pt_gp->tg_pt_gp_lock); list_del_init(& lun->lun_tg_pt_gp_link); tg_pt_gp->tg_pt_gp_members = tg_pt_gp->tg_pt_gp_members - 1U; spin_unlock(& tg_pt_gp->tg_pt_gp_lock); lun->lun_tg_pt_gp = (struct t10_alua_tg_pt_gp *)0; return; } } void target_detach_tg_pt_gp(struct se_lun *lun ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; { spin_lock(& 
lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp != (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { __target_detach_tg_pt_gp(lun, tg_pt_gp); } else { } spin_unlock(& lun->lun_tg_pt_gp_lock); return; } } ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun , char *page ) { struct config_item *tg_pt_ci ; struct t10_alua_tg_pt_gp *tg_pt_gp ; ssize_t len ; char *tmp ; int tmp___0 ; char *tmp___1 ; int tmp___2 ; char *tmp___3 ; char *tmp___4 ; int tmp___5 ; { len = 0L; spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp != (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { tg_pt_ci = & tg_pt_gp->tg_pt_gp_group.cg_item; tmp = core_alua_dump_status(lun->lun_tg_pt_secondary_stat); tmp___0 = atomic_read((atomic_t const *)(& lun->lun_tg_pt_secondary_offline)); tmp___1 = core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status); tmp___2 = atomic_read((atomic_t const *)(& tg_pt_gp->tg_pt_gp_alua_access_state)); tmp___3 = core_alua_dump_state(tmp___2); tmp___4 = config_item_name(tg_pt_ci); tmp___5 = sprintf(page, "TG Port Alias: %s\nTG Port Group ID: %hu\nTG Port Primary Access State: %s\nTG Port Primary Access Status: %s\nTG Port Secondary Access State: %s\nTG Port Secondary Access Status: %s\n", tmp___4, (int )tg_pt_gp->tg_pt_gp_id, tmp___3, tmp___1, tmp___0 != 0 ? (char *)"Offline" : (char *)"None", tmp); len = (ssize_t )tmp___5 + len; } else { } spin_unlock(& lun->lun_tg_pt_gp_lock); return (len); } } ssize_t core_alua_store_tg_pt_gp_info(struct se_lun *lun , char const *page , size_t count ) { struct se_portal_group *tpg ; struct se_device *dev ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_384___1 __u ; int tmp ; struct t10_alua_tg_pt_gp *tg_pt_gp ; struct t10_alua_tg_pt_gp *tg_pt_gp_new ; unsigned char buf[256U] ; int move ; char *tmp___0 ; char *tmp___1 ; int tmp___2 ; struct _ddebug descriptor ; char *tmp___3 ; char *tmp___4 ; u16 tmp___5 ; char *tmp___6 ; long tmp___7 ; struct _ddebug descriptor___0 ; char *tmp___8 ; char *tmp___9 ; u16 tmp___10 ; char *tmp___11 ; long tmp___12 ; { tpg = lun->lun_tpg; __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); dev = ________p1; tg_pt_gp = (struct t10_alua_tg_pt_gp *)0; tg_pt_gp_new = (struct t10_alua_tg_pt_gp *)0; move = 0; if ((int )(dev->transport)->transport_flags & 1 || (int )(dev->se_hba)->hba_flags & 1) { return (-19L); } else { } if (count > 256UL) { printk("\vALUA Target Port Group alias too large!\n"); return (-22L); } else { } memset((void *)(& buf), 0, 256UL); memcpy((void *)(& buf), (void const *)page, count); tmp___1 = strstrip((char *)(& buf)); tmp___2 = strcmp((char const *)tmp___1, "NULL"); if (tmp___2 != 0) { tmp___0 = strstrip((char *)(& buf)); tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(dev, (char const *)tmp___0); if ((unsigned long )tg_pt_gp_new == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { return (-19L); } else { } } else { } spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp != (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { if ((unsigned long )tg_pt_gp_new == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { descriptor.modname = "target_core_mod"; descriptor.function = "core_alua_store_tg_pt_gp_info"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "Target_Core_ConfigFS: Moving %s/tpgt_%hu/%s from ALUA Target Port Group: alua/%s, ID: %hu back to default_tg_pt_gp\n"; descriptor.lineno = 2016U; descriptor.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___7 != 0L) { tmp___3 = config_item_name(& tg_pt_gp->tg_pt_gp_group.cg_item); tmp___4 = config_item_name(& lun->lun_group.cg_item); tmp___5 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___6 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); __dynamic_pr_debug(& descriptor, "Target_Core_ConfigFS: Moving %s/tpgt_%hu/%s from ALUA Target Port Group: alua/%s, ID: %hu back to default_tg_pt_gp\n", tmp___6, (int )tmp___5, tmp___4, tmp___3, (int )tg_pt_gp->tg_pt_gp_id); } else { } __target_detach_tg_pt_gp(lun, tg_pt_gp); __target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); spin_unlock(& lun->lun_tg_pt_gp_lock); return ((ssize_t )count); } else { } __target_detach_tg_pt_gp(lun, tg_pt_gp); move = 1; } else { } __target_attach_tg_pt_gp(lun, tg_pt_gp_new); spin_unlock(& lun->lun_tg_pt_gp_lock); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_alua_store_tg_pt_gp_info"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor___0.format = "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA Target Port Group: alua/%s, ID: %hu\n"; descriptor___0.lineno = 2037U; descriptor___0.flags = 0U; tmp___12 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___12 != 0L) { tmp___8 = config_item_name(& tg_pt_gp_new->tg_pt_gp_group.cg_item); tmp___9 = config_item_name(& lun->lun_group.cg_item); tmp___10 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___11 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); __dynamic_pr_debug(& descriptor___0, "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA Target Port Group: alua/%s, ID: %hu\n", move != 0 ? 
(char *)"Moving" : (char *)"Adding", tmp___11, (int )tmp___10, tmp___9, tmp___8, (int )tg_pt_gp_new->tg_pt_gp_id); } else { } core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); return ((ssize_t )count); } } ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if ((tg_pt_gp->tg_pt_gp_alua_access_type & 32) != 0 && (tg_pt_gp->tg_pt_gp_alua_access_type & 16) != 0) { tmp = sprintf(page, "Implicit and Explicit\n"); return ((ssize_t )tmp); } else if ((tg_pt_gp->tg_pt_gp_alua_access_type & 16) != 0) { tmp___0 = sprintf(page, "Implicit\n"); return ((ssize_t )tmp___0); } else if ((tg_pt_gp->tg_pt_gp_alua_access_type & 32) != 0) { tmp___1 = sprintf(page, "Explicit\n"); return ((ssize_t )tmp___1); } else { tmp___2 = sprintf(page, "None\n"); return ((ssize_t )tmp___2); } } } ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract alua_access_type\n"); return ((ssize_t )ret); } else { } if (((tmp != 0UL && tmp != 1UL) && tmp != 2UL) && tmp != 3UL) { printk("\vIllegal value for alua_access_type: %lu\n", tmp); return (-22L); } else { } if (tmp == 3UL) { tg_pt_gp->tg_pt_gp_alua_access_type = 48; } else if (tmp == 2UL) { tg_pt_gp->tg_pt_gp_alua_access_type = 32; } else if (tmp == 1UL) { tg_pt_gp->tg_pt_gp_alua_access_type = 16; } else { tg_pt_gp->tg_pt_gp_alua_access_type = 0; } return ((ssize_t )count); } } ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs); return ((ssize_t )tmp); } } ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract nonop_delay_msecs\n"); return ((ssize_t )ret); } else { } if (tmp > 10000UL) { printk("\vPassed nonop_delay_msecs: %lu, exceeds ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp, 10000); return (-22L); } else { } tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int )tmp; return ((ssize_t )count); } } ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs); return ((ssize_t )tmp); } } ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract trans_delay_msecs\n"); return ((ssize_t )ret); } else { } if (tmp > 30000UL) { printk("\vPassed trans_delay_msecs: %lu, exceeds ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp, 30000); return (-22L); } else { } tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int )tmp; return ((ssize_t )count); } } ssize_t core_alua_show_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_implicit_trans_secs); return ((ssize_t )tmp); } } ssize_t core_alua_store_implicit_trans_secs(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract implicit_trans_secs\n"); return ((ssize_t )ret); } else { } if (tmp > 255UL) { printk("\vPassed implicit_trans_secs: %lu, exceeds 
ALUA_MAX_IMPLICIT_TRANS_SECS: %d\n", tmp, 255); return (-22L); } else { } tg_pt_gp->tg_pt_gp_implicit_trans_secs = (int )tmp; return ((ssize_t )count); } } ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *tg_pt_gp , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref); return ((ssize_t )tmp); } } ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *tg_pt_gp , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract preferred ALUA value\n"); return ((ssize_t )ret); } else { } if (tmp != 0UL && tmp != 1UL) { printk("\vIllegal value for preferred ALUA: %lu\n", tmp); return (-22L); } else { } tg_pt_gp->tg_pt_gp_pref = (int )tmp; return ((ssize_t )count); } } ssize_t core_alua_show_offline_bit(struct se_lun *lun , char *page ) { int tmp ; int tmp___0 ; { tmp = atomic_read((atomic_t const *)(& lun->lun_tg_pt_secondary_offline)); tmp___0 = sprintf(page, "%d\n", tmp); return ((ssize_t )tmp___0); } } ssize_t core_alua_store_offline_bit(struct se_lun *lun , char const *page , size_t count ) { struct se_device *dev ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_386___1 __u ; int tmp ; unsigned long tmp___0 ; int ret ; { __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); dev = ________p1; if ((int )(dev->transport)->transport_flags & 1 || (int )(dev->se_hba)->hba_flags & 1) { return (-19L); } else { } ret = kstrtoul(page, 0U, & tmp___0); if (ret < 0) { printk("\vUnable to extract alua_tg_pt_offline value\n"); return ((ssize_t )ret); } else { } if (tmp___0 != 0UL && tmp___0 != 1UL) { printk("\vIllegal value for alua_tg_pt_offline: %lu\n", tmp___0); return (-22L); } else { } ret = core_alua_set_tg_pt_secondary_state(lun, 0, (int )tmp___0); if (ret < 0) { return (-22L); } else { } return ((ssize_t )count); } } ssize_t core_alua_show_secondary_status(struct se_lun *lun , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", lun->lun_tg_pt_secondary_stat); return ((ssize_t )tmp); } } ssize_t core_alua_store_secondary_status(struct se_lun *lun , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract alua_tg_pt_status\n"); return ((ssize_t )ret); } else { } if ((tmp != 0UL && tmp != 1UL) && tmp != 2UL) { printk("\vIllegal value for alua_tg_pt_status: %lu\n", tmp); return (-22L); } else { } lun->lun_tg_pt_secondary_stat = (int )tmp; return ((ssize_t )count); } } ssize_t core_alua_show_secondary_write_metadata(struct se_lun *lun , char *page ) { int tmp ; { tmp = sprintf(page, "%d\n", lun->lun_tg_pt_secondary_write_md); return ((ssize_t )tmp); } } ssize_t core_alua_store_secondary_write_metadata(struct se_lun *lun , char const *page , size_t count ) { unsigned long tmp ; int ret ; { ret = kstrtoul(page, 0U, & tmp); if (ret < 0) { printk("\vUnable to extract alua_tg_pt_write_md\n"); return ((ssize_t )ret); } else { } if (tmp != 0UL && tmp != 1UL) { printk("\vIllegal value for alua_tg_pt_write_md: %lu\n", tmp); return (-22L); } else { } lun->lun_tg_pt_secondary_write_md = (int )tmp; return ((ssize_t )count); } } int core_setup_alua(struct se_device *dev ) { struct t10_alua_lu_gp_member *lu_gp_mem ; long tmp ; bool tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { if (((int )(dev->transport)->transport_flags & 1) == 0 && 
((dev->se_hba)->hba_flags & 1U) == 0U) { lu_gp_mem = core_alua_allocate_lu_gp_mem(dev); tmp___0 = IS_ERR((void const *)lu_gp_mem); if ((int )tmp___0) { tmp = PTR_ERR((void const *)lu_gp_mem); return ((int )tmp); } else { } spin_lock(& lu_gp_mem->lu_gp_mem_lock); __core_alua_attach_lu_gp_mem(lu_gp_mem, default_lu_gp); spin_unlock(& lu_gp_mem->lu_gp_mem_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_setup_alua"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_alua.c"; descriptor.format = "%s: Adding to default ALUA LU Group: core/alua/lu_gps/default_lu_gp\n"; descriptor.lineno = 2335U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "%s: Adding to default ALUA LU Group: core/alua/lu_gps/default_lu_gp\n", (char const *)(& (dev->transport)->name)); } else { } } else { } return (0); } } void disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 3 || ldv_work_2_0 == 2) && (unsigned long )ldv_work_struct_2_0 == (unsigned long )work) { ldv_work_2_0 = 1; } else { } if ((ldv_work_2_1 == 3 || ldv_work_2_1 == 2) && (unsigned long )ldv_work_struct_2_1 == (unsigned long )work) { ldv_work_2_1 = 1; } else { } if ((ldv_work_2_2 == 3 || ldv_work_2_2 == 2) && (unsigned long )ldv_work_struct_2_2 == (unsigned long )work) { ldv_work_2_2 = 1; } else { } if ((ldv_work_2_3 == 3 || ldv_work_2_3 == 2) && (unsigned long )ldv_work_struct_2_3 == (unsigned long )work) { ldv_work_2_3 = 1; } else { } return; } } void call_and_disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 2 || ldv_work_2_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_0) { core_alua_do_transition_tg_pt_work(work); ldv_work_2_0 = 1; return; } else { } if ((ldv_work_2_1 == 2 || ldv_work_2_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_1) { core_alua_do_transition_tg_pt_work(work); ldv_work_2_1 = 1; return; } else { } if ((ldv_work_2_2 == 2 || ldv_work_2_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_2) { core_alua_do_transition_tg_pt_work(work); ldv_work_2_2 = 1; return; } else { } if ((ldv_work_2_3 == 2 || ldv_work_2_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_3) { core_alua_do_transition_tg_pt_work(work); ldv_work_2_3 = 1; return; } else { } return; } } void activate_work_2(struct work_struct *work , int state ) { { if (ldv_work_2_0 == 0) { ldv_work_struct_2_0 = work; ldv_work_2_0 = state; return; } else { } if (ldv_work_2_1 == 0) { ldv_work_struct_2_1 = work; ldv_work_2_1 = state; return; } else { } if (ldv_work_2_2 == 0) { ldv_work_struct_2_2 = work; ldv_work_2_2 = state; return; } else { } if (ldv_work_2_3 == 0) { ldv_work_struct_2_3 = work; ldv_work_2_3 = state; return; } else { } return; } } void call_and_disable_all_2(int state ) { { if (ldv_work_2_0 == state) { call_and_disable_work_2(ldv_work_struct_2_0); } else { } if (ldv_work_2_1 == state) { call_and_disable_work_2(ldv_work_struct_2_1); } else { } if (ldv_work_2_2 == state) { call_and_disable_work_2(ldv_work_struct_2_2); } else { } if (ldv_work_2_3 == state) { call_and_disable_work_2(ldv_work_struct_2_3); } else { } return; } } void invoke_work_2(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_2_0 == 2 || ldv_work_2_0 == 3) { 
ldv_work_2_0 = 4; core_alua_do_transition_tg_pt_work(ldv_work_struct_2_0); ldv_work_2_0 = 1; } else { } goto ldv_58093; case 1: ; if (ldv_work_2_1 == 2 || ldv_work_2_1 == 3) { ldv_work_2_1 = 4; core_alua_do_transition_tg_pt_work(ldv_work_struct_2_1); ldv_work_2_1 = 1; } else { } goto ldv_58093; case 2: ; if (ldv_work_2_2 == 2 || ldv_work_2_2 == 3) { ldv_work_2_2 = 4; core_alua_do_transition_tg_pt_work(ldv_work_struct_2_2); ldv_work_2_2 = 1; } else { } goto ldv_58093; case 3: ; if (ldv_work_2_3 == 2 || ldv_work_2_3 == 3) { ldv_work_2_3 = 4; core_alua_do_transition_tg_pt_work(ldv_work_struct_2_3); ldv_work_2_3 = 1; } else { } goto ldv_58093; default: ldv_stop(); } ldv_58093: ; return; } } void work_init_2(void) { { ldv_work_2_0 = 0; ldv_work_2_1 = 0; ldv_work_2_2 = 0; ldv_work_2_3 = 0; return; } } bool ldv_queue_work_on_263(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_264(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_265(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_266(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_267(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_268(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_269(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_270(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_271(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_272(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_273(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_274(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void
ldv_mutex_lock_275(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_276(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } bool ldv_flush_delayed_work_277(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_2(& ldv_func_arg1->work); return (ldv_func_res); } } void ldv_mutex_lock_278(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lun_tg_pt_md_mutex_of_se_lun(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lun_tg_pt_md_mutex_of_se_lun(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } bool ldv_flush_delayed_work_280(struct delayed_work *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = flush_delayed_work(ldv_func_arg1); ldv_func_res = tmp; call_and_disable_work_2(& ldv_func_arg1->work); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void list_move_tail(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } int ldv_mutex_trylock_311(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_309(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_312(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_313(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_308(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_310(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_314(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_303(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_305(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_304(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_307(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_306(struct workqueue_struct *ldv_func_arg1 ) ; extern bool cancel_work_sync(struct work_struct * ) ; bool ldv_cancel_work_sync_315(struct work_struct *ldv_func_arg1 ) ; bool ldv_cancel_work_sync_316(struct work_struct *ldv_func_arg1 ) ; bool transport_wait_for_tasks(struct se_cmd *cmd ) ; int target_put_sess_cmd(struct se_cmd *se_cmd ) ; int core_tmr_alloc_req(struct se_cmd *se_cmd , void *fabric_tmr_ptr , u8 function , gfp_t gfp_flags ) ; void core_tmr_release_req(struct se_tmr_req *tmr ) ; void core_tmr_abort_task(struct se_device *dev , struct se_tmr_req *tmr , struct se_session *se_sess ) ; void transport_cmd_finish_abort(struct se_cmd *cmd , int remove ) ; bool target_stop_cmd(struct se_cmd *cmd , unsigned long *flags ) ; void transport_send_task_abort(struct se_cmd *cmd ) ; int core_tmr_alloc_req(struct se_cmd *se_cmd , void *fabric_tmr_ptr , u8 function , gfp_t gfp_flags ) { struct se_tmr_req *tmr ; void *tmp ; { tmp = kzalloc(64UL, gfp_flags); tmr = (struct se_tmr_req *)tmp; if ((unsigned long )tmr == (unsigned long )((struct se_tmr_req *)0)) { printk("\vUnable to allocate struct se_tmr_req\n"); return (-12); } else { } 
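/* Editor's note: -12 above is -ENOMEM. On success the freshly allocated se_tmr_req is linked to the command below; the 16U bit set in se_cmd_flags appears to mark the command as carrying a task-management request rather than a regular CDB. */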
se_cmd->se_cmd_flags = se_cmd->se_cmd_flags | 16U; se_cmd->se_tmr_req = tmr; tmr->task_cmd = se_cmd; tmr->fabric_tmr_ptr = fabric_tmr_ptr; tmr->function = function; INIT_LIST_HEAD(& tmr->tmr_list); return (0); } } static char const __kstrtab_core_tmr_alloc_req[19U] = { 'c', 'o', 'r', 'e', '_', 't', 'm', 'r', '_', 'a', 'l', 'l', 'o', 'c', '_', 'r', 'e', 'q', '\000'}; struct kernel_symbol const __ksymtab_core_tmr_alloc_req ; struct kernel_symbol const __ksymtab_core_tmr_alloc_req = {(unsigned long )(& core_tmr_alloc_req), (char const *)(& __kstrtab_core_tmr_alloc_req)}; void core_tmr_release_req(struct se_tmr_req *tmr ) { struct se_device *dev ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = tmr->tmr_dev; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp = spinlock_check(& dev->se_tmr_lock); flags = _raw_spin_lock_irqsave(tmp); list_del(& tmr->tmr_list); spin_unlock_irqrestore(& dev->se_tmr_lock, flags); } else { } kfree((void const *)tmr); return; } } static void core_tmr_handle_tas_abort(struct se_node_acl *tmr_nacl , struct se_cmd *cmd , int tas ) { bool remove ; { remove = 1; if (((unsigned long )tmr_nacl != (unsigned long )((struct se_node_acl *)0) && (unsigned long )(cmd->se_sess)->se_node_acl != (unsigned long )tmr_nacl) && tas != 0) { remove = 0; transport_send_task_abort(cmd); } else { } transport_cmd_finish_abort(cmd, (int )remove); return; } } static int target_check_cdb_and_preempt(struct list_head *list , struct se_cmd *cmd ) { struct t10_pr_registration *reg ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { if ((unsigned long )list == (unsigned long )((struct list_head *)0)) { return (0); } else { } __mptr = (struct list_head const *)list->next; reg = (struct t10_pr_registration *)__mptr + 0xfffffffffffffd80UL; goto ldv_57232; ldv_57231: ; if (reg->pr_res_key == cmd->pr_res_key) { return (0); } else { } __mptr___0 = (struct list_head const *)reg->pr_reg_abort_list.next; reg = (struct t10_pr_registration *)__mptr___0 + 0xfffffffffffffd80UL; ldv_57232: ; if ((unsigned long )(& reg->pr_reg_abort_list) != (unsigned long )list) { goto ldv_57231; } else { } return (1); } } void core_tmr_abort_task(struct se_device *dev , struct se_tmr_req *tmr , struct se_session *se_sess ) { struct se_cmd *se_cmd ; unsigned long flags ; u64 ref_tag ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; char *tmp___0 ; struct list_head const *__mptr___0 ; { tmp = spinlock_check(& se_sess->sess_cmd_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)se_sess->sess_cmd_list.next; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff70UL; goto ldv_57252; ldv_57251: ; if ((unsigned long )se_cmd->se_dev != (unsigned long )dev) { goto ldv_57249; } else { } if ((se_cmd->se_cmd_flags & 16U) != 0U) { goto ldv_57249; } else { } ref_tag = se_cmd->tag; if (tmr->ref_task_tag != ref_tag) { goto ldv_57249; } else { } tmp___0 = (*((se_cmd->se_tfo)->get_fabric_name))(); printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", tmp___0, ref_tag); spin_lock(& se_cmd->t_state_lock); if ((se_cmd->transport_state & 4U) != 0U) { printk("ABORT_TASK: ref_tag: %llu already complete, skipping\n", ref_tag); spin_unlock(& se_cmd->t_state_lock); spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); goto out; } else { } se_cmd->transport_state = se_cmd->transport_state | 1U; spin_unlock(& se_cmd->t_state_lock); list_del_init(& se_cmd->se_cmd_list); kref_get(& se_cmd->cmd_kref); spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); 
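/* Editor's note: at this point the command's transport_state has been marked above (bit 0, presumably the aborted flag), it has been unlinked from the session command list, and it is pinned via kref_get(), so the session lock can be released before the work is cancelled and the command is waited on below. */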
ldv_cancel_work_sync_315(& se_cmd->work); transport_wait_for_tasks(se_cmd); target_put_sess_cmd(se_cmd); transport_cmd_finish_abort(se_cmd, 1); printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n", ref_tag); tmr->response = 1U; return; ldv_57249: __mptr___0 = (struct list_head const *)se_cmd->se_cmd_list.next; se_cmd = (struct se_cmd *)__mptr___0 + 0xffffffffffffff70UL; ldv_57252: ; if ((unsigned long )(& se_cmd->se_cmd_list) != (unsigned long )(& se_sess->sess_cmd_list)) { goto ldv_57251; } else { } spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); out: printk("ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST for ref_tag: %lld\n", tmr->ref_task_tag); tmr->response = 2U; return; } } static void core_tmr_drain_tmr_list(struct se_device *dev , struct se_tmr_req *tmr , struct list_head *preempt_and_abort_list ) { struct list_head drain_tmr_list ; struct se_tmr_req *tmr_p ; struct se_tmr_req *tmr_pp ; struct se_cmd *cmd ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct _ddebug descriptor ; long tmp___1 ; struct list_head const *__mptr___4 ; { drain_tmr_list.next = & drain_tmr_list; drain_tmr_list.prev = & drain_tmr_list; tmp = spinlock_check(& dev->se_tmr_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)dev->dev_tmr_list.next; tmr_p = (struct se_tmr_req *)__mptr + 0xffffffffffffffd0UL; __mptr___0 = (struct list_head const *)tmr_p->tmr_list.next; tmr_pp = (struct se_tmr_req *)__mptr___0 + 0xffffffffffffffd0UL; goto ldv_57275; ldv_57274: ; if ((unsigned long )tmr_p == (unsigned long )tmr) { goto ldv_57273; } else { } cmd = tmr_p->task_cmd; if ((unsigned long )cmd == (unsigned long )((struct se_cmd *)0)) { printk("\vUnable to locate struct se_cmd for TMR\n"); goto ldv_57273; } else { } tmp___0 = target_check_cdb_and_preempt(preempt_and_abort_list, cmd); if (tmp___0 != 0) { goto ldv_57273; } else { } spin_lock(& cmd->t_state_lock); if ((cmd->transport_state & 2U) == 0U) { spin_unlock(& cmd->t_state_lock); goto ldv_57273; } else { } if ((unsigned int )cmd->t_state == 11U) { spin_unlock(& cmd->t_state_lock); goto ldv_57273; } else { } spin_unlock(& cmd->t_state_lock); list_move_tail(& tmr_p->tmr_list, & drain_tmr_list); ldv_57273: tmr_p = tmr_pp; __mptr___1 = (struct list_head const *)tmr_pp->tmr_list.next; tmr_pp = (struct se_tmr_req *)__mptr___1 + 0xffffffffffffffd0UL; ldv_57275: ; if ((unsigned long )(& tmr_p->tmr_list) != (unsigned long )(& dev->dev_tmr_list)) { goto ldv_57274; } else { } spin_unlock_irqrestore(& dev->se_tmr_lock, flags); __mptr___2 = (struct list_head const *)drain_tmr_list.next; tmr_p = (struct se_tmr_req *)__mptr___2 + 0xffffffffffffffd0UL; __mptr___3 = (struct list_head const *)tmr_p->tmr_list.next; tmr_pp = (struct se_tmr_req *)__mptr___3 + 0xffffffffffffffd0UL; goto ldv_57286; ldv_57285: list_del_init(& tmr_p->tmr_list); cmd = tmr_p->task_cmd; descriptor.modname = "target_core_mod"; descriptor.function = "core_tmr_drain_tmr_list"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor.format = "LUN_RESET: %s releasing TMR %p Function: 0x%02x, Response: 0x%02x, t_state: %d\n"; descriptor.lineno = 226U; descriptor.flags = 0U; tmp___1 = 
ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "LUN_RESET: %s releasing TMR %p Function: 0x%02x, Response: 0x%02x, t_state: %d\n", (unsigned long )preempt_and_abort_list != (unsigned long )((struct list_head *)0) ? (char *)"Preempt" : (char *)"", tmr_p, (int )tmr_p->function, (int )tmr_p->response, (unsigned int )cmd->t_state); } else { } transport_cmd_finish_abort(cmd, 1); tmr_p = tmr_pp; __mptr___4 = (struct list_head const *)tmr_pp->tmr_list.next; tmr_pp = (struct se_tmr_req *)__mptr___4 + 0xffffffffffffffd0UL; ldv_57286: ; if ((unsigned long )(& tmr_p->tmr_list) != (unsigned long )(& drain_tmr_list)) { goto ldv_57285; } else { } return; } } static void core_tmr_drain_state_list(struct se_device *dev , struct se_cmd *prout_cmd , struct se_node_acl *tmr_nacl , int tas , struct list_head *preempt_and_abort_list ) { struct list_head drain_task_list ; struct se_cmd *cmd ; struct se_cmd *next ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct _ddebug descriptor ; int tmp___1 ; long tmp___2 ; struct _ddebug descriptor___0 ; long tmp___3 ; raw_spinlock_t *tmp___4 ; int tmp___5 ; { drain_task_list.next = & drain_task_list; drain_task_list.prev = & drain_task_list; tmp = spinlock_check(& dev->execute_task_lock); flags = _raw_spin_lock_irqsave(tmp); __mptr = (struct list_head const *)dev->state_list.next; cmd = (struct se_cmd *)__mptr + 0xfffffffffffffd78UL; __mptr___0 = (struct list_head const *)cmd->state_list.next; next = (struct se_cmd *)__mptr___0 + 0xfffffffffffffd78UL; goto ldv_57310; ldv_57309: tmp___0 = target_check_cdb_and_preempt(preempt_and_abort_list, cmd); if (tmp___0 != 0) { goto ldv_57308; } else { } if ((unsigned long )prout_cmd == (unsigned long )cmd) { goto ldv_57308; } else { } list_move_tail(& cmd->state_list, & drain_task_list); cmd->state_active = 0; ldv_57308: cmd = next; __mptr___1 = (struct list_head const *)next->state_list.next; next = (struct se_cmd *)__mptr___1 + 0xfffffffffffffd78UL; ldv_57310: ; if ((unsigned long )(& cmd->state_list) != (unsigned long )(& dev->state_list)) { goto ldv_57309; } else { } spin_unlock_irqrestore(& dev->execute_task_lock, flags); goto ldv_57321; ldv_57320: __mptr___2 = (struct list_head const *)drain_task_list.next; cmd = (struct se_cmd *)__mptr___2 + 0xfffffffffffffd78UL; list_del(& cmd->state_list); descriptor.modname = "target_core_mod"; descriptor.function = "core_tmr_drain_state_list"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor.format = "LUN_RESET: %s cmd: %p ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %dcdb: 0x%02x\n"; descriptor.lineno = 295U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = (*((cmd->se_tfo)->get_cmd_state))(cmd); __dynamic_pr_debug(& descriptor, "LUN_RESET: %s cmd: %p ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %dcdb: 0x%02x\n", (unsigned long )preempt_and_abort_list != (unsigned long )((struct list_head *)0) ? 
(char *)"Preempt" : (char *)"", cmd, cmd->tag, 0, tmp___1, (unsigned int )cmd->t_state, (int )*(cmd->t_task_cdb)); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_tmr_drain_state_list"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor___0.format = "LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx -- CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n"; descriptor___0.lineno = 302U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___0, "LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx -- CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", cmd->tag, cmd->pr_res_key, (cmd->transport_state & 2U) != 0U, (cmd->transport_state & 32U) != 0U, (cmd->transport_state & 16U) != 0U); } else { } if ((unsigned int )cmd->t_state == 6U) { ldv_cancel_work_sync_316(& cmd->work); } else { } tmp___4 = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp___4); target_stop_cmd(cmd, & flags); cmd->transport_state = cmd->transport_state | 1U; spin_unlock_irqrestore(& cmd->t_state_lock, flags); core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); ldv_57321: tmp___5 = list_empty((struct list_head const *)(& drain_task_list)); if (tmp___5 == 0) { goto ldv_57320; } else { } return; } } int core_tmr_lun_reset(struct se_device *dev , struct se_tmr_req *tmr , struct list_head *preempt_and_abort_list , struct se_cmd *prout_cmd ) { struct se_node_acl *tmr_nacl ; struct se_portal_group *tmr_tpg ; int tas ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; struct _ddebug descriptor___1 ; long tmp___2 ; struct _ddebug descriptor___2 ; long tmp___3 ; { tmr_nacl = (struct se_node_acl *)0; tmr_tpg = (struct se_portal_group *)0; tas = dev->dev_attrib.emulate_tas; if (((unsigned long )tmr != (unsigned long )((struct se_tmr_req *)0) && (unsigned long )tmr->task_cmd != (unsigned long )((struct se_cmd *)0)) && (unsigned long )(tmr->task_cmd)->se_sess != (unsigned long )((struct se_session *)0)) { tmr_nacl = ((tmr->task_cmd)->se_sess)->se_node_acl; tmr_tpg = ((tmr->task_cmd)->se_sess)->se_tpg; if ((unsigned long )tmr_nacl != (unsigned long )((struct se_node_acl *)0) && (unsigned long )tmr_tpg != (unsigned long )((struct se_portal_group *)0)) { descriptor.modname = "target_core_mod"; descriptor.function = "core_tmr_lun_reset"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor.format = "LUN_RESET: TMR caller fabric: %s initiator port %s\n"; descriptor.lineno = 356U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((tmr_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "LUN_RESET: TMR caller fabric: %s initiator port %s\n", tmp, (char *)(& tmr_nacl->initiatorname)); } else { } } else { } } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "core_tmr_lun_reset"; descriptor___0.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor___0.format = "LUN_RESET: %s starting for [%s], tas: %d\n"; descriptor___0.lineno = 361U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "LUN_RESET: %s starting for [%s], tas: %d\n", (unsigned long )preempt_and_abort_list != (unsigned long )((struct list_head *)0) ? (char *)"Preempt" : (char *)"TMR", (char const *)(& (dev->transport)->name), tas); } else { } core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, preempt_and_abort_list); if ((unsigned long )preempt_and_abort_list == (unsigned long )((struct list_head *)0) && (int )dev->dev_reservation_flags & 1) { spin_lock(& dev->dev_reservation_lock); dev->dev_reserved_node_acl = (struct se_node_acl *)0; dev->dev_reservation_flags = dev->dev_reservation_flags & 4294967294U; spin_unlock(& dev->dev_reservation_lock); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "core_tmr_lun_reset"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor___1.format = "LUN_RESET: SCSI-2 Released reservation\n"; descriptor___1.lineno = 377U; descriptor___1.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___1, "LUN_RESET: SCSI-2 Released reservation\n"); } else { } } else { } atomic_long_inc(& dev->num_resets); descriptor___2.modname = "target_core_mod"; descriptor___2.function = "core_tmr_lun_reset"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tmr.c"; descriptor___2.format = "LUN_RESET: %s for [%s] Complete\n"; descriptor___2.lineno = 384U; descriptor___2.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___2, "LUN_RESET: %s for [%s] Complete\n", (unsigned long )preempt_and_abort_list != (unsigned long )((struct list_head *)0) ? 
(char *)"Preempt" : (char *)"TMR", (char const *)(& (dev->transport)->name)); } else { } return (0); } } bool ldv_queue_work_on_303(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_304(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_305(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_306(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_307(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_308(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_309(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_310(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_311(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_312(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_313(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_314(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } bool ldv_cancel_work_sync_315(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_2(ldv_func_arg1); return (ldv_func_res); } } bool ldv_cancel_work_sync_316(struct work_struct *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; bool tmp ; { tmp = cancel_work_sync(ldv_func_arg1); ldv_func_res = tmp; disable_work_2(ldv_func_arg1); return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static void list_add(struct list_head *new , struct list_head *head ) { { __list_add(new, head, head->next); return; } } __inline static void list_move(struct list_head *list , struct 
list_head *head ) { { __list_del_entry(list); list_add(list, head); return; } } __inline static void *ERR_PTR(long error ) ; __inline static long PTR_ERR(void const *ptr ) ; __inline static bool IS_ERR(void const *ptr ) ; int ldv_mutex_trylock_343(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_341(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_344(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_345(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_348(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_350(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_352(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_354(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_355(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_356(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_358(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_360(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_361(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_363(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_365(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_367(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_369(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_371(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_373(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_340(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_342(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_346(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_347(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_349(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_351(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_353(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_357(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_359(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_362(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_364(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_366(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_368(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_370(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_372(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) ; void ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) ; extern void _raw_spin_lock_bh(raw_spinlock_t * ) ; extern void _raw_spin_unlock_bh(raw_spinlock_t * ) ; __inline static void spin_lock_bh(spinlock_t *lock ) { { _raw_spin_lock_bh(& lock->__annonCompField17.rlock); return; } } __inline static void spin_unlock_bh(spinlock_t *lock ) { { _raw_spin_unlock_bh(& lock->__annonCompField17.rlock); return; } } bool ldv_queue_work_on_335(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_337(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_336(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_339(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_338(struct workqueue_struct *ldv_func_arg1 ) ; extern int percpu_ref_init(struct percpu_ref * , percpu_ref_func_t * , unsigned int , gfp_t ) ; extern void percpu_ref_exit(struct percpu_ref * ) ; void target_get_session(struct se_session *se_sess ) ; void 
target_put_session(struct se_session *se_sess ) ; void target_put_nacl(struct se_node_acl *nacl ) ; struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg , unsigned char *initiatorname ) ; struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *tpg , unsigned char *initiatorname ) ; int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *tpg , unsigned char *initiatorname , u32 queue_depth , int force ) ; int core_tpg_set_initiator_node_tag(struct se_portal_group *tpg , struct se_node_acl *acl , char const *new_tag ) ; int core_tpg_register(struct se_wwn *se_wwn , struct se_portal_group *se_tpg , int proto_id ) ; int core_tpg_deregister(struct se_portal_group *se_tpg ) ; void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl ) ; void transport_clear_lun_ref(struct se_lun *lun ) ; static spinlock_t tpg_lock = {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "tpg_lock", 0, 0UL}}}}; static struct list_head tpg_list = {& tpg_list, & tpg_list}; struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg , char const *initiatorname ) { struct se_node_acl *acl ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tpg->acl_node_list.next; acl = (struct se_node_acl *)__mptr + 0xfffffffffffffb70UL; goto ldv_57324; ldv_57323: tmp = strcmp((char const *)(& acl->initiatorname), initiatorname); if (tmp == 0) { return (acl); } else { } __mptr___0 = (struct list_head const *)acl->acl_list.next; acl = (struct se_node_acl *)__mptr___0 + 0xfffffffffffffb70UL; ldv_57324: ; if ((unsigned long )(& acl->acl_list) != (unsigned long )(& tpg->acl_node_list)) { goto ldv_57323; } else { } return ((struct se_node_acl *)0); } } struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg , unsigned char *initiatorname ) { struct se_node_acl *acl ; { ldv_mutex_lock_347(& tpg->acl_node_mutex); acl = __core_tpg_get_initiator_node_acl(tpg, (char const *)initiatorname); ldv_mutex_unlock_348(& tpg->acl_node_mutex); return (acl); } } static char const __kstrtab_core_tpg_get_initiator_node_acl[32U] = { 'c', 'o', 'r', 'e', '_', 't', 'p', 'g', '_', 'g', 'e', 't', '_', 'i', 'n', 'i', 't', 'i', 'a', 't', 'o', 'r', '_', 'n', 'o', 'd', 'e', '_', 'a', 'c', 'l', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_get_initiator_node_acl ; struct kernel_symbol const __ksymtab_core_tpg_get_initiator_node_acl = {(unsigned long )(& core_tpg_get_initiator_node_acl), (char const *)(& __kstrtab_core_tpg_get_initiator_node_acl)}; void core_tpg_add_node_to_devs(struct se_node_acl *acl , struct se_portal_group *tpg , struct se_lun *lun_orig ) { u32 lun_access ; struct se_lun *lun ; struct se_device *dev ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_382___2 __u ; int tmp ; struct hlist_node const *__mptr ; struct se_lun *tmp___0 ; struct se_device *________p1___0 ; struct se_device *_________p1___0 ; union __anonunion___u_384___2 __u___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; u32 tmp___4 ; int tmp___5 ; struct _ddebug descriptor ; u16 tmp___6 ; char *tmp___7 ; long tmp___8 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___1 ; struct hlist_node *_________p1___1 ; union __anonunion___u_386___2 __u___1 ; int tmp___9 ; struct hlist_node const *__mptr___0 ; struct se_lun *tmp___10 ; { lun_access = 0U; ldv_mutex_lock_349(& tpg->tpg_lun_mutex); 
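/* Editor's note: tpg_lun_mutex is taken here and held across the LUN hlist walk below; the inlined rcu_dereference_check() logic only calls lockdep_rcu_suspicious() when neither this mutex nor the RCU read lock is held. */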
__read_once_size((void const volatile *)(& tpg->tpg_lun_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___0 = (struct se_lun *)__mptr + 0xfffffffffffffb78UL; } else { tmp___0 = (struct se_lun *)0; } lun = tmp___0; goto ldv_57409; ldv_57408: ; if ((unsigned long )lun_orig != (unsigned long )((struct se_lun *)0) && (unsigned long )lun != (unsigned long )lun_orig) { goto ldv_57395; } else { } __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { tmp___2 = lock_is_held(& tpg->tpg_lun_mutex.dep_map); if (tmp___2 == 0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c", 105, "suspicious rcu_dereference_check() usage"); } else { } } else { } } else { } dev = ________p1___0; tmp___5 = (*((tpg->se_tpg_tfo)->tpg_check_demo_mode_write_protect))(tpg); if (tmp___5 == 0) { lun_access = 2U; } else { tmp___4 = (*((dev->transport)->get_device_type))(dev); if (tmp___4 == 0U) { lun_access = 1U; } else { lun_access = 2U; } } descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_add_node_to_devs"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s access for LUN in Demo Mode\n"; descriptor.lineno = 128U; descriptor.flags = 0U; tmp___8 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___8 != 0L) { tmp___6 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___7 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]->TPG[%u]_LUN[%llu] - Adding %s access for LUN in Demo Mode\n", tmp___7, (int )tmp___6, lun->unpacked_lun, lun_access == 2U ? 
(char *)"READ-WRITE" : (char *)"READ-ONLY"); } else { } core_enable_device_list_for_node(lun, (struct se_lun_acl *)0, lun->unpacked_lun, lun_access, acl, tpg); core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, lun->unpacked_lun); ldv_57395: __read_once_size((void const volatile *)(& lun->link.next), (void *)(& __u___1.__c), 8); _________p1___1 = __u___1.__val; ________p1___1 = _________p1___1; tmp___9 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___1; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___10 = (struct se_lun *)__mptr___0 + 0xfffffffffffffb78UL; } else { tmp___10 = (struct se_lun *)0; } lun = tmp___10; ldv_57409: ; if ((unsigned long )lun != (unsigned long )((struct se_lun *)0)) { goto ldv_57408; } else { } ldv_mutex_unlock_350(& tpg->tpg_lun_mutex); return; } } static int core_set_queue_depth_for_node(struct se_portal_group *tpg , struct se_node_acl *acl ) { char *tmp ; { if (acl->queue_depth == 0U) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vQueue depth for %s Initiator Node: %s is 0,defaulting to 1.\n", tmp, (char *)(& acl->initiatorname)); acl->queue_depth = 1U; } else { } return (0); } } static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg , unsigned char const *initiatorname ) { struct se_node_acl *acl ; unsigned long _max1 ; unsigned long _max2 ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; int tmp___0 ; { _max1 = 1304UL; _max2 = (tpg->se_tpg_tfo)->node_acl_size; tmp = kzalloc((unsigned long const )_max1 > (unsigned long const )_max2 ? (unsigned long const )_max1 : _max2, 208U); acl = (struct se_node_acl *)tmp; if ((unsigned long )acl == (unsigned long )((struct se_node_acl *)0)) { return ((struct se_node_acl *)0); } else { } INIT_LIST_HEAD(& acl->acl_list); INIT_LIST_HEAD(& acl->acl_sess_list); acl->lun_entry_hlist.first = (struct hlist_node *)0; kref_init(& acl->acl_kref); init_completion(& acl->acl_free_comp); spinlock_check(& acl->nacl_sess_lock); __raw_spin_lock_init(& acl->nacl_sess_lock.__annonCompField17.rlock, "&(&acl->nacl_sess_lock)->rlock", & __key); __mutex_init(& acl->lun_entry_mutex, "&acl->lun_entry_mutex", & __key___0); atomic_set(& acl->acl_pr_ref_count, 0); if ((unsigned long )(tpg->se_tpg_tfo)->tpg_get_default_depth != (unsigned long )((u32 (*/* const */)(struct se_portal_group * ))0)) { acl->queue_depth = (*((tpg->se_tpg_tfo)->tpg_get_default_depth))(tpg); } else { acl->queue_depth = 1U; } snprintf((char *)(& acl->initiatorname), 224UL, "%s", initiatorname); acl->se_tpg = tpg; acl->acl_index = scsi_get_new_index(2); (*((tpg->se_tpg_tfo)->set_default_node_attributes))(acl); tmp___0 = core_set_queue_depth_for_node(tpg, acl); if (tmp___0 < 0) { goto out_free_acl; } else { } return (acl); out_free_acl: kfree((void const *)acl); return ((struct se_node_acl *)0); } } static void target_add_node_acl(struct se_node_acl *acl ) { struct se_portal_group *tpg ; struct _ddebug descriptor ; char *tmp ; u16 tmp___0 ; char *tmp___1 ; long tmp___2 ; { tpg = acl->se_tpg; ldv_mutex_lock_351(& tpg->acl_node_mutex); list_add_tail(& acl->acl_list, & tpg->acl_node_list); tpg->num_node_acls = tpg->num_node_acls + 1U; ldv_mutex_unlock_352(& tpg->acl_node_mutex); descriptor.modname = "target_core_mod"; descriptor.function = "target_add_node_acl"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s Initiator Node: %s\n"; descriptor.lineno = 215U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___0 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s Initiator Node: %s\n", tmp___1, (int )tmp___0, (int )acl->dynamic_node_acl ? (char *)"DYNAMIC" : (char *)"", acl->queue_depth, tmp, (char *)(& acl->initiatorname)); } else { } return; } } struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *tpg , unsigned char *initiatorname ) { struct se_node_acl *acl ; int tmp ; int tmp___0 ; { acl = core_tpg_get_initiator_node_acl(tpg, initiatorname); if ((unsigned long )acl != (unsigned long )((struct se_node_acl *)0)) { return (acl); } else { } tmp = (*((tpg->se_tpg_tfo)->tpg_check_demo_mode))(tpg); if (tmp == 0) { return ((struct se_node_acl *)0); } else { } acl = target_alloc_node_acl(tpg, (unsigned char const *)initiatorname); if ((unsigned long )acl == (unsigned long )((struct se_node_acl *)0)) { return ((struct se_node_acl *)0); } else { } acl->dynamic_node_acl = 1; if ((unsigned long )(tpg->se_tpg_tfo)->tpg_check_demo_mode_login_only == (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { core_tpg_add_node_to_devs(acl, tpg, (struct se_lun *)0); } else { tmp___0 = (*((tpg->se_tpg_tfo)->tpg_check_demo_mode_login_only))(tpg); if (tmp___0 != 1) { core_tpg_add_node_to_devs(acl, tpg, (struct se_lun *)0); } else { } } target_add_node_acl(acl); return (acl); } } static char const __kstrtab_core_tpg_check_initiator_node_acl[34U] = { 'c', 'o', 'r', 'e', '_', 't', 'p', 'g', '_', 'c', 'h', 'e', 'c', 'k', '_', 'i', 'n', 'i', 't', 'i', 'a', 't', 'o', 'r', '_', 'n', 'o', 'd', 'e', '_', 'a', 'c', 'l', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_check_initiator_node_acl ; struct kernel_symbol const __ksymtab_core_tpg_check_initiator_node_acl = {(unsigned long )(& core_tpg_check_initiator_node_acl), (char const *)(& __kstrtab_core_tpg_check_initiator_node_acl)}; void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl ) { int tmp ; { goto ldv_57449; ldv_57448: cpu_relax(); ldv_57449: tmp = atomic_read((atomic_t const *)(& nacl->acl_pr_ref_count)); if (tmp != 0) { goto ldv_57448; } else { } return; } } struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *tpg , char const *initiatorname ) { struct se_node_acl *acl ; struct _ddebug descriptor ; u16 tmp ; char *tmp___0 ; long tmp___1 ; u16 tmp___2 ; char *tmp___3 ; void *tmp___4 ; void *tmp___5 ; { ldv_mutex_lock_353(& tpg->acl_node_mutex); acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname); if ((unsigned long )acl != (unsigned long )((struct se_node_acl *)0)) { if ((int )acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_add_initiator_node_acl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; 
descriptor.format = "%s_TPG[%u] - Replacing dynamic ACL for %s\n"; descriptor.lineno = 269U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%u] - Replacing dynamic ACL for %s\n", tmp___0, (int )tmp, initiatorname); } else { } ldv_mutex_unlock_354(& tpg->acl_node_mutex); return (acl); } else { } tmp___2 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___3 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vACL entry for %s Initiator Node %s already exists for TPG %u, ignoring request.\n", tmp___3, initiatorname, (int )tmp___2); ldv_mutex_unlock_355(& tpg->acl_node_mutex); tmp___4 = ERR_PTR(-17L); return ((struct se_node_acl *)tmp___4); } else { } ldv_mutex_unlock_356(& tpg->acl_node_mutex); acl = target_alloc_node_acl(tpg, (unsigned char const *)initiatorname); if ((unsigned long )acl == (unsigned long )((struct se_node_acl *)0)) { tmp___5 = ERR_PTR(-12L); return ((struct se_node_acl *)tmp___5); } else { } target_add_node_acl(acl); return (acl); } } void core_tpg_del_initiator_node_acl(struct se_node_acl *acl ) { struct se_portal_group *tpg ; struct list_head sess_list ; struct se_session *sess ; struct se_session *sess_tmp ; unsigned long flags ; int rc ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; struct _ddebug descriptor ; char *tmp___0 ; u16 tmp___1 ; char *tmp___2 ; long tmp___3 ; { tpg = acl->se_tpg; sess_list.next = & sess_list; sess_list.prev = & sess_list; ldv_mutex_lock_357(& tpg->acl_node_mutex); if ((int )acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; } else { } list_del(& acl->acl_list); tpg->num_node_acls = tpg->num_node_acls - 1U; ldv_mutex_unlock_358(& tpg->acl_node_mutex); tmp = spinlock_check(& acl->nacl_sess_lock); flags = _raw_spin_lock_irqsave(tmp); acl->acl_stop = 1; __mptr = (struct list_head const *)acl->acl_sess_list.next; sess = (struct se_session *)__mptr + 0xffffffffffffffc0UL; __mptr___0 = (struct list_head const *)sess->sess_acl_list.next; sess_tmp = (struct se_session *)__mptr___0 + 0xffffffffffffffc0UL; goto ldv_57478; ldv_57477: ; if ((unsigned int )*((unsigned char *)sess + 0UL) != 0U) { goto ldv_57476; } else { } target_get_session(sess); list_move(& sess->sess_acl_list, & sess_list); ldv_57476: sess = sess_tmp; __mptr___1 = (struct list_head const *)sess_tmp->sess_acl_list.next; sess_tmp = (struct se_session *)__mptr___1 + 0xffffffffffffffc0UL; ldv_57478: ; if ((unsigned long )(& sess->sess_acl_list) != (unsigned long )(& acl->acl_sess_list)) { goto ldv_57477; } else { } spin_unlock_irqrestore(& acl->nacl_sess_lock, flags); __mptr___2 = (struct list_head const *)sess_list.next; sess = (struct se_session *)__mptr___2 + 0xffffffffffffffc0UL; __mptr___3 = (struct list_head const *)sess->sess_acl_list.next; sess_tmp = (struct se_session *)__mptr___3 + 0xffffffffffffffc0UL; goto ldv_57488; ldv_57487: list_del(& sess->sess_acl_list); rc = (*((tpg->se_tpg_tfo)->shutdown_session))(sess); target_put_session(sess); if (rc == 0) { goto ldv_57486; } else { } target_put_session(sess); ldv_57486: sess = sess_tmp; __mptr___4 = (struct list_head const *)sess_tmp->sess_acl_list.next; sess_tmp = (struct se_session *)__mptr___4 + 0xffffffffffffffc0UL; ldv_57488: ; if 
((unsigned long )(& sess->sess_acl_list) != (unsigned long )(& sess_list)) { goto ldv_57487; } else { } target_put_nacl(acl); wait_for_completion(& acl->acl_free_comp); core_tpg_wait_for_nacl_pr_ref(acl); core_free_device_list_for_node(acl, tpg); descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_del_initiator_node_acl"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s Initiator Node: %s\n"; descriptor.lineno = 342U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___1 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___2 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s Initiator Node: %s\n", tmp___2, (int )tmp___1, acl->queue_depth, tmp___0, (char *)(& acl->initiatorname)); } else { } kfree((void const *)acl); return; } } int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *tpg , unsigned char *initiatorname , u32 queue_depth , int force ) { struct se_session *sess ; struct se_session *init_sess ; struct se_node_acl *acl ; unsigned long flags ; int dynamic_acl ; u16 tmp ; char *tmp___0 ; raw_spinlock_t *tmp___1 ; struct list_head const *__mptr ; char *tmp___2 ; int tmp___3 ; struct list_head const *__mptr___0 ; int tmp___4 ; struct _ddebug descriptor ; u16 tmp___5 ; char *tmp___6 ; long tmp___7 ; { init_sess = (struct se_session *)0; dynamic_acl = 0; ldv_mutex_lock_359(& tpg->acl_node_mutex); acl = __core_tpg_get_initiator_node_acl(tpg, (char const *)initiatorname); if ((unsigned long )acl == (unsigned long )((struct se_node_acl *)0)) { tmp = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___0 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vAccess Control List entry for %s Initiator Node %s does not exists for TPG %hu, ignoring request.\n", tmp___0, initiatorname, (int )tmp); ldv_mutex_unlock_360(& tpg->acl_node_mutex); return (-19); } else { } if ((int )acl->dynamic_node_acl) { acl->dynamic_node_acl = 0; dynamic_acl = 1; } else { } ldv_mutex_unlock_361(& tpg->acl_node_mutex); tmp___1 = spinlock_check(& tpg->session_lock); flags = _raw_spin_lock_irqsave(tmp___1); __mptr = (struct list_head const *)tpg->tpg_sess_list.next; sess = (struct se_session *)__mptr + 0xffffffffffffffd0UL; goto ldv_57513; ldv_57512: ; if ((unsigned long )sess->se_node_acl != (unsigned long )acl) { goto ldv_57510; } else { } if (force == 0) { tmp___2 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); printk("\vUnable to change queue depth for %s Initiator Node: %s while session is operational. 
To forcefully change the queue depth and force session reinstatement use the \"force=1\" parameter.\n", tmp___2, initiatorname); spin_unlock_irqrestore(& tpg->session_lock, flags); ldv_mutex_lock_362(& tpg->acl_node_mutex); if (dynamic_acl != 0) { acl->dynamic_node_acl = 1; } else { } ldv_mutex_unlock_363(& tpg->acl_node_mutex); return (-17); } else { } tmp___3 = (*((tpg->se_tpg_tfo)->shutdown_session))(sess); if (tmp___3 == 0) { goto ldv_57510; } else { } init_sess = sess; goto ldv_57511; ldv_57510: __mptr___0 = (struct list_head const *)sess->sess_list.next; sess = (struct se_session *)__mptr___0 + 0xffffffffffffffd0UL; ldv_57513: ; if ((unsigned long )(& sess->sess_list) != (unsigned long )(& tpg->tpg_sess_list)) { goto ldv_57512; } else { } ldv_57511: acl->queue_depth = queue_depth; tmp___4 = core_set_queue_depth_for_node(tpg, acl); if (tmp___4 < 0) { spin_unlock_irqrestore(& tpg->session_lock, flags); if ((unsigned long )init_sess != (unsigned long )((struct se_session *)0)) { (*((tpg->se_tpg_tfo)->close_session))(init_sess); } else { } ldv_mutex_lock_364(& tpg->acl_node_mutex); if (dynamic_acl != 0) { acl->dynamic_node_acl = 1; } else { } ldv_mutex_unlock_365(& tpg->acl_node_mutex); return (-22); } else { } spin_unlock_irqrestore(& tpg->session_lock, flags); if ((unsigned long )init_sess != (unsigned long )((struct se_session *)0)) { (*((tpg->se_tpg_tfo)->close_session))(init_sess); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_set_initiator_node_queue_depth"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "Successfully changed queue depth to: %d for Initiator Node: %s on %s Target Portal Group: %u\n"; descriptor.lineno = 447U; descriptor.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___7 != 0L) { tmp___5 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___6 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "Successfully changed queue depth to: %d for Initiator Node: %s on %s Target Portal Group: %u\n", queue_depth, initiatorname, tmp___6, (int )tmp___5); } else { } ldv_mutex_lock_366(& tpg->acl_node_mutex); if (dynamic_acl != 0) { acl->dynamic_node_acl = 1; } else { } ldv_mutex_unlock_367(& tpg->acl_node_mutex); return (0); } } static char const __kstrtab_core_tpg_set_initiator_node_queue_depth[40U] = { 'c', 'o', 'r', 'e', '_', 't', 'p', 'g', '_', 's', 'e', 't', '_', 'i', 'n', 'i', 't', 'i', 'a', 't', 'o', 'r', '_', 'n', 'o', 'd', 'e', '_', 'q', 'u', 'e', 'u', 'e', '_', 'd', 'e', 'p', 't', 'h', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_set_initiator_node_queue_depth ; struct kernel_symbol const __ksymtab_core_tpg_set_initiator_node_queue_depth = {(unsigned long )(& core_tpg_set_initiator_node_queue_depth), (char const *)(& __kstrtab_core_tpg_set_initiator_node_queue_depth)}; int core_tpg_set_initiator_node_tag(struct se_portal_group *tpg , struct se_node_acl *acl , char const *new_tag ) { size_t tmp ; int tmp___0 ; int tmp___1 ; { tmp = strlen(new_tag); if (tmp > 63UL) { return (-22); } else { } tmp___0 = strncmp("NULL", new_tag, 4UL); if (tmp___0 == 0) { acl->acl_tag[0] = 0; return (0); } else { } tmp___1 = snprintf((char *)(& acl->acl_tag), 64UL, "%s", new_tag); return (tmp___1); } } static char const __kstrtab_core_tpg_set_initiator_node_tag[32U] = { 'c', 'o', 
'r', 'e', '_', 't', 'p', 'g', '_', 's', 'e', 't', '_', 'i', 'n', 'i', 't', 'i', 'a', 't', 'o', 'r', '_', 'n', 'o', 'd', 'e', '_', 't', 'a', 'g', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_set_initiator_node_tag ; struct kernel_symbol const __ksymtab_core_tpg_set_initiator_node_tag = {(unsigned long )(& core_tpg_set_initiator_node_tag), (char const *)(& __kstrtab_core_tpg_set_initiator_node_tag)}; static void core_tpg_lun_ref_release(struct percpu_ref *ref ) { struct se_lun *lun ; struct percpu_ref const *__mptr ; { __mptr = (struct percpu_ref const *)ref; lun = (struct se_lun *)__mptr + 0xfffffffffffffbc0UL; complete(& lun->lun_ref_comp); return; } } int core_tpg_register(struct se_wwn *se_wwn , struct se_portal_group *se_tpg , int proto_id ) { int ret ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; long tmp ; bool tmp___0 ; struct _ddebug descriptor ; u16 tmp___1 ; char *tmp___3 ; char *tmp___4 ; char *tmp___5 ; char *tmp___6 ; long tmp___7 ; { if ((unsigned long )se_tpg == (unsigned long )((struct se_portal_group *)0)) { return (-22); } else { } if ((unsigned long )se_wwn != (unsigned long )((struct se_wwn *)0)) { se_tpg->se_tpg_tfo = (se_wwn->wwn_tf)->tf_ops; } else { } if ((unsigned long )se_tpg->se_tpg_tfo == (unsigned long )((struct target_core_fabric_ops const *)0)) { printk("\vUnable to locate se_tpg->se_tpg_tfo pointer\n"); return (-22); } else { } se_tpg->tpg_lun_hlist.first = (struct hlist_node *)0; se_tpg->proto_id = proto_id; se_tpg->se_tpg_wwn = se_wwn; atomic_set(& se_tpg->tpg_pr_ref_count, 0); INIT_LIST_HEAD(& se_tpg->acl_node_list); INIT_LIST_HEAD(& se_tpg->se_tpg_node); INIT_LIST_HEAD(& se_tpg->tpg_sess_list); spinlock_check(& se_tpg->session_lock); __raw_spin_lock_init(& se_tpg->session_lock.__annonCompField17.rlock, "&(&se_tpg->session_lock)->rlock", & __key); __mutex_init(& se_tpg->tpg_lun_mutex, "&se_tpg->tpg_lun_mutex", & __key___0); __mutex_init(& se_tpg->acl_node_mutex, "&se_tpg->acl_node_mutex", & __key___1); if (se_tpg->proto_id >= 0) { se_tpg->tpg_virt_lun0 = core_tpg_alloc_lun(se_tpg, 0ULL); tmp___0 = IS_ERR((void const *)se_tpg->tpg_virt_lun0); if ((int )tmp___0) { tmp = PTR_ERR((void const *)se_tpg->tpg_virt_lun0); return ((int )tmp); } else { } ret = core_tpg_add_lun(se_tpg, se_tpg->tpg_virt_lun0, 1U, g_lun0_dev); if (ret < 0) { kfree((void const *)se_tpg->tpg_virt_lun0); return (ret); } else { } } else { } spin_lock_bh(& tpg_lock); list_add_tail(& se_tpg->se_tpg_node, & tpg_list); spin_unlock_bh(& tpg_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_register"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, Proto: %d, Portal Tag: %u\n"; descriptor.lineno = 547U; descriptor.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___7 != 0L) { tmp___1 = (*((se_tpg->se_tpg_tfo)->tpg_get_tag))(se_tpg); tmp___5 = (*((se_tpg->se_tpg_tfo)->tpg_get_wwn))(se_tpg); if ((unsigned long )tmp___5 != (unsigned long )((char *)0)) { tmp___3 = (*((se_tpg->se_tpg_tfo)->tpg_get_wwn))(se_tpg); tmp___4 = tmp___3; } else { tmp___4 = (char *)0; } tmp___6 = (*((se_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]: Allocated portal_group for endpoint: %s, Proto: 
%d, Portal Tag: %u\n", tmp___6, tmp___4, se_tpg->proto_id, (int )tmp___1); } else { } return (0); } } static char const __kstrtab_core_tpg_register[18U] = { 'c', 'o', 'r', 'e', '_', 't', 'p', 'g', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_register ; struct kernel_symbol const __ksymtab_core_tpg_register = {(unsigned long )(& core_tpg_register), (char const *)(& __kstrtab_core_tpg_register)}; int core_tpg_deregister(struct se_portal_group *se_tpg ) { struct target_core_fabric_ops const *tfo ; struct se_node_acl *nacl ; struct se_node_acl *nacl_tmp ; struct list_head node_list ; struct _ddebug descriptor ; u16 tmp ; char *tmp___1 ; char *tmp___2 ; char *tmp___3 ; char *tmp___4 ; long tmp___5 ; int tmp___6 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { tfo = se_tpg->se_tpg_tfo; node_list.next = & node_list; node_list.prev = & node_list; descriptor.modname = "target_core_mod"; descriptor.function = "core_tpg_deregister"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_tpg.c"; descriptor.format = "TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, Proto: %d, Portal Tag: %u\n"; descriptor.lineno = 562U; descriptor.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___5 != 0L) { tmp = (*(tfo->tpg_get_tag))(se_tpg); tmp___3 = (*(tfo->tpg_get_wwn))(se_tpg); if ((unsigned long )tmp___3 != (unsigned long )((char *)0)) { tmp___1 = (*(tfo->tpg_get_wwn))(se_tpg); tmp___2 = tmp___1; } else { tmp___2 = (char *)0; } tmp___4 = (*(tfo->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]: Deallocating portal_group for endpoint: %s, Proto: %d, Portal Tag: %u\n", tmp___4, tmp___2, se_tpg->proto_id, (int )tmp); } else { } spin_lock_bh(& tpg_lock); list_del(& se_tpg->se_tpg_node); spin_unlock_bh(& tpg_lock); goto ldv_57576; ldv_57575: cpu_relax(); ldv_57576: tmp___6 = atomic_read((atomic_t const *)(& se_tpg->tpg_pr_ref_count)); if (tmp___6 != 0) { goto ldv_57575; } else { } ldv_mutex_lock_368(& se_tpg->acl_node_mutex); list_splice_init(& se_tpg->acl_node_list, & node_list); ldv_mutex_unlock_369(& se_tpg->acl_node_mutex); __mptr = (struct list_head const *)node_list.next; nacl = (struct se_node_acl *)__mptr + 0xfffffffffffffb70UL; __mptr___0 = (struct list_head const *)nacl->acl_list.next; nacl_tmp = (struct se_node_acl *)__mptr___0 + 0xfffffffffffffb70UL; goto ldv_57585; ldv_57584: list_del(& nacl->acl_list); se_tpg->num_node_acls = se_tpg->num_node_acls - 1U; core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); kfree((void const *)nacl); nacl = nacl_tmp; __mptr___1 = (struct list_head const *)nacl_tmp->acl_list.next; nacl_tmp = (struct se_node_acl *)__mptr___1 + 0xfffffffffffffb70UL; ldv_57585: ; if ((unsigned long )(& nacl->acl_list) != (unsigned long )(& node_list)) { goto ldv_57584; } else { } if (se_tpg->proto_id >= 0) { core_tpg_remove_lun(se_tpg, se_tpg->tpg_virt_lun0); kfree_call_rcu(& (se_tpg->tpg_virt_lun0)->callback_head, (void (*)(struct callback_head * ))1176); } else { } return (0); } } static char const __kstrtab_core_tpg_deregister[20U] = { 'c', 'o', 'r', 'e', '_', 't', 'p', 'g', '_', 'd', 'e', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '\000'}; struct kernel_symbol const __ksymtab_core_tpg_deregister ; struct 
kernel_symbol const __ksymtab_core_tpg_deregister = {(unsigned long )(& core_tpg_deregister), (char const *)(& __kstrtab_core_tpg_deregister)}; struct se_lun *core_tpg_alloc_lun(struct se_portal_group *tpg , u64 unpacked_lun ) { struct se_lun *lun ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; { tmp = kzalloc(1192UL, 208U); lun = (struct se_lun *)tmp; if ((unsigned long )lun == (unsigned long )((struct se_lun *)0)) { printk("\vUnable to allocate se_lun memory\n"); tmp___0 = ERR_PTR(-12L); return ((struct se_lun *)tmp___0); } else { } lun->unpacked_lun = unpacked_lun; lun->lun_link_magic = 4294932337U; atomic_set(& lun->lun_acl_count, 0); init_completion(& lun->lun_ref_comp); INIT_LIST_HEAD(& lun->lun_deve_list); INIT_LIST_HEAD(& lun->lun_dev_link); atomic_set(& lun->lun_tg_pt_secondary_offline, 0); spinlock_check(& lun->lun_deve_lock); __raw_spin_lock_init(& lun->lun_deve_lock.__annonCompField17.rlock, "&(&lun->lun_deve_lock)->rlock", & __key); __mutex_init(& lun->lun_tg_pt_md_mutex, "&lun->lun_tg_pt_md_mutex", & __key___0); INIT_LIST_HEAD(& lun->lun_tg_pt_gp_link); spinlock_check(& lun->lun_tg_pt_gp_lock); __raw_spin_lock_init(& lun->lun_tg_pt_gp_lock.__annonCompField17.rlock, "&(&lun->lun_tg_pt_gp_lock)->rlock", & __key___1); lun->lun_tpg = tpg; return (lun); } } extern void __compiletime_assert_649(void) ; int core_tpg_add_lun(struct se_portal_group *tpg , struct se_lun *lun , u32 lun_access , struct se_device *dev ) { int ret ; bool __cond ; struct se_device *__var ; { ret = percpu_ref_init(& lun->lun_ref, & core_tpg_lun_ref_release, 0U, 208U); if (ret < 0) { goto out; } else { } ret = core_alloc_rtpi(lun, dev); if (ret != 0) { goto out_kill_ref; } else { } if (((int )(dev->transport)->transport_flags & 1) == 0 && ((dev->se_hba)->hba_flags & 1U) == 0U) { target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); } else { } ldv_mutex_lock_370(& tpg->tpg_lun_mutex); spin_lock(& dev->se_port_lock); lun->lun_index = dev->dev_index; __cond = 0; if ((int )__cond) { __compiletime_assert_649(); } else { } __asm__ volatile ("": : : "memory"); __var = (struct se_device *)0; *((struct se_device * volatile *)(& lun->lun_se_dev)) = dev; dev->export_count = dev->export_count + 1U; list_add_tail(& lun->lun_dev_link, & dev->dev_sep_list); spin_unlock(& dev->se_port_lock); lun->lun_access = lun_access; if (((dev->se_hba)->hba_flags & 1U) == 0U) { hlist_add_head_rcu(& lun->link, & tpg->tpg_lun_hlist); } else { } ldv_mutex_unlock_371(& tpg->tpg_lun_mutex); return (0); out_kill_ref: percpu_ref_exit(& lun->lun_ref); out: ; return (ret); } } extern void __compiletime_assert_693(void) ; void core_tpg_remove_lun(struct se_portal_group *tpg , struct se_lun *lun ) { struct se_device *dev ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_388___1 __u ; int tmp ; bool __cond ; struct se_device *__var ; { __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); dev = ________p1; core_clear_lun_from_tpg(lun, tpg); transport_clear_lun_ref(lun); ldv_mutex_lock_372(& tpg->tpg_lun_mutex); if ((unsigned long )lun->lun_se_dev != (unsigned long )((struct se_device *)0)) { target_detach_tg_pt_gp(lun); spin_lock(& dev->se_port_lock); list_del(& lun->lun_dev_link); dev->export_count = dev->export_count - 1U; __cond = 0; if ((int )__cond) { __compiletime_assert_693(); } else { } __asm__ volatile 
("": : : "memory"); __var = (struct se_device *)0; *((struct se_device * volatile *)(& lun->lun_se_dev)) = (struct se_device */* volatile */)0; spin_unlock(& dev->se_port_lock); } else { } if (((dev->se_hba)->hba_flags & 1U) == 0U) { hlist_del_rcu(& lun->link); } else { } ldv_mutex_unlock_373(& tpg->tpg_lun_mutex); percpu_ref_exit(& lun->lun_ref); return; } } bool ldv_queue_work_on_335(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_336(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_337(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_338(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_339(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_340(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_341(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_342(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_343(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_344(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_345(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_346(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_347(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_348(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_349(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); 
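/* Pattern shared by the ldv_mutex_*() wrappers in this file: the per-lock-class model call above
   (here for the tpg_lun_mutex of struct se_portal_group) lets the verifier track the lock state,
   after which the real mutex_lock()/mutex_unlock() below runs unchanged. ldv_mutex_trylock_343()
   likewise returns the model's verdict, which is why its trailing return of the real
   mutex_trylock() result is unreachable. */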
mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_350(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_351(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_352(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_353(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_354(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_355(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_356(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_357(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_358(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_359(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_360(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_361(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_362(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_363(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_364(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_365(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_366(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_367(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_368(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_369(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_370(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_371(struct mutex 
*ldv_func_arg1 ) { { ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_372(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_373(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct pv_irq_ops pv_irq_ops ; __inline static int fls(int x ) { int r ; { __asm__ ("bsrl %1,%0": "=r" (r): "rm" (x), "0" (-1)); return (r + 1); } } __inline static int __ilog2_u32(u32 n ) { int tmp ; { tmp = fls((int )n); return (tmp + -1); } } extern void dump_stack(void) ; extern void __might_sleep(char const * , int , int ) ; __inline static unsigned long arch_local_save_flags(void) { unsigned long __ret ; unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.save_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (831), "i" (12UL)); ldv_4801: ; goto ldv_4801; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (43UL), [paravirt_opptr] "i" (& pv_irq_ops.save_fl.func), [paravirt_clobber] "i" (1): "memory", "cc"); __ret = __eax; return (__ret); } } __inline static void arch_local_irq_restore(unsigned long f ) { unsigned long __edi ; unsigned long __esi ; unsigned long __edx ; unsigned long __ecx ; unsigned long __eax ; long tmp ; { __edi = __edi; __esi = __esi; __edx = __edx; __ecx = __ecx; __eax = __eax; tmp = ldv__builtin_expect((unsigned long )pv_irq_ops.restore_fl.func == (unsigned long )((void *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"./arch/x86/include/asm/paravirt.h"), "i" (836), "i" (12UL)); ldv_4811: ; goto ldv_4811; } else { } __asm__ volatile ("771:\n\tcall *%c2;\n772:\n.pushsection .parainstructions,\"a\"\n .balign 8 \n .quad 771b\n .byte %c1\n .byte 772b-771b\n .short %c3\n.popsection\n": "=a" (__eax): [paravirt_typenum] "i" (44UL), [paravirt_opptr] "i" (& pv_irq_ops.restore_fl.func), [paravirt_clobber] "i" (1), "D" (f): "memory", "cc"); return; } } __inline static void *ERR_PTR(long error ) ; __inline static bool IS_ERR(void const *ptr ) ; __inline static int arch_irqs_disabled_flags(unsigned long flags ) { { return ((flags & 512UL) == 0UL); } } extern void trace_hardirqs_on(void) ; extern void trace_hardirqs_off(void) ; __inline static int atomic_dec_and_test(atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 
volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5616; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5616; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5616; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5616; default: __cmpxchg_wrong_size(); } ldv_5616: ; return (__ret); } } __inline static int __atomic_add_unless(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5645: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5644; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5644; } else { } c = old; goto ldv_5645; ldv_5644: ; return (c); } } __inline static int atomic_add_unless(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless(v, a, u); return (tmp != u); } } extern int debug_locks ; int ldv_mutex_trylock_427(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_422(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_425(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_428(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_431(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_423(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_424(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_426(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_430(struct mutex *ldv_func_arg1 ) ; __inline static int preempt_count___0(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6716; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6716; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6716; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6716; default: __bad_percpu_size(); } ldv_6716: ; return (pfo_ret__ & 2147483647); } } __inline static void __preempt_count_add___3(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (val)); } goto ldv_6773; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6773; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl 
%%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (val)); } goto ldv_6773; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (val)); } goto ldv_6773; default: __bad_percpu_size(); } ldv_6773: ; return; } } __inline static void __preempt_count_sub___3(int val ) { int pao_ID__ ; { pao_ID__ = 0; switch (4UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (__preempt_count): "qi" (- val)); } goto ldv_6785; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6785; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (__preempt_count): "ri" (- val)); } goto ldv_6785; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (__preempt_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (__preempt_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (__preempt_count): "re" (- val)); } goto ldv_6785; default: __bad_percpu_size(); } ldv_6785: ; return; } } __inline static int static_key_count(struct static_key *key ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& key->enabled)); return (tmp); } } __inline static bool static_key_false(struct static_key *key ) { int tmp ; long tmp___0 ; { tmp = static_key_count(key); tmp___0 = ldv__builtin_expect(tmp > 0, 0L); if (tmp___0 != 0L) { return (1); } else { } return (0); } } extern void complete_all(struct completion * ) ; extern bool rcu_lockdep_current_cpu_online(void) ; __inline static int rcu_read_lock_sched_held(void) { int lockdep_opinion ; int tmp ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; unsigned long _flags ; int tmp___5 ; int tmp___6 ; { lockdep_opinion = 0; tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } tmp___2 = rcu_lockdep_current_cpu_online(); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { return (0); } else { } if (debug_locks != 0) { lockdep_opinion = lock_is_held(& rcu_sched_lock_map); } else { } if (lockdep_opinion != 0) { tmp___6 = 1; } else { tmp___4 = preempt_count___0(); if (tmp___4 != 0) { tmp___6 = 1; } else { _flags = arch_local_save_flags(); tmp___5 = arch_irqs_disabled_flags(_flags); if (tmp___5 != 0) { tmp___6 = 1; } else { tmp___6 = 0; } } } return (tmp___6); } } __inline static void rcu_read_lock_sched___2(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { __preempt_count_add___3(1); __asm__ volatile ("": : : "memory"); rcu_lock_acquire(& rcu_sched_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 965, "rcu_read_lock_sched() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_lock_sched_notrace(void) { { __preempt_count_add___3(1); __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_unlock_sched___2(void) { bool __warned ; int tmp ; bool tmp___0 ; int tmp___1 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_watching(); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 983, "rcu_read_unlock_sched() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_sched_lock_map); __asm__ volatile ("": : : "memory"); __preempt_count_sub___3(1); return; } } __inline static void rcu_read_unlock_sched_notrace(void) { { __asm__ volatile ("": : : "memory"); __preempt_count_sub___3(1); return; } } extern struct workqueue_struct *system_wq ; void ldv_destroy_workqueue_429(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_417(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_419(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_418(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_421(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_420(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_417(8192, wq, work); return (tmp); } } __inline static bool schedule_work(struct work_struct *work ) { bool tmp ; { tmp = queue_work(system_wq, work); return (tmp); } } extern struct page *alloc_pages_current(gfp_t , unsigned int ) ; __inline static struct page *alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { tmp = alloc_pages_current(gfp_mask, order); return (tmp); } } extern void __free_pages(struct page * , unsigned int ) ; __inline static void percpu_ref_kill(struct percpu_ref *ref ) { { return; } } __inline static bool __ref_is_percpu___2(struct percpu_ref *ref , unsigned long **percpu_countp ) { unsigned long percpu_ptr ; unsigned long _________p1 ; union __anonunion___u_192___2 __u ; long tmp ; { __read_once_size((void const volatile *)(& ref->percpu_count_ptr), (void *)(& __u.__c), 8); _________p1 = __u.__val; percpu_ptr = _________p1; tmp = ldv__builtin_expect((percpu_ptr & 3UL) != 0UL, 0L); if (tmp != 0L) { return (0); } else { } *percpu_countp = (unsigned long *)percpu_ptr; return (1); } } __inline static void percpu_ref_put_many___1(struct percpu_ref *ref , unsigned long nr ) { unsigned long *percpu_count ; void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; int tmp ; long tmp___0 ; bool tmp___1 ; { rcu_read_lock_sched___2(); tmp___1 = __ref_is_percpu___2(ref, & percpu_count); if ((int )tmp___1) { __vpp_verify = (void const *)0; switch (8UL) { case 1UL: pao_ID__ = 0; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if 
(pao_ID__ == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16637; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16637; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16637; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16637; default: __bad_percpu_size(); } ldv_16637: ; goto ldv_16642; case 2UL: pao_ID_____0 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16648; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16648; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16648; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16648; default: __bad_percpu_size(); } ldv_16648: ; goto ldv_16642; case 4UL: pao_ID_____1 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16658; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16658; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16658; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16658; default: __bad_percpu_size(); } ldv_16658: ; goto ldv_16642; case 8UL: pao_ID_____2 = 0; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addb %1, %%gs:%0": "+m" (*percpu_count): "qi" (- nr)); } goto ldv_16668; case 2UL: ; 
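/* The nested size switches with addb/addw/addl/addq asm here correspond to the CIL expansion of the
   this_cpu_sub() fast path in percpu_ref_put_many(): while the ref is still in per-cpu mode the
   local counter is decremented; otherwise the shared atomic count in the else branch below is used
   and ref->release() runs on the final put. */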
if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addw %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16668; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addl %1, %%gs:%0": "+m" (*percpu_count): "ri" (- nr)); } goto ldv_16668; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%0": "+m" (*percpu_count)); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%0": "+m" (*percpu_count)); } else { __asm__ ("addq %1, %%gs:%0": "+m" (*percpu_count): "re" (- nr)); } goto ldv_16668; default: __bad_percpu_size(); } ldv_16668: ; goto ldv_16642; default: __bad_size_call_parameter(); goto ldv_16642; } ldv_16642: ; } else { tmp = atomic_long_sub_and_test((long )nr, & ref->count); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { (*(ref->release))(ref); } else { } } rcu_read_unlock_sched___2(); return; } } __inline static void percpu_ref_put___1(struct percpu_ref *ref ) { { percpu_ref_put_many___1(ref, 1UL); return; } } extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; void activate_work_5(struct work_struct *work , int state ) ; void call_and_disable_work_3(struct work_struct *work ) ; void disable_work_3(struct work_struct *work ) ; void invoke_work_4(void) ; void activate_work_6(struct work_struct *work , int state ) ; void invoke_work_5(void) ; void call_and_disable_work_4(struct work_struct *work ) ; void invoke_work_6(void) ; void activate_work_3(struct work_struct *work , int state ) ; void disable_work_5(struct work_struct *work ) ; void call_and_disable_all_6(int state ) ; void activate_work_4(struct work_struct *work , int state ) ; void call_and_disable_all_5(int state ) ; void invoke_work_3(void) ; void call_and_disable_all_4(int state ) ; void disable_work_4(struct work_struct *work ) ; void call_and_disable_all_3(int state ) ; void call_and_disable_work_5(struct work_struct *work ) ; void disable_work_6(struct work_struct *work ) ; void call_and_disable_work_6(struct work_struct *work ) ; __inline static int kref_put_spinlock_irqsave(struct kref *kref , void (*release)(struct kref * ) , spinlock_t *lock ) { unsigned long flags ; int __ret_warn_on ; long tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; int tmp___2 ; int tmp___3 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 121); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_add_unless(& kref->refcount, -1, 1); if (tmp___0 != 0) { return (0); } else { } tmp___1 = spinlock_check(lock); flags = _raw_spin_lock_irqsave(tmp___1); tmp___3 = atomic_dec_and_test(& kref->refcount); if (tmp___3 != 0) { (*release)(kref); tmp___2 = arch_irqs_disabled_flags(flags); if (tmp___2 != 0) { arch_local_irq_restore(flags); trace_hardirqs_off(); } else { trace_hardirqs_on(); arch_local_irq_restore(flags); } return (1); } else { } spin_unlock_irqrestore(lock, flags); return (0); } } extern void *vmap(struct page ** , unsigned int , unsigned long , pgprot_t ) ; extern void vunmap(void const * ) ; extern void kvfree(void const * ) ; __inline static void 
*lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } __inline static void sg_assign_page(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (90), "i" (12UL)); ldv_31849: ; goto ldv_31849; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (92), "i" (12UL)); ldv_31850: ; goto ldv_31850; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (93), "i" (12UL)); ldv_31851: ; goto ldv_31851; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static struct page *sg_page(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_31861: ; goto ldv_31861; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_31862: ; goto ldv_31862; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } extern struct scatterlist *sg_next(struct scatterlist * ) ; extern void sg_init_table(struct scatterlist * , unsigned int ) ; __inline static void *kmap(struct page *page ) { void *tmp ; { __might_sleep("include/linux/highmem.h", 58, 0); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void kunmap(struct page *page ) { { return; } } extern void percpu_ida_destroy(struct percpu_ida * ) ; extern int __percpu_ida_init(struct percpu_ida * , unsigned long , unsigned long , unsigned long ) ; __inline static int percpu_ida_init(struct percpu_ida *pool , unsigned long nr_tags ) { int tmp ; { tmp = __percpu_ida_init(pool, nr_tags, 48UL, 32UL); return (tmp); } } void target_complete_cmd_with_length(struct se_cmd *cmd , u8 scsi_status , int length ) ; void sbc_dif_generate(struct se_cmd *cmd ) ; sense_reason_t sbc_dif_verify(struct se_cmd *cmd , sector_t start , unsigned int sectors , unsigned int ei_lba , struct scatterlist *psg , int psg_off ) ; void transport_set_vpd_proto_id(struct t10_vpd *vpd , unsigned char *page_83 ) ; int transport_set_vpd_assoc(struct 
t10_vpd *vpd , unsigned char *page_83 ) ; int transport_set_vpd_ident_type(struct t10_vpd *vpd , unsigned char *page_83 ) ; int transport_set_vpd_ident(struct t10_vpd *vpd , unsigned char *page_83 ) ; int target_alloc_sgl(struct scatterlist **sgl , unsigned int *nents , u32 length , bool zero_page ) ; sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *cmd , struct scatterlist *sgl , u32 sgl_count , struct scatterlist *sgl_bidi , u32 sgl_bidi_count ) ; struct se_session *transport_init_session(enum target_prot_op sup_prot_ops ) ; int transport_alloc_session_tags(struct se_session *se_sess , unsigned int tag_num , unsigned int tag_size ) ; struct se_session *transport_init_session_tags(unsigned int tag_num , unsigned int tag_size , enum target_prot_op sup_prot_ops ) ; void __transport_register_session(struct se_portal_group *se_tpg , struct se_node_acl *se_nacl , struct se_session *se_sess , void *fabric_sess_ptr ) ; void transport_register_session(struct se_portal_group *se_tpg , struct se_node_acl *se_nacl , struct se_session *se_sess , void *fabric_sess_ptr ) ; ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg , char *page ) ; void transport_free_session(struct se_session *se_sess ) ; void transport_deregister_session_configfs(struct se_session *se_sess ) ; void transport_deregister_session(struct se_session *se_sess ) ; void transport_init_se_cmd(struct se_cmd *cmd , struct target_core_fabric_ops const *tfo , struct se_session *se_sess , u32 data_length , int data_direction , int task_attr , unsigned char *sense_buffer ) ; sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *cmd , unsigned char *cdb ) ; int target_submit_cmd_map_sgls(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *cdb , unsigned char *sense , u64 unpacked_lun , u32 data_length , int task_attr , int data_dir , int flags , struct scatterlist *sgl , u32 sgl_count , struct scatterlist *sgl_bidi , u32 sgl_bidi_count , struct scatterlist *sgl_prot , u32 sgl_prot_count ) ; int target_submit_cmd(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *cdb , unsigned char *sense , u64 unpacked_lun , u32 data_length , int task_attr , int data_dir , int flags ) ; int target_submit_tmr(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *sense , u64 unpacked_lun , void *fabric_tmr_ptr , unsigned char tm_type , gfp_t gfp , unsigned int tag , int flags ) ; int transport_handle_cdb_direct(struct se_cmd *cmd ) ; sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd ) ; void target_execute_cmd(struct se_cmd *cmd ) ; int transport_generic_free_cmd(struct se_cmd *cmd , int wait_for_tasks ) ; int transport_check_aborted_status(struct se_cmd *cmd , int send_status ) ; int transport_send_check_condition_and_sense(struct se_cmd *cmd , sense_reason_t reason , int from_transport ) ; int target_get_sess_cmd(struct se_cmd *se_cmd , bool ack_kref ) ; void target_sess_cmd_list_set_waiting(struct se_session *se_sess ) ; void target_wait_for_sess_cmds(struct se_session *se_sess ) ; int transport_generic_handle_tmr(struct se_cmd *cmd ) ; void transport_generic_request_failure(struct se_cmd *cmd , sense_reason_t sense_reason ) ; void __target_execute_cmd(struct se_cmd *cmd ) ; sense_reason_t target_cmd_size_check(struct se_cmd *cmd , unsigned int size ) ; bool target_check_wce(struct se_device *dev ) ; bool target_check_fua(struct se_device *dev ) ; struct kmem_cache *t10_alua_lu_gp_cache ; struct kmem_cache *t10_alua_lu_gp_mem_cache ; struct kmem_cache 
*t10_alua_tg_pt_gp_cache ; struct kmem_cache *t10_alua_lba_map_cache ; struct kmem_cache *t10_alua_lba_map_mem_cache ; struct kmem_cache *t10_pr_reg_cache ; struct kmem_cache *se_ua_cache ; sense_reason_t target_scsi3_ua_check(struct se_cmd *cmd ) ; void core_scsi3_ua_for_check_condition(struct se_cmd *cmd , u8 *asc , u8 *ascq ) ; __inline static bool seq_buf_has_overflowed(struct seq_buf *s ) { { return (s->len > s->size); } } __inline static bool trace_seq_has_overflowed(struct trace_seq *s ) { bool tmp ; int tmp___0 ; { if (s->full != 0) { tmp___0 = 1; } else { tmp = seq_buf_has_overflowed(& s->seq); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } extern void trace_seq_printf(struct trace_seq * , char const * , ...) ; __inline static unsigned int scsi_varlen_cdb_length(void const *hdr ) { { return ((unsigned int )((int )((struct scsi_varlen_cdb_hdr *)hdr)->additional_cdb_length + 8)); } } extern unsigned char const scsi_command_size_tbl[8U] ; __inline static unsigned int scsi_command_size(unsigned char const *cmnd ) { unsigned int tmp ; unsigned int tmp___0 ; { if ((unsigned int )((unsigned char )*cmnd) == 127U) { tmp = scsi_varlen_cdb_length((void const *)cmnd); tmp___0 = tmp; } else { tmp___0 = (unsigned int )scsi_command_size_tbl[((int )((unsigned char )*cmnd) >> 5) & 7]; } return (tmp___0); } } struct tracepoint __tracepoint_target_sequencer_start ; __inline static void trace_target_sequencer_start(struct se_cmd *cmd ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_404___0 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_406___0 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false(& __tracepoint_target_sequencer_start.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_target_sequencer_start.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("include/trace/events/target.h", 164, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_60311: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct se_cmd * ))it_func))(__data, cmd); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_60311; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_target_sequencer_start.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned___0) { tmp___3 = rcu_read_lock_sched_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("include/trace/events/target.h", 164, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } struct tracepoint __tracepoint_target_cmd_complete ; __inline static void trace_target_cmd_complete(struct se_cmd *cmd ) { struct tracepoint_func *it_func_ptr ; void *it_func ; void *__data ; struct tracepoint_func *________p1 ; struct tracepoint_func *_________p1 ; union __anonunion___u_408___0 __u ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; struct tracepoint_func *________p1___0 ; struct tracepoint_func *_________p1___0 ; union __anonunion___u_410___0 __u___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp___1 = static_key_false(& __tracepoint_target_cmd_complete.key); if ((int )tmp___1) { rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_target_cmd_complete.funcs), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_sched_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("include/trace/events/target.h", 209, "suspicious rcu_dereference_check() usage"); } else { } } else { } it_func_ptr = ________p1; if ((unsigned long )it_func_ptr != (unsigned long )((struct tracepoint_func *)0)) { ldv_60362: it_func = it_func_ptr->func; __data = it_func_ptr->data; (*((void (*)(void * , struct se_cmd * ))it_func))(__data, cmd); it_func_ptr = it_func_ptr + 1; if ((unsigned long )it_func_ptr->func != (unsigned long )((void *)0)) { goto ldv_60362; } else { } } else { } rcu_read_unlock_sched_notrace(); } else { } rcu_read_lock_sched_notrace(); __read_once_size((void const volatile *)(& __tracepoint_target_cmd_complete.funcs), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_sched_held(); if (tmp___3 == 0) { __warned___0 = 1; lockdep_rcu_suspicious("include/trace/events/target.h", 209, "suspicious rcu_dereference_check() usage"); } else { } } else { } rcu_read_unlock_sched_notrace(); return; } } static char const __tpstrtab_target_sequencer_start[23U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'e', 'q', 'u', 'e', 'n', 'c', 'e', 'r', '_', 's', 't', 'a', 'r', 't', '\000'}; struct tracepoint __tracepoint_target_sequencer_start = {(char const *)(& __tpstrtab_target_sequencer_start), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; static char const __tpstrtab_target_cmd_complete[20U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'c', 'm', 'd', '_', 'c', 'o', 'm', 'p', 'l', 'e', 't', 'e', '\000'}; struct tracepoint __tracepoint_target_cmd_complete = {(char const *)(& __tpstrtab_target_cmd_complete), {{0}}, (void (*)(void))0, (void (*)(void))0, (struct tracepoint_func *)0}; extern char const *trace_print_symbols_seq(struct trace_seq * , unsigned long , struct trace_print_flags const * ) ; extern char const *trace_print_hex_seq(struct trace_seq * , unsigned char const * , int ) ; extern int trace_raw_output_prep(struct trace_iterator * , struct trace_event * ) ; __inline static enum print_line_t trace_handle_return(struct trace_seq *s ) { bool tmp ; { tmp = trace_seq_has_overflowed(s); return ((int )tmp ? 
0 : 1); } } extern int trace_event_reg(struct trace_event_call * , enum trace_reg , void * ) ; extern int trace_event_raw_init(struct trace_event_call * ) ; extern int trace_define_field(struct trace_event_call * , char const * , char const * , int , int , int , int ) ; static enum print_line_t trace_raw_output_target_sequencer_start(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_target_sequencer_start *field ; int ret ; unsigned int tmp___0 ; int tmp___1 ; unsigned int tmp___2 ; struct trace_print_flags symbols[5U] ; char const *tmp___3 ; char const *tmp___4 ; struct trace_print_flags symbols___0[93U] ; char const *tmp___5 ; enum print_line_t tmp___6 ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_target_sequencer_start *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } tmp___2 = scsi_command_size((unsigned char const *)(& field->cdb)); if (tmp___2 <= 16U) { tmp___0 = scsi_command_size((unsigned char const *)(& field->cdb)); tmp___1 = (int )field->cdb[tmp___0 - 1U]; } else { tmp___1 = (int )field->cdb[1]; } symbols[0].mask = 32UL; symbols[0].name = "SIMPLE"; symbols[1].mask = 33UL; symbols[1].name = "HEAD"; symbols[2].mask = 34UL; symbols[2].name = "ORDERED"; symbols[3].mask = 36UL; symbols[3].name = "ACA"; symbols[4].mask = 0xffffffffffffffffUL; symbols[4].name = (char const *)0; tmp___3 = trace_print_symbols_seq(p, (unsigned long )field->task_attribute, (struct trace_print_flags const *)(& symbols)); tmp___4 = trace_print_hex_seq(p, (unsigned char const *)(& field->cdb), 16); symbols___0[0].mask = 0UL; symbols___0[0].name = "TEST_UNIT_READY"; symbols___0[1].mask = 1UL; symbols___0[1].name = "REZERO_UNIT"; symbols___0[2].mask = 3UL; symbols___0[2].name = "REQUEST_SENSE"; symbols___0[3].mask = 4UL; symbols___0[3].name = "FORMAT_UNIT"; symbols___0[4].mask = 5UL; symbols___0[4].name = "READ_BLOCK_LIMITS"; symbols___0[5].mask = 7UL; symbols___0[5].name = "REASSIGN_BLOCKS"; symbols___0[6].mask = 7UL; symbols___0[6].name = "INITIALIZE_ELEMENT_STATUS"; symbols___0[7].mask = 8UL; symbols___0[7].name = "READ_6"; symbols___0[8].mask = 10UL; symbols___0[8].name = "WRITE_6"; symbols___0[9].mask = 11UL; symbols___0[9].name = "SEEK_6"; symbols___0[10].mask = 15UL; symbols___0[10].name = "READ_REVERSE"; symbols___0[11].mask = 16UL; symbols___0[11].name = "WRITE_FILEMARKS"; symbols___0[12].mask = 17UL; symbols___0[12].name = "SPACE"; symbols___0[13].mask = 18UL; symbols___0[13].name = "INQUIRY"; symbols___0[14].mask = 20UL; symbols___0[14].name = "RECOVER_BUFFERED_DATA"; symbols___0[15].mask = 21UL; symbols___0[15].name = "MODE_SELECT"; symbols___0[16].mask = 22UL; symbols___0[16].name = "RESERVE"; symbols___0[17].mask = 23UL; symbols___0[17].name = "RELEASE"; symbols___0[18].mask = 24UL; symbols___0[18].name = "COPY"; symbols___0[19].mask = 25UL; symbols___0[19].name = "ERASE"; symbols___0[20].mask = 26UL; symbols___0[20].name = "MODE_SENSE"; symbols___0[21].mask = 27UL; symbols___0[21].name = "START_STOP"; symbols___0[22].mask = 28UL; symbols___0[22].name = "RECEIVE_DIAGNOSTIC"; symbols___0[23].mask = 29UL; symbols___0[23].name = "SEND_DIAGNOSTIC"; symbols___0[24].mask = 30UL; symbols___0[24].name = "ALLOW_MEDIUM_REMOVAL"; symbols___0[25].mask = 36UL; symbols___0[25].name = "SET_WINDOW"; symbols___0[26].mask = 37UL; symbols___0[26].name = "READ_CAPACITY"; symbols___0[27].mask = 40UL; symbols___0[27].name = 
"READ_10"; symbols___0[28].mask = 42UL; symbols___0[28].name = "WRITE_10"; symbols___0[29].mask = 43UL; symbols___0[29].name = "SEEK_10"; symbols___0[30].mask = 43UL; symbols___0[30].name = "POSITION_TO_ELEMENT"; symbols___0[31].mask = 46UL; symbols___0[31].name = "WRITE_VERIFY"; symbols___0[32].mask = 47UL; symbols___0[32].name = "VERIFY"; symbols___0[33].mask = 48UL; symbols___0[33].name = "SEARCH_HIGH"; symbols___0[34].mask = 49UL; symbols___0[34].name = "SEARCH_EQUAL"; symbols___0[35].mask = 50UL; symbols___0[35].name = "SEARCH_LOW"; symbols___0[36].mask = 51UL; symbols___0[36].name = "SET_LIMITS"; symbols___0[37].mask = 52UL; symbols___0[37].name = "PRE_FETCH"; symbols___0[38].mask = 52UL; symbols___0[38].name = "READ_POSITION"; symbols___0[39].mask = 53UL; symbols___0[39].name = "SYNCHRONIZE_CACHE"; symbols___0[40].mask = 54UL; symbols___0[40].name = "LOCK_UNLOCK_CACHE"; symbols___0[41].mask = 55UL; symbols___0[41].name = "READ_DEFECT_DATA"; symbols___0[42].mask = 56UL; symbols___0[42].name = "MEDIUM_SCAN"; symbols___0[43].mask = 57UL; symbols___0[43].name = "COMPARE"; symbols___0[44].mask = 58UL; symbols___0[44].name = "COPY_VERIFY"; symbols___0[45].mask = 59UL; symbols___0[45].name = "WRITE_BUFFER"; symbols___0[46].mask = 60UL; symbols___0[46].name = "READ_BUFFER"; symbols___0[47].mask = 61UL; symbols___0[47].name = "UPDATE_BLOCK"; symbols___0[48].mask = 62UL; symbols___0[48].name = "READ_LONG"; symbols___0[49].mask = 63UL; symbols___0[49].name = "WRITE_LONG"; symbols___0[50].mask = 64UL; symbols___0[50].name = "CHANGE_DEFINITION"; symbols___0[51].mask = 65UL; symbols___0[51].name = "WRITE_SAME"; symbols___0[52].mask = 66UL; symbols___0[52].name = "UNMAP"; symbols___0[53].mask = 67UL; symbols___0[53].name = "READ_TOC"; symbols___0[54].mask = 76UL; symbols___0[54].name = "LOG_SELECT"; symbols___0[55].mask = 77UL; symbols___0[55].name = "LOG_SENSE"; symbols___0[56].mask = 83UL; symbols___0[56].name = "XDWRITEREAD_10"; symbols___0[57].mask = 85UL; symbols___0[57].name = "MODE_SELECT_10"; symbols___0[58].mask = 86UL; symbols___0[58].name = "RESERVE_10"; symbols___0[59].mask = 87UL; symbols___0[59].name = "RELEASE_10"; symbols___0[60].mask = 90UL; symbols___0[60].name = "MODE_SENSE_10"; symbols___0[61].mask = 94UL; symbols___0[61].name = "PERSISTENT_RESERVE_IN"; symbols___0[62].mask = 95UL; symbols___0[62].name = "PERSISTENT_RESERVE_OUT"; symbols___0[63].mask = 127UL; symbols___0[63].name = "VARIABLE_LENGTH_CMD"; symbols___0[64].mask = 160UL; symbols___0[64].name = "REPORT_LUNS"; symbols___0[65].mask = 163UL; symbols___0[65].name = "MAINTENANCE_IN"; symbols___0[66].mask = 164UL; symbols___0[66].name = "MAINTENANCE_OUT"; symbols___0[67].mask = 165UL; symbols___0[67].name = "MOVE_MEDIUM"; symbols___0[68].mask = 166UL; symbols___0[68].name = "EXCHANGE_MEDIUM"; symbols___0[69].mask = 168UL; symbols___0[69].name = "READ_12"; symbols___0[70].mask = 170UL; symbols___0[70].name = "WRITE_12"; symbols___0[71].mask = 174UL; symbols___0[71].name = "WRITE_VERIFY_12"; symbols___0[72].mask = 176UL; symbols___0[72].name = "SEARCH_HIGH_12"; symbols___0[73].mask = 177UL; symbols___0[73].name = "SEARCH_EQUAL_12"; symbols___0[74].mask = 178UL; symbols___0[74].name = "SEARCH_LOW_12"; symbols___0[75].mask = 184UL; symbols___0[75].name = "READ_ELEMENT_STATUS"; symbols___0[76].mask = 182UL; symbols___0[76].name = "SEND_VOLUME_TAG"; symbols___0[77].mask = 234UL; symbols___0[77].name = "WRITE_LONG_2"; symbols___0[78].mask = 136UL; symbols___0[78].name = "READ_16"; symbols___0[79].mask = 138UL; 
symbols___0[79].name = "WRITE_16"; symbols___0[80].mask = 143UL; symbols___0[80].name = "VERIFY_16"; symbols___0[81].mask = 147UL; symbols___0[81].name = "WRITE_SAME_16"; symbols___0[82].mask = 158UL; symbols___0[82].name = "SERVICE_ACTION_IN_16"; symbols___0[83].mask = 16UL; symbols___0[83].name = "SAI_READ_CAPACITY_16"; symbols___0[84].mask = 18UL; symbols___0[84].name = "SAI_GET_LBA_STATUS"; symbols___0[85].mask = 10UL; symbols___0[85].name = "MI_REPORT_TARGET_PGS"; symbols___0[86].mask = 10UL; symbols___0[86].name = "MO_SET_TARGET_PGS"; symbols___0[87].mask = 9UL; symbols___0[87].name = "READ_32"; symbols___0[88].mask = 11UL; symbols___0[88].name = "WRITE_32"; symbols___0[89].mask = 13UL; symbols___0[89].name = "WRITE_SAME_32"; symbols___0[90].mask = 133UL; symbols___0[90].name = "ATA_16"; symbols___0[91].mask = 161UL; symbols___0[91].name = "ATA_12"; symbols___0[92].mask = 0xffffffffffffffffUL; symbols___0[92].name = (char const *)0; tmp___5 = trace_print_symbols_seq(p, (unsigned long )field->opcode, (struct trace_print_flags const *)(& symbols___0)); trace_seq_printf(s, "%s -> LUN %03u %s data_length %6u CDB %s (TA:%s C:%02x)\n", (char *)field + ((unsigned long )field->__data_loc_initiator & 65535UL), field->unpacked_lun, tmp___5, field->data_length, tmp___4, tmp___3, tmp___1); tmp___6 = trace_handle_return(s); return (tmp___6); } } static enum print_line_t trace_raw_output_target_cmd_complete(struct trace_iterator *iter , int flags , struct trace_event *trace_event ) { struct trace_seq *s ; struct trace_seq *p ; struct trace_event_raw_target_cmd_complete *field ; int ret ; unsigned int tmp___0 ; int tmp___1 ; unsigned int tmp___2 ; struct trace_print_flags symbols[5U] ; char const *tmp___3 ; char const *tmp___4 ; struct trace_print_flags symbols___0[93U] ; char const *tmp___5 ; char const *tmp___6 ; struct trace_print_flags symbols___1[12U] ; char const *tmp___7 ; enum print_line_t tmp___8 ; { s = & iter->seq; p = & iter->tmp_seq; field = (struct trace_event_raw_target_cmd_complete *)iter->ent; ret = trace_raw_output_prep(iter, trace_event); if (ret != 1) { return ((enum print_line_t )ret); } else { } tmp___2 = scsi_command_size((unsigned char const *)(& field->cdb)); if (tmp___2 <= 16U) { tmp___0 = scsi_command_size((unsigned char const *)(& field->cdb)); tmp___1 = (int )field->cdb[tmp___0 - 1U]; } else { tmp___1 = (int )field->cdb[1]; } symbols[0].mask = 32UL; symbols[0].name = "SIMPLE"; symbols[1].mask = 33UL; symbols[1].name = "HEAD"; symbols[2].mask = 34UL; symbols[2].name = "ORDERED"; symbols[3].mask = 36UL; symbols[3].name = "ACA"; symbols[4].mask = 0xffffffffffffffffUL; symbols[4].name = (char const *)0; tmp___3 = trace_print_symbols_seq(p, (unsigned long )field->task_attribute, (struct trace_print_flags const *)(& symbols)); tmp___4 = trace_print_hex_seq(p, (unsigned char const *)(& field->cdb), 16); symbols___0[0].mask = 0UL; symbols___0[0].name = "TEST_UNIT_READY"; symbols___0[1].mask = 1UL; symbols___0[1].name = "REZERO_UNIT"; symbols___0[2].mask = 3UL; symbols___0[2].name = "REQUEST_SENSE"; symbols___0[3].mask = 4UL; symbols___0[3].name = "FORMAT_UNIT"; symbols___0[4].mask = 5UL; symbols___0[4].name = "READ_BLOCK_LIMITS"; symbols___0[5].mask = 7UL; symbols___0[5].name = "REASSIGN_BLOCKS"; symbols___0[6].mask = 7UL; symbols___0[6].name = "INITIALIZE_ELEMENT_STATUS"; symbols___0[7].mask = 8UL; symbols___0[7].name = "READ_6"; symbols___0[8].mask = 10UL; symbols___0[8].name = "WRITE_6"; symbols___0[9].mask = 11UL; symbols___0[9].name = "SEEK_6"; symbols___0[10].mask = 15UL; 
symbols___0[10].name = "READ_REVERSE"; symbols___0[11].mask = 16UL; symbols___0[11].name = "WRITE_FILEMARKS"; symbols___0[12].mask = 17UL; symbols___0[12].name = "SPACE"; symbols___0[13].mask = 18UL; symbols___0[13].name = "INQUIRY"; symbols___0[14].mask = 20UL; symbols___0[14].name = "RECOVER_BUFFERED_DATA"; symbols___0[15].mask = 21UL; symbols___0[15].name = "MODE_SELECT"; symbols___0[16].mask = 22UL; symbols___0[16].name = "RESERVE"; symbols___0[17].mask = 23UL; symbols___0[17].name = "RELEASE"; symbols___0[18].mask = 24UL; symbols___0[18].name = "COPY"; symbols___0[19].mask = 25UL; symbols___0[19].name = "ERASE"; symbols___0[20].mask = 26UL; symbols___0[20].name = "MODE_SENSE"; symbols___0[21].mask = 27UL; symbols___0[21].name = "START_STOP"; symbols___0[22].mask = 28UL; symbols___0[22].name = "RECEIVE_DIAGNOSTIC"; symbols___0[23].mask = 29UL; symbols___0[23].name = "SEND_DIAGNOSTIC"; symbols___0[24].mask = 30UL; symbols___0[24].name = "ALLOW_MEDIUM_REMOVAL"; symbols___0[25].mask = 36UL; symbols___0[25].name = "SET_WINDOW"; symbols___0[26].mask = 37UL; symbols___0[26].name = "READ_CAPACITY"; symbols___0[27].mask = 40UL; symbols___0[27].name = "READ_10"; symbols___0[28].mask = 42UL; symbols___0[28].name = "WRITE_10"; symbols___0[29].mask = 43UL; symbols___0[29].name = "SEEK_10"; symbols___0[30].mask = 43UL; symbols___0[30].name = "POSITION_TO_ELEMENT"; symbols___0[31].mask = 46UL; symbols___0[31].name = "WRITE_VERIFY"; symbols___0[32].mask = 47UL; symbols___0[32].name = "VERIFY"; symbols___0[33].mask = 48UL; symbols___0[33].name = "SEARCH_HIGH"; symbols___0[34].mask = 49UL; symbols___0[34].name = "SEARCH_EQUAL"; symbols___0[35].mask = 50UL; symbols___0[35].name = "SEARCH_LOW"; symbols___0[36].mask = 51UL; symbols___0[36].name = "SET_LIMITS"; symbols___0[37].mask = 52UL; symbols___0[37].name = "PRE_FETCH"; symbols___0[38].mask = 52UL; symbols___0[38].name = "READ_POSITION"; symbols___0[39].mask = 53UL; symbols___0[39].name = "SYNCHRONIZE_CACHE"; symbols___0[40].mask = 54UL; symbols___0[40].name = "LOCK_UNLOCK_CACHE"; symbols___0[41].mask = 55UL; symbols___0[41].name = "READ_DEFECT_DATA"; symbols___0[42].mask = 56UL; symbols___0[42].name = "MEDIUM_SCAN"; symbols___0[43].mask = 57UL; symbols___0[43].name = "COMPARE"; symbols___0[44].mask = 58UL; symbols___0[44].name = "COPY_VERIFY"; symbols___0[45].mask = 59UL; symbols___0[45].name = "WRITE_BUFFER"; symbols___0[46].mask = 60UL; symbols___0[46].name = "READ_BUFFER"; symbols___0[47].mask = 61UL; symbols___0[47].name = "UPDATE_BLOCK"; symbols___0[48].mask = 62UL; symbols___0[48].name = "READ_LONG"; symbols___0[49].mask = 63UL; symbols___0[49].name = "WRITE_LONG"; symbols___0[50].mask = 64UL; symbols___0[50].name = "CHANGE_DEFINITION"; symbols___0[51].mask = 65UL; symbols___0[51].name = "WRITE_SAME"; symbols___0[52].mask = 66UL; symbols___0[52].name = "UNMAP"; symbols___0[53].mask = 67UL; symbols___0[53].name = "READ_TOC"; symbols___0[54].mask = 76UL; symbols___0[54].name = "LOG_SELECT"; symbols___0[55].mask = 77UL; symbols___0[55].name = "LOG_SENSE"; symbols___0[56].mask = 83UL; symbols___0[56].name = "XDWRITEREAD_10"; symbols___0[57].mask = 85UL; symbols___0[57].name = "MODE_SELECT_10"; symbols___0[58].mask = 86UL; symbols___0[58].name = "RESERVE_10"; symbols___0[59].mask = 87UL; symbols___0[59].name = "RELEASE_10"; symbols___0[60].mask = 90UL; symbols___0[60].name = "MODE_SENSE_10"; symbols___0[61].mask = 94UL; symbols___0[61].name = "PERSISTENT_RESERVE_IN"; symbols___0[62].mask = 95UL; symbols___0[62].name = "PERSISTENT_RESERVE_OUT"; 
symbols___0[63].mask = 127UL; symbols___0[63].name = "VARIABLE_LENGTH_CMD"; symbols___0[64].mask = 160UL; symbols___0[64].name = "REPORT_LUNS"; symbols___0[65].mask = 163UL; symbols___0[65].name = "MAINTENANCE_IN"; symbols___0[66].mask = 164UL; symbols___0[66].name = "MAINTENANCE_OUT"; symbols___0[67].mask = 165UL; symbols___0[67].name = "MOVE_MEDIUM"; symbols___0[68].mask = 166UL; symbols___0[68].name = "EXCHANGE_MEDIUM"; symbols___0[69].mask = 168UL; symbols___0[69].name = "READ_12"; symbols___0[70].mask = 170UL; symbols___0[70].name = "WRITE_12"; symbols___0[71].mask = 174UL; symbols___0[71].name = "WRITE_VERIFY_12"; symbols___0[72].mask = 176UL; symbols___0[72].name = "SEARCH_HIGH_12"; symbols___0[73].mask = 177UL; symbols___0[73].name = "SEARCH_EQUAL_12"; symbols___0[74].mask = 178UL; symbols___0[74].name = "SEARCH_LOW_12"; symbols___0[75].mask = 184UL; symbols___0[75].name = "READ_ELEMENT_STATUS"; symbols___0[76].mask = 182UL; symbols___0[76].name = "SEND_VOLUME_TAG"; symbols___0[77].mask = 234UL; symbols___0[77].name = "WRITE_LONG_2"; symbols___0[78].mask = 136UL; symbols___0[78].name = "READ_16"; symbols___0[79].mask = 138UL; symbols___0[79].name = "WRITE_16"; symbols___0[80].mask = 143UL; symbols___0[80].name = "VERIFY_16"; symbols___0[81].mask = 147UL; symbols___0[81].name = "WRITE_SAME_16"; symbols___0[82].mask = 158UL; symbols___0[82].name = "SERVICE_ACTION_IN_16"; symbols___0[83].mask = 16UL; symbols___0[83].name = "SAI_READ_CAPACITY_16"; symbols___0[84].mask = 18UL; symbols___0[84].name = "SAI_GET_LBA_STATUS"; symbols___0[85].mask = 10UL; symbols___0[85].name = "MI_REPORT_TARGET_PGS"; symbols___0[86].mask = 10UL; symbols___0[86].name = "MO_SET_TARGET_PGS"; symbols___0[87].mask = 9UL; symbols___0[87].name = "READ_32"; symbols___0[88].mask = 11UL; symbols___0[88].name = "WRITE_32"; symbols___0[89].mask = 13UL; symbols___0[89].name = "WRITE_SAME_32"; symbols___0[90].mask = 133UL; symbols___0[90].name = "ATA_16"; symbols___0[91].mask = 161UL; symbols___0[91].name = "ATA_12"; symbols___0[92].mask = 0xffffffffffffffffUL; symbols___0[92].name = (char const *)0; tmp___5 = trace_print_symbols_seq(p, (unsigned long )field->opcode, (struct trace_print_flags const *)(& symbols___0)); tmp___6 = trace_print_hex_seq(p, (unsigned char const *)(& field->sense_data), (int )field->sense_length); symbols___1[0].mask = 0UL; symbols___1[0].name = "GOOD"; symbols___1[1].mask = 2UL; symbols___1[1].name = "CHECK CONDITION"; symbols___1[2].mask = 4UL; symbols___1[2].name = "CONDITION MET"; symbols___1[3].mask = 8UL; symbols___1[3].name = "BUSY"; symbols___1[4].mask = 16UL; symbols___1[4].name = "INTERMEDIATE"; symbols___1[5].mask = 20UL; symbols___1[5].name = "INTERMEDIATE CONDITION MET"; symbols___1[6].mask = 24UL; symbols___1[6].name = "RESERVATION CONFLICT"; symbols___1[7].mask = 34UL; symbols___1[7].name = "COMMAND TERMINATED"; symbols___1[8].mask = 40UL; symbols___1[8].name = "TASK SET FULL"; symbols___1[9].mask = 48UL; symbols___1[9].name = "ACA ACTIVE"; symbols___1[10].mask = 64UL; symbols___1[10].name = "TASK ABORTED"; symbols___1[11].mask = 0xffffffffffffffffUL; symbols___1[11].name = (char const *)0; tmp___7 = trace_print_symbols_seq(p, (unsigned long )field->scsi_status, (struct trace_print_flags const *)(& symbols___1)); trace_seq_printf(s, "%s <- LUN %03u status %s (sense len %d%s%s) %s data_length %6u CDB %s (TA:%s C:%02x)\n", (char *)field + ((unsigned long )field->__data_loc_initiator & 65535UL), field->unpacked_lun, tmp___7, (int )field->sense_length, (unsigned int 
)field->sense_length != 0U ? (char *)" / " : (char *)"", tmp___6, tmp___5, field->data_length, tmp___4, tmp___3, tmp___1); tmp___8 = trace_handle_return(s); return (tmp___8); } } static int trace_event_define_fields_target_sequencer_start(struct trace_event_call *event_call ) { int ret ; char *type_str ; { ret = trace_define_field(event_call, "unsigned int", "unpacked_lun", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "opcode", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "data_length", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "task_attribute", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } type_str = (char *)"unsigned char[32]"; ret = trace_define_field(event_call, (char const *)type_str, "cdb", 24, 32, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "__data_loc char[]", "initiator", 56, 4, 1, 0); return (ret); } } static int trace_event_define_fields_target_cmd_complete(struct trace_event_call *event_call ) { int ret ; char *type_str ; char *type_str___0 ; { ret = trace_define_field(event_call, "unsigned int", "unpacked_lun", 8, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "opcode", 12, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "data_length", 16, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned int", "task_attribute", 20, 4, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned char", "scsi_status", 24, 1, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "unsigned char", "sense_length", 25, 1, 0, 0); if (ret != 0) { return (ret); } else { } type_str = (char *)"unsigned char[32]"; ret = trace_define_field(event_call, (char const *)type_str, "cdb", 26, 32, 0, 0); if (ret != 0) { return (ret); } else { } type_str___0 = (char *)"unsigned char[18]"; ret = trace_define_field(event_call, (char const *)type_str___0, "sense_data", 58, 18, 0, 0); if (ret != 0) { return (ret); } else { } ret = trace_define_field(event_call, "__data_loc char[]", "initiator", 76, 4, 1, 0); return (ret); } } static struct workqueue_struct *target_completion_wq ; static struct kmem_cache *se_sess_cache ; static void transport_complete_task_attr(struct se_cmd *cmd ) ; static void transport_handle_queue_full(struct se_cmd *cmd , struct se_device *dev ) ; static int transport_put_cmd(struct se_cmd *cmd ) ; static void target_complete_ok_work(struct work_struct *work ) ; int init_se_kmem_caches(void) { struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp ; { se_sess_cache = kmem_cache_create("se_sess_cache", 1536UL, 64UL, 0UL, (void (*)(void * ))0); if ((unsigned long )se_sess_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for struct se_session failed\n"); goto out; } else { } se_ua_cache = kmem_cache_create("se_ua_cache", 24UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )se_ua_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for struct se_ua failed\n"); goto out_free_sess_cache; } else { } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", 704UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_pr_reg_cache == (unsigned long 
)((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for struct t10_pr_registration failed\n"); goto out_free_ua_cache; } else { } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", 232UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_alua_lu_gp_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for t10_alua_lu_gp_cache failed\n"); goto out_free_pr_reg_cache; } else { } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", 112UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_alua_lu_gp_mem_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n"); goto out_free_lu_gp_cache; } else { } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", 696UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_alua_tg_pt_gp_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n"); goto out_free_lu_gp_mem_cache; } else { } t10_alua_lba_map_cache = kmem_cache_create("t10_alua_lba_map_cache", 48UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_alua_lba_map_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for t10_alua_lba_map_cache failed\n"); goto out_free_tg_pt_gp_cache; } else { } t10_alua_lba_map_mem_cache = kmem_cache_create("t10_alua_lba_map_mem_cache", 24UL, 8UL, 0UL, (void (*)(void * ))0); if ((unsigned long )t10_alua_lba_map_mem_cache == (unsigned long )((struct kmem_cache *)0)) { printk("\vkmem_cache_create() for t10_alua_lba_map_mem_cache failed\n"); goto out_free_lba_map_cache; } else { } __lock_name = "\"target_completion\""; tmp = __alloc_workqueue_key("target_completion", 8U, 0, & __key, __lock_name); target_completion_wq = tmp; if ((unsigned long )target_completion_wq == (unsigned long )((struct workqueue_struct *)0)) { goto out_free_lba_map_mem_cache; } else { } return (0); out_free_lba_map_mem_cache: kmem_cache_destroy(t10_alua_lba_map_mem_cache); out_free_lba_map_cache: kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); out_free_lu_gp_mem_cache: kmem_cache_destroy(t10_alua_lu_gp_mem_cache); out_free_lu_gp_cache: kmem_cache_destroy(t10_alua_lu_gp_cache); out_free_pr_reg_cache: kmem_cache_destroy(t10_pr_reg_cache); out_free_ua_cache: kmem_cache_destroy(se_ua_cache); out_free_sess_cache: kmem_cache_destroy(se_sess_cache); out: ; return (-12); } } void release_se_kmem_caches(void) { { ldv_destroy_workqueue_429(target_completion_wq); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); kmem_cache_destroy(t10_pr_reg_cache); kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_lba_map_cache); kmem_cache_destroy(t10_alua_lba_map_mem_cache); return; } } static spinlock_t scsi_mib_index_lock = {{{{{0}}, 3735899821U, 4294967295U, (void *)-1, {0, {0, 0}, "scsi_mib_index_lock", 0, 0UL}}}}; static u32 scsi_mib_index[3U] ; u32 scsi_get_new_index(scsi_index_t type ) { u32 new_index ; long tmp ; { tmp = ldv__builtin_expect((unsigned int )type > 2U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (190), "i" (12UL)); ldv_62856: ; goto ldv_62856; } else { } spin_lock(& scsi_mib_index_lock); scsi_mib_index[(unsigned int )type] = scsi_mib_index[(unsigned int )type] + 1U; new_index = scsi_mib_index[(unsigned int )type]; spin_unlock(& scsi_mib_index_lock); return (new_index); } } void transport_subsystem_check_init(void) { int ret ; int sub_api_initialized ; { if (sub_api_initialized != 0) { return; } else { } ret = __request_module(1, "target_core_iblock"); if (ret != 0) { printk("\vUnable to load target_core_iblock\n"); } else { } ret = __request_module(1, "target_core_file"); if (ret != 0) { printk("\vUnable to load target_core_file\n"); } else { } ret = __request_module(1, "target_core_pscsi"); if (ret != 0) { printk("\vUnable to load target_core_pscsi\n"); } else { } ret = __request_module(1, "target_core_user"); if (ret != 0) { printk("\vUnable to load target_core_user\n"); } else { } sub_api_initialized = 1; return; } } struct se_session *transport_init_session(enum target_prot_op sup_prot_ops ) { struct se_session *se_sess ; void *tmp ; void *tmp___0 ; struct lock_class_key __key ; { tmp = kmem_cache_zalloc(se_sess_cache, 208U); se_sess = (struct se_session *)tmp; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0)) { printk("\vUnable to allocate struct se_session from se_sess_cache\n"); tmp___0 = ERR_PTR(-12L); return ((struct se_session *)tmp___0); } else { } INIT_LIST_HEAD(& se_sess->sess_list); INIT_LIST_HEAD(& se_sess->sess_acl_list); INIT_LIST_HEAD(& se_sess->sess_cmd_list); INIT_LIST_HEAD(& se_sess->sess_wait_list); spinlock_check(& se_sess->sess_cmd_lock); __raw_spin_lock_init(& se_sess->sess_cmd_lock.__annonCompField17.rlock, "&(&se_sess->sess_cmd_lock)->rlock", & __key); kref_init(& se_sess->sess_kref); se_sess->sup_prot_ops = sup_prot_ops; return (se_sess); } } static char const __kstrtab_transport_init_session[23U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'i', 'n', 'i', 't', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_init_session ; struct kernel_symbol const __ksymtab_transport_init_session = {(unsigned long )(& transport_init_session), (char const *)(& __kstrtab_transport_init_session)}; int transport_alloc_session_tags(struct se_session *se_sess , unsigned int tag_num , unsigned int tag_size ) { int rc ; { se_sess->sess_cmd_map = kzalloc((size_t )(tag_num * tag_size), 1744U); if ((unsigned long )se_sess->sess_cmd_map == (unsigned long )((void *)0)) { se_sess->sess_cmd_map = vzalloc((unsigned long )(tag_num * tag_size)); if ((unsigned long )se_sess->sess_cmd_map == (unsigned long )((void *)0)) { printk("\vUnable to allocate se_sess->sess_cmd_map\n"); return (-12); } else { } } else { } rc = percpu_ida_init(& se_sess->sess_tag_pool, (unsigned long )tag_num); if (rc < 0) { printk("\vUnable to init se_sess->sess_tag_pool, tag_num: %u\n", tag_num); kvfree((void const *)se_sess->sess_cmd_map); se_sess->sess_cmd_map = (void *)0; return (-12); } else { } return (0); } } static char const __kstrtab_transport_alloc_session_tags[29U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'a', 'l', 'l', 'o', 'c', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '_', 't', 'a', 'g', 's', '\000'}; struct kernel_symbol const __ksymtab_transport_alloc_session_tags ; struct 
kernel_symbol const __ksymtab_transport_alloc_session_tags = {(unsigned long )(& transport_alloc_session_tags), (char const *)(& __kstrtab_transport_alloc_session_tags)}; struct se_session *transport_init_session_tags(unsigned int tag_num , unsigned int tag_size , enum target_prot_op sup_prot_ops ) { struct se_session *se_sess ; int rc ; bool tmp ; void *tmp___0 ; { se_sess = transport_init_session(sup_prot_ops); tmp = IS_ERR((void const *)se_sess); if ((int )tmp) { return (se_sess); } else { } rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); if (rc < 0) { transport_free_session(se_sess); tmp___0 = ERR_PTR(-12L); return ((struct se_session *)tmp___0); } else { } return (se_sess); } } static char const __kstrtab_transport_init_session_tags[28U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'i', 'n', 'i', 't', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '_', 't', 'a', 'g', 's', '\000'}; struct kernel_symbol const __ksymtab_transport_init_session_tags ; struct kernel_symbol const __ksymtab_transport_init_session_tags = {(unsigned long )(& transport_init_session_tags), (char const *)(& __kstrtab_transport_init_session_tags)}; void __transport_register_session(struct se_portal_group *se_tpg , struct se_node_acl *se_nacl , struct se_session *se_sess , void *fabric_sess_ptr ) { struct target_core_fabric_ops const *tfo ; unsigned char buf[16U] ; enum target_prot_type tmp ; int tmp___0 ; struct _ddebug descriptor ; char *tmp___1 ; long tmp___2 ; { tfo = se_tpg->se_tpg_tfo; se_sess->se_tpg = se_tpg; se_sess->fabric_sess_ptr = fabric_sess_ptr; if ((unsigned long )se_nacl != (unsigned long )((struct se_node_acl *)0)) { if ((unsigned int )se_nacl->saved_prot_type != 0U) { se_sess->sess_prot_type = se_nacl->saved_prot_type; } else if ((unsigned long )tfo->tpg_check_prot_fabric_only != (unsigned long )((int (*/* const */)(struct se_portal_group * ))0)) { tmp___0 = (*(tfo->tpg_check_prot_fabric_only))(se_tpg); tmp = (enum target_prot_type )tmp___0; se_nacl->saved_prot_type = tmp; se_sess->sess_prot_type = tmp; } else { } if ((unsigned long )(se_tpg->se_tpg_tfo)->sess_get_initiator_sid != (unsigned long )((u32 (*/* const */)(struct se_session * , unsigned char * , u32 ))0)) { memset((void *)(& buf), 0, 16UL); (*((se_tpg->se_tpg_tfo)->sess_get_initiator_sid))(se_sess, (unsigned char *)(& buf), 16U); se_sess->sess_bin_isid = get_unaligned_be64((void const *)(& buf)); } else { } kref_get(& se_nacl->acl_kref); spin_lock_irq(& se_nacl->nacl_sess_lock); se_nacl->nacl_sess = se_sess; list_add_tail(& se_sess->sess_acl_list, & se_nacl->acl_sess_list); spin_unlock_irq(& se_nacl->nacl_sess_lock); } else { } list_add_tail(& se_sess->sess_list, & se_tpg->tpg_sess_list); descriptor.modname = "target_core_mod"; descriptor.function = "__transport_register_session"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n"; descriptor.lineno = 359U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = (*((se_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", tmp___1, se_sess->fabric_sess_ptr); } else { } return; } } static char const __kstrtab___transport_register_session[29U] = { '_', '_', 't', 'r', 'a', 'n', 's', 
'p', 'o', 'r', 't', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab___transport_register_session ; struct kernel_symbol const __ksymtab___transport_register_session = {(unsigned long )(& __transport_register_session), (char const *)(& __kstrtab___transport_register_session)}; void transport_register_session(struct se_portal_group *se_tpg , struct se_node_acl *se_nacl , struct se_session *se_sess , void *fabric_sess_ptr ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& se_tpg->session_lock); flags = _raw_spin_lock_irqsave(tmp); __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); spin_unlock_irqrestore(& se_tpg->session_lock, flags); return; } } static char const __kstrtab_transport_register_session[27U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_register_session ; struct kernel_symbol const __ksymtab_transport_register_session = {(unsigned long )(& transport_register_session), (char const *)(& __kstrtab_transport_register_session)}; static void target_release_session(struct kref *kref ) { struct se_session *se_sess ; struct kref const *__mptr ; struct se_portal_group *se_tpg ; { __mptr = (struct kref const *)kref; se_sess = (struct se_session *)__mptr + 0xffffffffffffff48UL; se_tpg = se_sess->se_tpg; (*((se_tpg->se_tpg_tfo)->close_session))(se_sess); return; } } void target_get_session(struct se_session *se_sess ) { { kref_get(& se_sess->sess_kref); return; } } static char const __kstrtab_target_get_session[19U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'g', 'e', 't', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_target_get_session ; struct kernel_symbol const __ksymtab_target_get_session = {(unsigned long )(& target_get_session), (char const *)(& __kstrtab_target_get_session)}; void target_put_session(struct se_session *se_sess ) { { kref_put(& se_sess->sess_kref, & target_release_session); return; } } static char const __kstrtab_target_put_session[19U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'p', 'u', 't', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_target_put_session ; struct kernel_symbol const __ksymtab_target_put_session = {(unsigned long )(& target_put_session), (char const *)(& __kstrtab_target_put_session)}; ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg , char *page ) { struct se_session *se_sess ; ssize_t len ; struct list_head const *__mptr ; size_t tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; { len = 0L; spin_lock_bh(& se_tpg->session_lock); __mptr = (struct list_head const *)se_tpg->tpg_sess_list.next; se_sess = (struct se_session *)__mptr + 0xffffffffffffffd0UL; goto ldv_62985; ldv_62984: ; if ((unsigned long )se_sess->se_node_acl == (unsigned long )((struct se_node_acl *)0)) { goto ldv_62982; } else { } if (! 
(se_sess->se_node_acl)->dynamic_node_acl) { goto ldv_62982; } else { } tmp = strlen((char const *)(& (se_sess->se_node_acl)->initiatorname)); if ((tmp + (unsigned long )len) + 1UL > 4096UL) { goto ldv_62983; } else { } tmp___0 = snprintf(page + (unsigned long )len, 4096UL - (unsigned long )len, "%s\n", (char *)(& (se_sess->se_node_acl)->initiatorname)); len = (ssize_t )tmp___0 + len; len = len + 1L; ldv_62982: __mptr___0 = (struct list_head const *)se_sess->sess_list.next; se_sess = (struct se_session *)__mptr___0 + 0xffffffffffffffd0UL; ldv_62985: ; if ((unsigned long )(& se_sess->sess_list) != (unsigned long )(& se_tpg->tpg_sess_list)) { goto ldv_62984; } else { } ldv_62983: spin_unlock_bh(& se_tpg->session_lock); return (len); } } static char const __kstrtab_target_show_dynamic_sessions[29U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'h', 'o', 'w', '_', 'd', 'y', 'n', 'a', 'm', 'i', 'c', '_', 's', 'e', 's', 's', 'i', 'o', 'n', 's', '\000'}; struct kernel_symbol const __ksymtab_target_show_dynamic_sessions ; struct kernel_symbol const __ksymtab_target_show_dynamic_sessions = {(unsigned long )(& target_show_dynamic_sessions), (char const *)(& __kstrtab_target_show_dynamic_sessions)}; static void target_complete_nacl(struct kref *kref ) { struct se_node_acl *nacl ; struct kref const *__mptr ; { __mptr = (struct kref const *)kref; nacl = (struct se_node_acl *)__mptr + 0xfffffffffffffaf0UL; complete(& nacl->acl_free_comp); return; } } void target_put_nacl(struct se_node_acl *nacl ) { { kref_put(& nacl->acl_kref, & target_complete_nacl); return; } } void transport_deregister_session_configfs(struct se_session *se_sess ) { struct se_node_acl *se_nacl ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; int tmp___0 ; { se_nacl = se_sess->se_node_acl; if ((unsigned long )se_nacl != (unsigned long )((struct se_node_acl *)0)) { tmp = spinlock_check(& se_nacl->nacl_sess_lock); flags = _raw_spin_lock_irqsave(tmp); if (! 
se_nacl->acl_stop) { list_del(& se_sess->sess_acl_list); } else { } tmp___0 = list_empty((struct list_head const *)(& se_nacl->acl_sess_list)); if (tmp___0 != 0) { se_nacl->nacl_sess = (struct se_session *)0; } else { __mptr = (struct list_head const *)se_nacl->acl_sess_list.prev; se_nacl->nacl_sess = (struct se_session *)__mptr + 0xffffffffffffffc0UL; } spin_unlock_irqrestore(& se_nacl->nacl_sess_lock, flags); } else { } return; } } static char const __kstrtab_transport_deregister_session_configfs[38U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'd', 'e', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '_', 'c', 'o', 'n', 'f', 'i', 'g', 'f', 's', '\000'}; struct kernel_symbol const __ksymtab_transport_deregister_session_configfs ; struct kernel_symbol const __ksymtab_transport_deregister_session_configfs = {(unsigned long )(& transport_deregister_session_configfs), (char const *)(& __kstrtab_transport_deregister_session_configfs)}; void transport_free_session(struct se_session *se_sess ) { { if ((unsigned long )se_sess->sess_cmd_map != (unsigned long )((void *)0)) { percpu_ida_destroy(& se_sess->sess_tag_pool); kvfree((void const *)se_sess->sess_cmd_map); } else { } kmem_cache_free(se_sess_cache, (void *)se_sess); return; } } static char const __kstrtab_transport_free_session[23U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'f', 'r', 'e', 'e', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_free_session ; struct kernel_symbol const __ksymtab_transport_free_session = {(unsigned long )(& transport_free_session), (char const *)(& __kstrtab_transport_free_session)}; void transport_deregister_session(struct se_session *se_sess ) { struct se_portal_group *se_tpg ; struct target_core_fabric_ops const *se_tfo ; struct se_node_acl *se_nacl ; unsigned long flags ; bool comp_nacl ; bool drop_nacl ; raw_spinlock_t *tmp ; int tmp___0 ; struct _ddebug descriptor ; char *tmp___1 ; long tmp___2 ; { se_tpg = se_sess->se_tpg; comp_nacl = 1; drop_nacl = 0; if ((unsigned long )se_tpg == (unsigned long )((struct se_portal_group *)0)) { transport_free_session(se_sess); return; } else { } se_tfo = se_tpg->se_tpg_tfo; tmp = spinlock_check(& se_tpg->session_lock); flags = _raw_spin_lock_irqsave(tmp); list_del(& se_sess->sess_list); se_sess->se_tpg = (struct se_portal_group *)0; se_sess->fabric_sess_ptr = (void *)0; spin_unlock_irqrestore(& se_tpg->session_lock, flags); se_nacl = se_sess->se_node_acl; ldv_mutex_lock_430(& se_tpg->acl_node_mutex); if ((unsigned long )se_nacl != (unsigned long )((struct se_node_acl *)0) && (int )se_nacl->dynamic_node_acl) { tmp___0 = (*(se_tfo->tpg_check_demo_mode_cache))(se_tpg); if (tmp___0 == 0) { list_del(& se_nacl->acl_list); se_tpg->num_node_acls = se_tpg->num_node_acls - 1U; drop_nacl = 1; } else { } } else { } ldv_mutex_unlock_431(& se_tpg->acl_node_mutex); if ((int )drop_nacl) { core_tpg_wait_for_nacl_pr_ref(se_nacl); core_free_device_list_for_node(se_nacl, se_tpg); kfree((void const *)se_nacl); comp_nacl = 0; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "transport_deregister_session"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "TARGET_CORE[%s]: Deregistered fabric_sess\n"; descriptor.lineno = 517U; descriptor.flags = 0U; tmp___2 = 
ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = (*((se_tpg->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "TARGET_CORE[%s]: Deregistered fabric_sess\n", tmp___1); } else { } if ((unsigned long )se_nacl != (unsigned long )((struct se_node_acl *)0) && (int )comp_nacl) { target_put_nacl(se_nacl); } else { } transport_free_session(se_sess); return; } } static char const __kstrtab_transport_deregister_session[29U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'd', 'e', 'r', 'e', 'g', 'i', 's', 't', 'e', 'r', '_', 's', 'e', 's', 's', 'i', 'o', 'n', '\000'}; struct kernel_symbol const __ksymtab_transport_deregister_session ; struct kernel_symbol const __ksymtab_transport_deregister_session = {(unsigned long )(& transport_deregister_session), (char const *)(& __kstrtab_transport_deregister_session)}; static void target_remove_from_state_list(struct se_cmd *cmd ) { struct se_device *dev ; unsigned long flags ; raw_spinlock_t *tmp ; { dev = cmd->se_dev; if ((unsigned long )dev == (unsigned long )((struct se_device *)0)) { return; } else { } if ((cmd->transport_state & 512U) != 0U) { return; } else { } tmp = spinlock_check(& dev->execute_task_lock); flags = _raw_spin_lock_irqsave(tmp); if ((int )cmd->state_active) { list_del(& cmd->state_list); cmd->state_active = 0; } else { } spin_unlock_irqrestore(& dev->execute_task_lock, flags); return; } } static int transport_cmd_check_stop(struct se_cmd *cmd , bool remove_from_lists , bool write_pending ) { unsigned long flags ; raw_spinlock_t *tmp ; struct _ddebug descriptor ; long tmp___0 ; int tmp___1 ; { tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); if ((int )write_pending) { cmd->t_state = 3; } else { } if ((int )remove_from_lists) { target_remove_from_state_list(cmd); cmd->se_lun = (struct se_lun *)0; } else { } if ((cmd->transport_state & 32U) != 0U) { descriptor.modname = "target_core_mod"; descriptor.function = "transport_cmd_check_stop"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "%s:%d CMD_T_STOP for ITT: 0x%08llx\n"; descriptor.lineno = 576U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "%s:%d CMD_T_STOP for ITT: 0x%08llx\n", "transport_cmd_check_stop", 576, cmd->tag); } else { } spin_unlock_irqrestore(& cmd->t_state_lock, flags); complete_all(& cmd->t_transport_stop_comp); return (1); } else { } cmd->transport_state = cmd->transport_state & 4294967293U; if ((int )remove_from_lists) { if ((unsigned long )(cmd->se_tfo)->check_stop_free != (unsigned long )((int (*/* const */)(struct se_cmd * ))0)) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); tmp___1 = (*((cmd->se_tfo)->check_stop_free))(cmd); return (tmp___1); } else { } } else { } spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (0); } } static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd ) { int tmp ; { tmp = transport_cmd_check_stop(cmd, 1, 0); return (tmp); } } static void transport_lun_remove_cmd(struct se_cmd *cmd ) { struct se_lun *lun ; int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { lun = cmd->se_lun; if ((unsigned long )lun == (unsigned long )((struct se_lun 
*)0)) { return; } else { } __old = 1; __new = 0; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& cmd->lun_ref_active); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_63082; case 2UL: __ptr___0 = (u16 volatile *)(& cmd->lun_ref_active); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_63082; case 4UL: __ptr___1 = (u32 volatile *)(& cmd->lun_ref_active); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_63082; case 8UL: __ptr___2 = (u64 volatile *)(& cmd->lun_ref_active); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_63082; default: __cmpxchg_wrong_size(); } ldv_63082: ; if (__ret != 0) { percpu_ref_put___1(& lun->lun_ref); } else { } return; } } void transport_cmd_finish_abort(struct se_cmd *cmd , int remove ) { int tmp ; { if ((cmd->se_cmd_flags & 256U) != 0U) { transport_lun_remove_cmd(cmd); } else { } if (remove != 0) { (*((cmd->se_tfo)->aborted_task))(cmd); } else { } tmp = transport_cmd_check_stop_to_fabric(cmd); if (tmp != 0) { return; } else { } if (remove != 0) { transport_put_cmd(cmd); } else { } return; } } static void target_complete_failure_work(struct work_struct *work ) { struct se_cmd *cmd ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; cmd = (struct se_cmd *)__mptr + 0xfffffffffffffdf8UL; transport_generic_request_failure(cmd, 10U); return; } } static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd ) { struct se_device *dev ; int __ret_warn_on ; long tmp ; struct _ddebug descriptor ; long tmp___0 ; { dev = cmd->se_dev; __ret_warn_on = (unsigned long )cmd->se_lun == (unsigned long )((struct se_lun *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c", 654); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); if ((unsigned long )dev == (unsigned long )((struct se_device *)0)) { return ((unsigned char *)0U); } else { } if ((cmd->se_cmd_flags & 2048U) != 0U) { return ((unsigned char *)0U); } else { } cmd->scsi_sense_length = 96U; descriptor.modname = "target_core_mod"; descriptor.function = "transport_get_sense_buffer"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n"; descriptor.lineno = 665U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", (dev->se_hba)->hba_id, (char const *)(& (dev->transport)->name), (int )cmd->scsi_status); } else { } return 
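/* target_complete_cmd() below is the backend completion entry point: it stores
 * scsi_status, lets the backend's transport_complete() examine the sense
 * buffer returned by transport_get_sense_buffer(), handles the task-stop and
 * CMD_T_STOP cases, and otherwise queues target_complete_ok_work or
 * target_complete_failure_work on target_completion_wq. */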
((unsigned char *)cmd->sense_buffer); } } void target_complete_cmd(struct se_cmd *cmd , u8 scsi_status ) { struct se_device *dev ; int success ; unsigned long flags ; raw_spinlock_t *tmp ; unsigned char *tmp___0 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_1 ; { dev = cmd->se_dev; success = (unsigned int )scsi_status == 0U; cmd->scsi_status = scsi_status; tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); cmd->transport_state = cmd->transport_state & 4294966783U; if ((unsigned long )dev != (unsigned long )((struct se_device *)0) && (unsigned long )(dev->transport)->transport_complete != (unsigned long )((void (*/* const */)(struct se_cmd * , struct scatterlist * , unsigned char * ))0)) { tmp___0 = transport_get_sense_buffer(cmd); (*((dev->transport)->transport_complete))(cmd, cmd->t_data_sg, tmp___0); if ((cmd->se_cmd_flags & 2U) != 0U) { success = 1; } else { } } else { } if ((cmd->transport_state & 256U) != 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); complete(& cmd->task_stop_comp); return; } else { } if ((int )cmd->transport_state & 1 && (cmd->transport_state & 32U) != 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); complete_all(& cmd->t_transport_stop_comp); return; } else if (success == 0) { __init_work(& cmd->work, 0); __constr_expr_0.counter = 137438953408L; cmd->work.data = __constr_expr_0; lockdep_init_map(& cmd->work.lockdep_map, "(&cmd->work)", & __key, 0); INIT_LIST_HEAD(& cmd->work.entry); cmd->work.func = & target_complete_failure_work; } else { __init_work(& cmd->work, 0); __constr_expr_1.counter = 137438953408L; cmd->work.data = __constr_expr_1; lockdep_init_map(& cmd->work.lockdep_map, "(&cmd->work)", & __key___0, 0); INIT_LIST_HEAD(& cmd->work.entry); cmd->work.func = & target_complete_ok_work; } cmd->t_state = 6; cmd->transport_state = cmd->transport_state | 6U; spin_unlock_irqrestore(& cmd->t_state_lock, flags); queue_work(target_completion_wq, & cmd->work); return; } } static char const __kstrtab_target_complete_cmd[20U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'c', 'o', 'm', 'p', 'l', 'e', 't', 'e', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_target_complete_cmd ; struct kernel_symbol const __ksymtab_target_complete_cmd = {(unsigned long )(& target_complete_cmd), (char const *)(& __kstrtab_target_complete_cmd)}; void target_complete_cmd_with_length(struct se_cmd *cmd , u8 scsi_status , int length ) { { if ((unsigned int )scsi_status == 0U && (u32 )length < cmd->data_length) { if ((cmd->se_cmd_flags & 8192U) != 0U) { cmd->residual_count = cmd->residual_count + (cmd->data_length - (u32 )length); } else { cmd->se_cmd_flags = cmd->se_cmd_flags | 8192U; cmd->residual_count = cmd->data_length - (u32 )length; } cmd->data_length = (u32 )length; } else { } target_complete_cmd(cmd, (int )scsi_status); return; } } static char const __kstrtab_target_complete_cmd_with_length[32U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'c', 'o', 'm', 'p', 'l', 'e', 't', 'e', '_', 'c', 'm', 'd', '_', 'w', 'i', 't', 'h', '_', 'l', 'e', 'n', 'g', 't', 'h', '\000'}; struct kernel_symbol const __ksymtab_target_complete_cmd_with_length ; struct kernel_symbol const __ksymtab_target_complete_cmd_with_length = {(unsigned long )(& target_complete_cmd_with_length), (char const *)(& __kstrtab_target_complete_cmd_with_length)}; static void target_add_to_state_list(struct se_cmd *cmd ) { struct se_device *dev ; unsigned long flags ; raw_spinlock_t *tmp ; { dev 
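/* target_add_to_state_list(): links cmd->state_list onto dev->state_list under
 * execute_task_lock unless the command is already marked state_active; its
 * counterpart target_remove_from_state_list() appears earlier in this file. */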
= cmd->se_dev; tmp = spinlock_check(& dev->execute_task_lock); flags = _raw_spin_lock_irqsave(tmp); if (! cmd->state_active) { list_add_tail(& cmd->state_list, & dev->state_list); cmd->state_active = 1; } else { } spin_unlock_irqrestore(& dev->execute_task_lock, flags); return; } } static void transport_write_pending_qf(struct se_cmd *cmd ) ; static void transport_complete_qf(struct se_cmd *cmd ) ; void target_qf_do_work(struct work_struct *work ) { struct se_device *dev ; struct work_struct const *__mptr ; struct list_head qf_cmd_list ; struct se_cmd *cmd ; struct se_cmd *cmd_tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; char *tmp ; long tmp___0 ; struct list_head const *__mptr___2 ; { __mptr = (struct work_struct const *)work; dev = (struct se_device *)__mptr + 0xfffffffffffffd40UL; qf_cmd_list.next = & qf_cmd_list; qf_cmd_list.prev = & qf_cmd_list; spin_lock_irq(& dev->qf_cmd_lock); list_splice_init(& dev->qf_cmd_list, & qf_cmd_list); spin_unlock_irq(& dev->qf_cmd_lock); __mptr___0 = (struct list_head const *)qf_cmd_list.next; cmd = (struct se_cmd *)__mptr___0 + 0xffffffffffffffa0UL; __mptr___1 = (struct list_head const *)cmd->se_qf_node.next; cmd_tmp = (struct se_cmd *)__mptr___1 + 0xffffffffffffffa0UL; goto ldv_63175; ldv_63174: list_del(& cmd->se_qf_node); atomic_dec_mb(& dev->dev_qf_count); descriptor.modname = "target_core_mod"; descriptor.function = "target_qf_do_work"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n"; descriptor.lineno = 776U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((cmd->se_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n", tmp, cmd, (unsigned int )cmd->t_state != 19U ? ((unsigned int )cmd->t_state == 18U ? 
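/* target_qf_do_work(): splices dev->qf_cmd_list onto a local list under
 * qf_cmd_lock and re-drives each QUEUE_FULL command; t_state 18 (logged as
 * WRITE_PENDING) goes through transport_write_pending_qf() and t_state 19
 * (logged as COMPLETE_OK) through transport_complete_qf(). */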
(char *)"WRITE_PENDING" : (char *)"UNKNOWN") : (char *)"COMPLETE_OK"); } else { } if ((unsigned int )cmd->t_state == 18U) { transport_write_pending_qf(cmd); } else if ((unsigned int )cmd->t_state == 19U) { transport_complete_qf(cmd); } else { } cmd = cmd_tmp; __mptr___2 = (struct list_head const *)cmd_tmp->se_qf_node.next; cmd_tmp = (struct se_cmd *)__mptr___2 + 0xffffffffffffffa0UL; ldv_63175: ; if ((unsigned long )(& cmd->se_qf_node) != (unsigned long )(& qf_cmd_list)) { goto ldv_63174; } else { } return; } } unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd ) { { switch ((unsigned int )cmd->data_direction) { case 3U: ; return ((unsigned char *)"NONE"); case 2U: ; return ((unsigned char *)"READ"); case 1U: ; return ((unsigned char *)"WRITE"); case 0U: ; return ((unsigned char *)"BIDI"); default: ; goto ldv_63185; } ldv_63185: ; return ((unsigned char *)"UNKNOWN"); } } void transport_dump_dev_state(struct se_device *dev , char *b , int *bl ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = sprintf(b + (unsigned long )*bl, "Status: "); *bl = *bl + tmp; if (dev->export_count != 0U) { tmp___0 = sprintf(b + (unsigned long )*bl, "ACTIVATED"); *bl = *bl + tmp___0; } else { tmp___1 = sprintf(b + (unsigned long )*bl, "DEACTIVATED"); *bl = *bl + tmp___1; } tmp___2 = sprintf(b + (unsigned long )*bl, " Max Queue Depth: %d", dev->queue_depth); *bl = *bl + tmp___2; tmp___3 = sprintf(b + (unsigned long )*bl, " SectorSize: %u HwMaxSectors: %u\n", dev->dev_attrib.block_size, dev->dev_attrib.hw_max_sectors); *bl = *bl + tmp___3; tmp___4 = sprintf(b + (unsigned long )*bl, " "); *bl = *bl + tmp___4; return; } } void transport_dump_vpd_proto_id(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) { unsigned char buf[254U] ; int len ; struct _ddebug descriptor ; long tmp ; { memset((void *)(& buf), 0, 254UL); len = sprintf((char *)(& buf), "T10 VPD Protocol Identifier: "); switch (vpd->protocol_identifier) { case 0U: sprintf((char *)(& buf) + (unsigned long )len, "Fibre Channel\n"); goto ldv_63199; case 16U: sprintf((char *)(& buf) + (unsigned long )len, "Parallel SCSI\n"); goto ldv_63199; case 32U: sprintf((char *)(& buf) + (unsigned long )len, "SSA\n"); goto ldv_63199; case 48U: sprintf((char *)(& buf) + (unsigned long )len, "IEEE 1394\n"); goto ldv_63199; case 64U: sprintf((char *)(& buf) + (unsigned long )len, "SCSI Remote Direct Memory Access Protocol\n"); goto ldv_63199; case 80U: sprintf((char *)(& buf) + (unsigned long )len, "Internet SCSI (iSCSI)\n"); goto ldv_63199; case 96U: sprintf((char *)(& buf) + (unsigned long )len, "SAS Serial SCSI Protocol\n"); goto ldv_63199; case 112U: sprintf((char *)(& buf) + (unsigned long )len, "Automation/Drive Interface Transport Protocol\n"); goto ldv_63199; case 128U: sprintf((char *)(& buf) + (unsigned long )len, "AT Attachment Interface ATA/ATAPI\n"); goto ldv_63199; default: sprintf((char *)(& buf) + (unsigned long )len, "Unknown 0x%02x\n", vpd->protocol_identifier); goto ldv_63199; } ldv_63199: ; if ((unsigned long )p_buf != (unsigned long )((unsigned char *)0U)) { strncpy((char *)p_buf, (char const *)(& buf), (__kernel_size_t )p_buf_len); } else { descriptor.modname = "target_core_mod"; descriptor.function = "transport_dump_vpd_proto_id"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = 
"%s"; descriptor.lineno = 871U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "%s", (unsigned char *)(& buf)); } else { } } return; } } void transport_set_vpd_proto_id(struct t10_vpd *vpd , unsigned char *page_83 ) { { if ((int )((signed char )*(page_83 + 1UL)) < 0) { vpd->protocol_identifier = (u32 )*page_83 & 240U; vpd->protocol_identifier_set = 1; transport_dump_vpd_proto_id(vpd, (unsigned char *)0U, 0); } else { } return; } } static char const __kstrtab_transport_set_vpd_proto_id[27U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 's', 'e', 't', '_', 'v', 'p', 'd', '_', 'p', 'r', 'o', 't', 'o', '_', 'i', 'd', '\000'}; struct kernel_symbol const __ksymtab_transport_set_vpd_proto_id ; struct kernel_symbol const __ksymtab_transport_set_vpd_proto_id = {(unsigned long )(& transport_set_vpd_proto_id), (char const *)(& __kstrtab_transport_set_vpd_proto_id)}; int transport_dump_vpd_assoc(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) { unsigned char buf[254U] ; int ret ; int len ; struct _ddebug descriptor ; long tmp ; { ret = 0; memset((void *)(& buf), 0, 254UL); len = sprintf((char *)(& buf), "T10 VPD Identifier Association: "); switch (vpd->association) { case 0U: sprintf((char *)(& buf) + (unsigned long )len, "addressed logical unit\n"); goto ldv_63232; case 16U: sprintf((char *)(& buf) + (unsigned long )len, "target port\n"); goto ldv_63232; case 32U: sprintf((char *)(& buf) + (unsigned long )len, "SCSI target device\n"); goto ldv_63232; default: sprintf((char *)(& buf) + (unsigned long )len, "Unknown 0x%02x\n", vpd->association); ret = -22; goto ldv_63232; } ldv_63232: ; if ((unsigned long )p_buf != (unsigned long )((unsigned char *)0U)) { strncpy((char *)p_buf, (char const *)(& buf), (__kernel_size_t )p_buf_len); } else { descriptor.modname = "target_core_mod"; descriptor.function = "transport_dump_vpd_assoc"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "%s"; descriptor.lineno = 921U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "%s", (unsigned char *)(& buf)); } else { } } return (ret); } } int transport_set_vpd_assoc(struct t10_vpd *vpd , unsigned char *page_83 ) { int tmp ; { vpd->association = (u32 )*(page_83 + 1UL) & 48U; tmp = transport_dump_vpd_assoc(vpd, (unsigned char *)0U, 0); return (tmp); } } static char const __kstrtab_transport_set_vpd_assoc[24U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 's', 'e', 't', '_', 'v', 'p', 'd', '_', 'a', 's', 's', 'o', 'c', '\000'}; struct kernel_symbol const __ksymtab_transport_set_vpd_assoc ; struct kernel_symbol const __ksymtab_transport_set_vpd_assoc = {(unsigned long )(& transport_set_vpd_assoc), (char const *)(& __kstrtab_transport_set_vpd_assoc)}; int transport_dump_vpd_ident_type(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) { unsigned char buf[254U] ; int ret ; int len ; size_t tmp ; struct _ddebug descriptor ; long tmp___0 ; { ret = 0; memset((void *)(& buf), 0, 254UL); len = sprintf((char *)(& buf), "T10 VPD Identifier Type: "); switch (vpd->device_identifier_type) { case 0U: sprintf((char *)(& buf) + (unsigned long )len, "Vendor specific\n"); goto ldv_63259; case 1U: sprintf((char *)(& buf) + 
(unsigned long )len, "T10 Vendor ID based\n"); goto ldv_63259; case 2U: sprintf((char *)(& buf) + (unsigned long )len, "EUI-64 based\n"); goto ldv_63259; case 3U: sprintf((char *)(& buf) + (unsigned long )len, "NAA\n"); goto ldv_63259; case 4U: sprintf((char *)(& buf) + (unsigned long )len, "Relative target port identifier\n"); goto ldv_63259; case 8U: sprintf((char *)(& buf) + (unsigned long )len, "SCSI name string\n"); goto ldv_63259; default: sprintf((char *)(& buf) + (unsigned long )len, "Unsupported: 0x%02x\n", vpd->device_identifier_type); ret = -22; goto ldv_63259; } ldv_63259: ; if ((unsigned long )p_buf != (unsigned long )((unsigned char *)0U)) { tmp = strlen((char const *)(& buf)); if ((size_t )p_buf_len < tmp + 1UL) { return (-22); } else { } strncpy((char *)p_buf, (char const *)(& buf), (__kernel_size_t )p_buf_len); } else { descriptor.modname = "target_core_mod"; descriptor.function = "transport_dump_vpd_ident_type"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "%s"; descriptor.lineno = 981U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "%s", (unsigned char *)(& buf)); } else { } } return (ret); } } int transport_set_vpd_ident_type(struct t10_vpd *vpd , unsigned char *page_83 ) { int tmp ; { vpd->device_identifier_type = (u32 )*(page_83 + 1UL) & 15U; tmp = transport_dump_vpd_ident_type(vpd, (unsigned char *)0U, 0); return (tmp); } } static char const __kstrtab_transport_set_vpd_ident_type[29U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 's', 'e', 't', '_', 'v', 'p', 'd', '_', 'i', 'd', 'e', 'n', 't', '_', 't', 'y', 'p', 'e', '\000'}; struct kernel_symbol const __ksymtab_transport_set_vpd_ident_type ; struct kernel_symbol const __ksymtab_transport_set_vpd_ident_type = {(unsigned long )(& transport_set_vpd_ident_type), (char const *)(& __kstrtab_transport_set_vpd_ident_type)}; int transport_dump_vpd_ident(struct t10_vpd *vpd , unsigned char *p_buf , int p_buf_len ) { unsigned char buf[254U] ; int ret ; struct _ddebug descriptor ; long tmp ; { ret = 0; memset((void *)(& buf), 0, 254UL); switch (vpd->device_identifier_code_set) { case 1U: snprintf((char *)(& buf), 254UL, "T10 VPD Binary Device Identifier: %s\n", (unsigned char *)(& vpd->device_identifier)); goto ldv_63288; case 2U: snprintf((char *)(& buf), 254UL, "T10 VPD ASCII Device Identifier: %s\n", (unsigned char *)(& vpd->device_identifier)); goto ldv_63288; case 3U: snprintf((char *)(& buf), 254UL, "T10 VPD UTF-8 Device Identifier: %s\n", (unsigned char *)(& vpd->device_identifier)); goto ldv_63288; default: sprintf((char *)(& buf), "T10 VPD Device Identifier encoding unsupported: 0x%02x", vpd->device_identifier_code_set); ret = -22; goto ldv_63288; } ldv_63288: ; if ((unsigned long )p_buf != (unsigned long )((unsigned char *)0U)) { strncpy((char *)p_buf, (char const *)(& buf), (__kernel_size_t )p_buf_len); } else { descriptor.modname = "target_core_mod"; descriptor.function = "transport_dump_vpd_ident"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "%s"; descriptor.lineno = 1035U; 
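/* transport_set_vpd_ident() below copies the designator bytes of page 0x83
 * into vpd->device_identifier: binary designators (code set 1) are hex-encoded
 * via the local hex_str table, while ASCII/UTF-8 designators (code sets 2 and
 * 3) are copied verbatim. */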
descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "%s", (unsigned char *)(& buf)); } else { } } return (ret); } } int transport_set_vpd_ident(struct t10_vpd *vpd , unsigned char *page_83 ) { char hex_str[17U] ; int j ; int i ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { hex_str[0] = '0'; hex_str[1] = '1'; hex_str[2] = '2'; hex_str[3] = '3'; hex_str[4] = '4'; hex_str[5] = '5'; hex_str[6] = '6'; hex_str[7] = '7'; hex_str[8] = '8'; hex_str[9] = '9'; hex_str[10] = 'a'; hex_str[11] = 'b'; hex_str[12] = 'c'; hex_str[13] = 'd'; hex_str[14] = 'e'; hex_str[15] = 'f'; hex_str[16] = '\000'; j = 0; i = 4; vpd->device_identifier_code_set = (u32 )*page_83 & 15U; switch (vpd->device_identifier_code_set) { case 1U: tmp = j; j = j + 1; vpd->device_identifier[tmp] = (unsigned char )hex_str[vpd->device_identifier_type]; goto ldv_63303; ldv_63302: tmp___0 = j; j = j + 1; vpd->device_identifier[tmp___0] = (unsigned char )hex_str[(int )*(page_83 + (unsigned long )i) >> 4]; tmp___1 = j; j = j + 1; vpd->device_identifier[tmp___1] = (unsigned char )hex_str[(int )*(page_83 + (unsigned long )i) & 15]; i = i + 1; ldv_63303: ; if ((int )*(page_83 + 3UL) + 4 > i) { goto ldv_63302; } else { } goto ldv_63305; case 2U: ; case 3U: ; goto ldv_63309; ldv_63308: tmp___2 = j; j = j + 1; tmp___3 = i; i = i + 1; vpd->device_identifier[tmp___2] = *(page_83 + (unsigned long )tmp___3); ldv_63309: ; if ((int )*(page_83 + 3UL) + 4 > i) { goto ldv_63308; } else { } goto ldv_63305; default: ; goto ldv_63305; } ldv_63305: tmp___4 = transport_dump_vpd_ident(vpd, (unsigned char *)0U, 0); return (tmp___4); } } static char const __kstrtab_transport_set_vpd_ident[24U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 's', 'e', 't', '_', 'v', 'p', 'd', '_', 'i', 'd', 'e', 'n', 't', '\000'}; struct kernel_symbol const __ksymtab_transport_set_vpd_ident ; struct kernel_symbol const __ksymtab_transport_set_vpd_ident = {(unsigned long )(& transport_set_vpd_ident), (char const *)(& __kstrtab_transport_set_vpd_ident)}; sense_reason_t target_cmd_size_check(struct se_cmd *cmd , unsigned int size ) { struct se_device *dev ; char *tmp ; { dev = cmd->se_dev; if ((unsigned int )*((unsigned char *)cmd + 36UL) != 0U) { cmd->data_length = size; } else if (cmd->data_length != size) { tmp = (*((cmd->se_tfo)->get_fabric_name))(); printk("\fTARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n", tmp, cmd->data_length, size, (int )*(cmd->t_task_cdb)); if ((unsigned int )cmd->data_direction == 1U) { printk("\vRejecting underflow/overflow WRITE data\n"); return (8U); } else { } if (dev->dev_attrib.block_size != 512U) { printk("\vFailing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n", (char const *)(& (dev->transport)->name)); return (8U); } else { } if (cmd->data_length < size) { cmd->se_cmd_flags = cmd->se_cmd_flags | 4096U; cmd->residual_count = size - cmd->data_length; } else { cmd->se_cmd_flags = cmd->se_cmd_flags | 8192U; cmd->residual_count = cmd->data_length - size; cmd->data_length = size; } } else { } return (0U); } } void transport_init_se_cmd(struct se_cmd *cmd , struct target_core_fabric_ops const *tfo , struct se_session *se_sess , u32 data_length , int data_direction , int task_attr , unsigned char *sense_buffer ) { struct lock_class_key __key ; { INIT_LIST_HEAD(& cmd->se_delayed_node); INIT_LIST_HEAD(& cmd->se_qf_node); INIT_LIST_HEAD(& 
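/* transport_init_se_cmd(): initializes the command's list heads, completions,
 * t_state_lock and kref, then records the fabric ops, session, data length,
 * data direction, task attribute and sense buffer.  target_cmd_size_check()
 * above reconciles the fabric transfer length with the CDB-derived length and
 * sets the over/underflow flags and residual_count. */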
cmd->se_cmd_list); INIT_LIST_HEAD(& cmd->state_list); init_completion(& cmd->t_transport_stop_comp); init_completion(& cmd->cmd_wait_comp); init_completion(& cmd->task_stop_comp); spinlock_check(& cmd->t_state_lock); __raw_spin_lock_init(& cmd->t_state_lock.__annonCompField17.rlock, "&(&cmd->t_state_lock)->rlock", & __key); kref_init(& cmd->cmd_kref); cmd->transport_state = 128U; cmd->se_tfo = tfo; cmd->se_sess = se_sess; cmd->data_length = data_length; cmd->data_direction = (enum dma_data_direction )data_direction; cmd->sam_task_attr = task_attr; cmd->sense_buffer = (void *)sense_buffer; cmd->state_active = 0; return; } } static char const __kstrtab_transport_init_se_cmd[22U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'i', 'n', 'i', 't', '_', 's', 'e', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_transport_init_se_cmd ; struct kernel_symbol const __ksymtab_transport_init_se_cmd = {(unsigned long )(& transport_init_se_cmd), (char const *)(& __kstrtab_transport_init_se_cmd)}; static sense_reason_t transport_check_alloc_task_attr(struct se_cmd *cmd ) { struct se_device *dev ; struct _ddebug descriptor ; long tmp ; int tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { dev = cmd->se_dev; if ((int )(dev->transport)->transport_flags & 1) { return (0U); } else { } if (cmd->sam_task_attr == 36) { descriptor.modname = "target_core_mod"; descriptor.function = "transport_check_alloc_task_attr"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "SAM Task Attribute ACA emulation is not supported\n"; descriptor.lineno = 1177U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "SAM Task Attribute ACA emulation is not supported\n"); } else { } return (8U); } else { } tmp___0 = atomic_add_return(1, & dev->dev_ordered_id); cmd->se_ordered_id = (u32 )tmp___0; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "transport_check_alloc_task_attr"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n"; descriptor___0.lineno = 1187U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", cmd->se_ordered_id, cmd->sam_task_attr, (char const *)(& (dev->transport)->name)); } else { } return (0U); } } sense_reason_t target_setup_cmd_from_cdb(struct se_cmd *cmd , unsigned char *cdb ) { struct se_device *dev ; sense_reason_t ret ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; void *tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; { dev = cmd->se_dev; tmp___0 = scsi_command_size((unsigned char const *)cdb); if (tmp___0 > 260U) { tmp = scsi_command_size((unsigned char const *)cdb); printk("\vReceived SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", tmp, 260); return (8U); } else { } tmp___4 = scsi_command_size((unsigned char const *)cdb); if (tmp___4 > 32U) { tmp___1 = 
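/* target_setup_cmd_from_cdb(): rejects CDBs longer than 260 bytes
 * (SCSI_MAX_VARLEN_CDB_SIZE), kzalloc()s a buffer when the CDB exceeds the
 * 32-byte inline __t_task_cdb, copies the CDB, then runs the unit-attention,
 * ALUA-state and reservation checks before the backend's parse_cdb(). */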
scsi_command_size((unsigned char const *)cdb); tmp___2 = kzalloc((size_t )tmp___1, 208U); cmd->t_task_cdb = (unsigned char *)tmp___2; if ((unsigned long )cmd->t_task_cdb == (unsigned long )((unsigned char *)0U)) { tmp___3 = scsi_command_size((unsigned char const *)cdb); printk("\vUnable to allocate cmd->t_task_cdb %u > sizeof(cmd->__t_task_cdb): %lu ops\n", tmp___3, 32UL); return (18U); } else { } } else { cmd->t_task_cdb = (unsigned char *)(& cmd->__t_task_cdb); } tmp___5 = scsi_command_size((unsigned char const *)cdb); memcpy((void *)cmd->t_task_cdb, (void const *)cdb, (size_t )tmp___5); trace_target_sequencer_start(cmd); ret = target_scsi3_ua_check(cmd); if (ret != 0U) { return (ret); } else { } ret = target_alua_state_check(cmd); if (ret != 0U) { return (ret); } else { } ret = target_check_reservation(cmd); if (ret != 0U) { cmd->scsi_status = 24U; return (ret); } else { } ret = (*((dev->transport)->parse_cdb))(cmd); if (ret != 0U) { return (ret); } else { } ret = transport_check_alloc_task_attr(cmd); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 1U; atomic_long_inc(& (cmd->se_lun)->lun_stats.cmd_pdus); return (0U); } } static char const __kstrtab_target_setup_cmd_from_cdb[26U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'e', 't', 'u', 'p', '_', 'c', 'm', 'd', '_', 'f', 'r', 'o', 'm', '_', 'c', 'd', 'b', '\000'}; struct kernel_symbol const __ksymtab_target_setup_cmd_from_cdb ; struct kernel_symbol const __ksymtab_target_setup_cmd_from_cdb = {(unsigned long )(& target_setup_cmd_from_cdb), (char const *)(& __kstrtab_target_setup_cmd_from_cdb)}; int transport_handle_cdb_direct(struct se_cmd *cmd ) { sense_reason_t ret ; int tmp ; { if ((unsigned long )cmd->se_lun == (unsigned long )((struct se_lun *)0)) { dump_stack(); printk("\vcmd->se_lun is NULL\n"); return (-22); } else { } tmp = preempt_count___0(); if (((unsigned long )tmp & 2096896UL) != 0UL) { dump_stack(); printk("\vtransport_generic_handle_cdb cannot be called from interrupt context\n"); return (-22); } else { } cmd->t_state = 1; cmd->transport_state = cmd->transport_state | 2U; ret = transport_generic_new_cmd(cmd); if (ret != 0U) { transport_generic_request_failure(cmd, ret); } else { } return (0); } } static char const __kstrtab_transport_handle_cdb_direct[28U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'h', 'a', 'n', 'd', 'l', 'e', '_', 'c', 'd', 'b', '_', 'd', 'i', 'r', 'e', 'c', 't', '\000'}; struct kernel_symbol const __ksymtab_transport_handle_cdb_direct ; struct kernel_symbol const __ksymtab_transport_handle_cdb_direct = {(unsigned long )(& transport_handle_cdb_direct), (char const *)(& __kstrtab_transport_handle_cdb_direct)}; sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *cmd , struct scatterlist *sgl , u32 sgl_count , struct scatterlist *sgl_bidi , u32 sgl_bidi_count ) { { if ((unsigned long )sgl == (unsigned long )((struct scatterlist *)0) || sgl_count == 0U) { return (0U); } else { } if ((cmd->se_cmd_flags & 4096U) != 0U) { printk("\fRejecting SCSI DATA overflow for fabric using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); return (8U); } else { } cmd->t_data_sg = sgl; cmd->t_data_nents = sgl_count; cmd->t_bidi_data_sg = sgl_bidi; cmd->t_bidi_data_nents = sgl_bidi_count; cmd->se_cmd_flags = cmd->se_cmd_flags | 131072U; return (0U); } } int target_submit_cmd_map_sgls(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *cdb , unsigned char *sense , u64 unpacked_lun , u32 data_length , int task_attr , int data_dir , int flags , struct scatterlist *sgl 
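/* target_submit_cmd_map_sgls(): fabric-facing submission path.  It initializes
 * the command, takes a session command reference, resolves the LUN, optionally
 * maps caller-supplied data/BIDI/protection scatterlists, and finally submits
 * the command via transport_handle_cdb_direct(). */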
, u32 sgl_count , struct scatterlist *sgl_bidi , u32 sgl_bidi_count , struct scatterlist *sgl_prot , u32 sgl_prot_count ) { struct se_portal_group *se_tpg ; sense_reason_t rc ; int ret ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; unsigned char *buf ; struct page *tmp___4 ; void *tmp___5 ; struct page *tmp___6 ; { se_tpg = se_sess->se_tpg; tmp = ldv__builtin_expect((unsigned long )se_tpg == (unsigned long )((struct se_portal_group *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (1373), "i" (12UL)); ldv_63407: ; goto ldv_63407; } else { } tmp___0 = ldv__builtin_expect((long )((unsigned long )se_cmd->se_tfo != (unsigned long )((struct target_core_fabric_ops const *)0) || (unsigned long )se_cmd->se_sess != (unsigned long )((struct se_session *)0)), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (1374), "i" (12UL)); ldv_63408: ; goto ldv_63408; } else { } tmp___1 = preempt_count___0(); tmp___2 = ldv__builtin_expect(((unsigned long )tmp___1 & 2096896UL) != 0UL, 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (1375), "i" (12UL)); ldv_63409: ; goto ldv_63409; } else { } transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, data_dir, task_attr, sense); if ((flags & 4) != 0) { se_cmd->unknown_data_length = 1U; } else { } ret = target_get_sess_cmd(se_cmd, (flags & 2) != 0); if (ret != 0) { return (ret); } else { } if (flags & 1) { se_cmd->se_cmd_flags = se_cmd->se_cmd_flags | 1024U; } else { } rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun); if (rc != 0U) { transport_send_check_condition_and_sense(se_cmd, rc, 0); target_put_sess_cmd(se_cmd); return (0); } else { } rc = target_setup_cmd_from_cdb(se_cmd, cdb); if (rc != 0U) { transport_generic_request_failure(se_cmd, rc); return (0); } else { } if (sgl_prot_count != 0U) { se_cmd->t_prot_sg = sgl_prot; se_cmd->t_prot_nents = sgl_prot_count; se_cmd->se_cmd_flags = se_cmd->se_cmd_flags | 2097152U; } else { } if (sgl_count != 0U) { tmp___3 = ldv__builtin_expect((unsigned long )sgl == (unsigned long )((struct scatterlist *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (1431), "i" (12UL)); ldv_63410: ; goto ldv_63410; } 
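/* The __asm__("ud2" ...) sequences with __bug_table entries, followed by the
 * self-looping ldv_* labels, appear to be the expanded form of BUG_ON(); the
 * infinite goto models the fact that BUG() does not return. */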
else { } if ((se_cmd->se_cmd_flags & 8U) == 0U && (unsigned int )se_cmd->data_direction == 2U) { buf = (unsigned char *)0U; if ((unsigned long )sgl != (unsigned long )((struct scatterlist *)0)) { tmp___4 = sg_page(sgl); tmp___5 = kmap(tmp___4); buf = (unsigned char *)tmp___5 + (unsigned long )sgl->offset; } else { } if ((unsigned long )buf != (unsigned long )((unsigned char *)0U)) { memset((void *)buf, 0, (size_t )sgl->length); tmp___6 = sg_page(sgl); kunmap(tmp___6); } else { } } else { } rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, sgl_bidi, sgl_bidi_count); if (rc != 0U) { transport_generic_request_failure(se_cmd, rc); return (0); } else { } } else { } core_alua_check_nonop_delay(se_cmd); transport_handle_cdb_direct(se_cmd); return (0); } } static char const __kstrtab_target_submit_cmd_map_sgls[27U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'u', 'b', 'm', 'i', 't', '_', 'c', 'm', 'd', '_', 'm', 'a', 'p', '_', 's', 'g', 'l', 's', '\000'}; struct kernel_symbol const __ksymtab_target_submit_cmd_map_sgls ; struct kernel_symbol const __ksymtab_target_submit_cmd_map_sgls = {(unsigned long )(& target_submit_cmd_map_sgls), (char const *)(& __kstrtab_target_submit_cmd_map_sgls)}; int target_submit_cmd(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *cdb , unsigned char *sense , u64 unpacked_lun , u32 data_length , int task_attr , int data_dir , int flags ) { int tmp ; { tmp = target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense, unpacked_lun, data_length, task_attr, data_dir, flags, (struct scatterlist *)0, 0U, (struct scatterlist *)0, 0U, (struct scatterlist *)0, 0U); return (tmp); } } static char const __kstrtab_target_submit_cmd[18U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'u', 'b', 'm', 'i', 't', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_target_submit_cmd ; struct kernel_symbol const __ksymtab_target_submit_cmd = {(unsigned long )(& target_submit_cmd), (char const *)(& __kstrtab_target_submit_cmd)}; static void target_complete_tmr_failure(struct work_struct *work ) { struct se_cmd *se_cmd ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; se_cmd = (struct se_cmd *)__mptr + 0xfffffffffffffdf8UL; (se_cmd->se_tmr_req)->response = 3U; (*((se_cmd->se_tfo)->queue_tm_rsp))(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); return; } } int target_submit_tmr(struct se_cmd *se_cmd , struct se_session *se_sess , unsigned char *sense , u64 unpacked_lun , void *fabric_tmr_ptr , unsigned char tm_type , gfp_t gfp , unsigned int tag , int flags ) { struct se_portal_group *se_tpg ; int ret ; long tmp ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { se_tpg = se_sess->se_tpg; tmp = ldv__builtin_expect((unsigned long )se_tpg == (unsigned long )((struct se_portal_group *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (1543), "i" (12UL)); ldv_63478: ; goto ldv_63478; } else { } transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0U, 3, 32, sense); ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, (int )tm_type, gfp); if (ret < 0) { return (-12); } else { } if ((unsigned int )tm_type == 1U) { (se_cmd->se_tmr_req)->ref_task_tag = (u64 )tag; } else 
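/* target_submit_tmr(): allocates a TMR request, records the referenced task
 * tag for tm_type 1, takes a session command reference and resolves the LUN;
 * on lookup failure it schedules target_complete_tmr_failure(), otherwise it
 * hands the request to transport_generic_handle_tmr(). */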
{ } ret = target_get_sess_cmd(se_cmd, (flags & 2) != 0); if (ret != 0) { core_tmr_release_req(se_cmd->se_tmr_req); return (ret); } else { } ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); if (ret != 0) { __init_work(& se_cmd->work, 0); __constr_expr_0.counter = 137438953408L; se_cmd->work.data = __constr_expr_0; lockdep_init_map(& se_cmd->work.lockdep_map, "(&se_cmd->work)", & __key, 0); INIT_LIST_HEAD(& se_cmd->work.entry); se_cmd->work.func = & target_complete_tmr_failure; schedule_work(& se_cmd->work); return (0); } else { } transport_generic_handle_tmr(se_cmd); return (0); } } static char const __kstrtab_target_submit_tmr[18U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'u', 'b', 'm', 'i', 't', '_', 't', 'm', 'r', '\000'}; struct kernel_symbol const __ksymtab_target_submit_tmr ; struct kernel_symbol const __ksymtab_target_submit_tmr = {(unsigned long )(& target_submit_tmr), (char const *)(& __kstrtab_target_submit_tmr)}; bool target_stop_cmd(struct se_cmd *cmd , unsigned long *flags ) { bool was_active ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; raw_spinlock_t *tmp___1 ; { was_active = 0; if ((cmd->transport_state & 512U) != 0U) { cmd->transport_state = cmd->transport_state | 256U; spin_unlock_irqrestore(& cmd->t_state_lock, *flags); descriptor.modname = "target_core_mod"; descriptor.function = "target_stop_cmd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "cmd %p waiting to complete\n"; descriptor.lineno = 1594U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "cmd %p waiting to complete\n", cmd); } else { } wait_for_completion(& cmd->task_stop_comp); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_stop_cmd"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "cmd %p stopped successfully\n"; descriptor___0.lineno = 1596U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "cmd %p stopped successfully\n", cmd); } else { } tmp___1 = spinlock_check(& cmd->t_state_lock); *flags = _raw_spin_lock_irqsave(tmp___1); cmd->transport_state = cmd->transport_state & 4294967039U; cmd->transport_state = cmd->transport_state & 4294966783U; was_active = 1; } else { } return (was_active); } } void transport_generic_request_failure(struct se_cmd *cmd , sense_reason_t sense_reason ) { int ret ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; int tmp___0 ; long tmp___1 ; struct _ddebug descriptor___1 ; long tmp___2 ; int tmp___3 ; { ret = 0; descriptor.modname = "target_core_mod"; descriptor.function = "transport_generic_request_failure"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx CDB: 0x%02x\n"; descriptor.lineno = 
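/* transport_generic_request_failure(): completes task-attribute accounting,
 * then maps sense_reason either to a CHECK CONDITION response or, for the
 * reservation-conflict case (16U), to SAM status 0x18; fabric callbacks that
 * return -11/-12 (-EAGAIN/-ENOMEM) push the command onto the QUEUE_FULL path. */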
1616U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx CDB: 0x%02x\n", cmd, cmd->tag, (int )*(cmd->t_task_cdb)); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "transport_generic_request_failure"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "-----[ i_state: %d t_state: %d sense_reason: %d\n"; descriptor___0.lineno = 1619U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*((cmd->se_tfo)->get_cmd_state))(cmd); __dynamic_pr_debug(& descriptor___0, "-----[ i_state: %d t_state: %d sense_reason: %d\n", tmp___0, (unsigned int )cmd->t_state, sense_reason); } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "transport_generic_request_failure"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___1.format = "-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n"; descriptor___1.lineno = 1623U; descriptor___1.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___1, "-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", (cmd->transport_state & 2U) != 0U, (cmd->transport_state & 32U) != 0U, (cmd->transport_state & 16U) != 0U); } else { } transport_complete_task_attr(cmd); if ((cmd->se_cmd_flags & 524288U) != 0U && (unsigned long )cmd->transport_complete_callback != (unsigned long )((sense_reason_t (*)(struct se_cmd * , bool ))0)) { (*(cmd->transport_complete_callback))(cmd, 0); } else { } switch (sense_reason) { case 1U: ; case 2U: ; case 8U: ; case 9U: ; case 19U: ; case 10U: ; case 11U: ; case 12U: ; case 17U: ; case 13U: ; case 14U: ; case 15U: ; case 21U: ; case 22U: ; case 23U: ; goto ldv_63531; case 18U: sense_reason = 10U; goto ldv_63531; case 16U: cmd->scsi_status = 24U; if ((unsigned long )cmd->se_sess != (unsigned long )((struct se_session *)0) && (cmd->se_dev)->dev_attrib.emulate_ua_intlck_ctrl == 2) { target_ua_allocate_lun((cmd->se_sess)->se_node_acl, (u32 )cmd->orig_fe_lun, 44, 9); } else { } trace_target_cmd_complete(cmd); ret = (*((cmd->se_tfo)->queue_status))(cmd); if (ret == -11 || ret == -12) { goto queue_full; } else { } goto check_stop; default: printk("\vUnknown transport error for CDB 0x%02x: %d\n", (int )*(cmd->t_task_cdb), sense_reason); sense_reason = 2U; goto ldv_63531; } ldv_63531: ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); if (ret == -11 || ret == -12) { goto queue_full; } else { } check_stop: transport_lun_remove_cmd(cmd); tmp___3 = transport_cmd_check_stop_to_fabric(cmd); return; queue_full: cmd->t_state = 19; transport_handle_queue_full(cmd, cmd->se_dev); return; } } static char const __kstrtab_transport_generic_request_failure[34U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'g', 'e', 'n', 'e', 'r', 'i', 'c', '_', 'r', 'e', 'q', 'u', 'e', 's', 't', '_', 'f', 'a', 'i', 'l', 'u', 'r', 'e', '\000'}; struct kernel_symbol const 
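/* The __kstrtab_<name> character arrays and __ksymtab_<name> kernel_symbol
 * objects scattered through this file appear to be CIL's expansion of the
 * kernel's EXPORT_SYMBOL() macro for the corresponding functions. */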
__ksymtab_transport_generic_request_failure ; struct kernel_symbol const __ksymtab_transport_generic_request_failure = {(unsigned long )(& transport_generic_request_failure), (char const *)(& __kstrtab_transport_generic_request_failure)}; void __target_execute_cmd(struct se_cmd *cmd ) { sense_reason_t ret ; { if ((unsigned long )cmd->execute_cmd != (unsigned long )((sense_reason_t (*)(struct se_cmd * ))0)) { ret = (*(cmd->execute_cmd))(cmd); if (ret != 0U) { spin_lock_irq(& cmd->t_state_lock); cmd->transport_state = cmd->transport_state & 4294966767U; spin_unlock_irq(& cmd->t_state_lock); transport_generic_request_failure(cmd, ret); } else { } } else { } return; } } static int target_write_prot_action(struct se_cmd *cmd ) { u32 sectors ; int tmp ; long tmp___0 ; { switch ((unsigned int )cmd->prot_op) { case 2U: ; if (((unsigned int )(cmd->se_sess)->sup_prot_ops & 2U) == 0U) { sbc_dif_generate(cmd); } else { } goto ldv_63554; case 8U: ; if (((unsigned int )(cmd->se_sess)->sup_prot_ops & 8U) != 0U) { goto ldv_63554; } else { } tmp = __ilog2_u32((cmd->se_dev)->dev_attrib.block_size); sectors = cmd->data_length >> tmp; cmd->pi_err = sbc_dif_verify(cmd, (sector_t )cmd->t_task_lba, sectors, 0U, cmd->t_prot_sg, 0); tmp___0 = ldv__builtin_expect(cmd->pi_err != 0U, 0L); if (tmp___0 != 0L) { spin_lock_irq(& cmd->t_state_lock); cmd->transport_state = cmd->transport_state & 4294966767U; spin_unlock_irq(& cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return (-1); } else { } goto ldv_63554; default: ; goto ldv_63554; } ldv_63554: ; return (0); } } static bool target_handle_task_attr(struct se_cmd *cmd ) { struct se_device *dev ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; int tmp___1 ; int tmp___2 ; struct _ddebug descriptor___1 ; long tmp___3 ; { dev = cmd->se_dev; if ((int )(dev->transport)->transport_flags & 1) { return (0); } else { } switch (cmd->sam_task_attr) { case 33: descriptor.modname = "target_core_mod"; descriptor.function = "target_handle_task_attr"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Added HEAD_OF_QUEUE for CDB: 0x%02x, se_ordered_id: %u\n"; descriptor.lineno = 1772U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Added HEAD_OF_QUEUE for CDB: 0x%02x, se_ordered_id: %u\n", (int )*(cmd->t_task_cdb), cmd->se_ordered_id); } else { } return (0); case 34: atomic_inc_mb(& dev->dev_ordered_sync); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_handle_task_attr"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "Added ORDERED for CDB: 0x%02x to ordered list, se_ordered_id: %u\n"; descriptor___0.lineno = 1779U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "Added ORDERED for CDB: 0x%02x to ordered list, se_ordered_id: %u\n", (int )*(cmd->t_task_cdb), cmd->se_ordered_id); } else { } tmp___1 = atomic_read((atomic_t const *)(& dev->simple_cmds)); if (tmp___1 == 0) { 
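/* target_handle_task_attr(): enforces SAM task-attribute ordering.
 * HEAD_OF_QUEUE (33) and ORDERED (34, when no simple commands are outstanding)
 * execute immediately; otherwise, whenever dev_ordered_sync is non-zero, the
 * command is parked on dev->delayed_cmd_list and the function returns true. */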
return (0); } else { } goto ldv_63566; default: atomic_inc_mb(& dev->simple_cmds); goto ldv_63566; } ldv_63566: tmp___2 = atomic_read((atomic_t const *)(& dev->dev_ordered_sync)); if (tmp___2 == 0) { return (0); } else { } spin_lock(& dev->delayed_cmd_lock); list_add_tail(& cmd->se_delayed_node, & dev->delayed_cmd_list); spin_unlock(& dev->delayed_cmd_lock); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_handle_task_attr"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___1.format = "Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list, se_ordered_id: %u\n"; descriptor___1.lineno = 1806U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___1, "Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list, se_ordered_id: %u\n", (int )*(cmd->t_task_cdb), cmd->sam_task_attr, cmd->se_ordered_id); } else { } return (1); } } void target_execute_cmd(struct se_cmd *cmd ) { int tmp ; struct _ddebug descriptor ; long tmp___0 ; int tmp___1 ; bool tmp___2 ; { tmp = transport_check_aborted_status(cmd, 1); if (tmp != 0) { return; } else { } spin_lock_irq(& cmd->t_state_lock); if ((cmd->transport_state & 32U) != 0U) { descriptor.modname = "target_core_mod"; descriptor.function = "target_execute_cmd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "%s:%d CMD_T_STOP for ITT: 0x%08llx\n"; descriptor.lineno = 1825U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "%s:%d CMD_T_STOP for ITT: 0x%08llx\n", "target_execute_cmd", 1825, cmd->tag); } else { } spin_unlock_irq(& cmd->t_state_lock); complete_all(& cmd->t_transport_stop_comp); return; } else { } cmd->t_state = 5; cmd->transport_state = cmd->transport_state | 530U; spin_unlock_irq(& cmd->t_state_lock); tmp___1 = target_write_prot_action(cmd); if (tmp___1 != 0) { return; } else { } tmp___2 = target_handle_task_attr(cmd); if ((int )tmp___2) { spin_lock_irq(& cmd->t_state_lock); cmd->transport_state = cmd->transport_state & 4294966767U; spin_unlock_irq(& cmd->t_state_lock); return; } else { } __target_execute_cmd(cmd); return; } } static char const __kstrtab_target_execute_cmd[19U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'e', 'x', 'e', 'c', 'u', 't', 'e', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_target_execute_cmd ; struct kernel_symbol const __ksymtab_target_execute_cmd = {(unsigned long )(& target_execute_cmd), (char const *)(& __kstrtab_target_execute_cmd)}; static void target_restart_delayed_cmds(struct se_device *dev ) { struct se_cmd *cmd ; int tmp ; struct list_head const *__mptr ; { ldv_63588: spin_lock(& dev->delayed_cmd_lock); tmp = list_empty((struct list_head const *)(& dev->delayed_cmd_list)); if (tmp != 0) { spin_unlock(& dev->delayed_cmd_lock); goto ldv_63585; } else { } __mptr = (struct list_head const *)dev->delayed_cmd_list.next; cmd = (struct se_cmd *)__mptr + 0xffffffffffffffb0UL; list_del(& cmd->se_delayed_node); spin_unlock(& dev->delayed_cmd_lock); __target_execute_cmd(cmd); if 
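/* target_restart_delayed_cmds(): pops commands off dev->delayed_cmd_list under
 * delayed_cmd_lock and executes each via __target_execute_cmd(), stopping
 * after the first ORDERED (task attribute 34) command. */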
(cmd->sam_task_attr == 34) { goto ldv_63585; } else { } goto ldv_63588; ldv_63585: ; return; } } static void transport_complete_task_attr(struct se_cmd *cmd ) { struct se_device *dev ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; { dev = cmd->se_dev; if ((int )(dev->transport)->transport_flags & 1) { return; } else { } if (cmd->sam_task_attr == 32) { atomic_dec_mb(& dev->simple_cmds); dev->dev_cur_ordered_id = dev->dev_cur_ordered_id + 1U; descriptor.modname = "target_core_mod"; descriptor.function = "transport_complete_task_attr"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Incremented dev->dev_cur_ordered_id: %u for SIMPLE: %u\n"; descriptor.lineno = 1893U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Incremented dev->dev_cur_ordered_id: %u for SIMPLE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else { } } else if (cmd->sam_task_attr == 33) { dev->dev_cur_ordered_id = dev->dev_cur_ordered_id + 1U; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "transport_complete_task_attr"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE: %u\n"; descriptor___0.lineno = 1898U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else { } } else if (cmd->sam_task_attr == 34) { atomic_dec_mb(& dev->dev_ordered_sync); dev->dev_cur_ordered_id = dev->dev_cur_ordered_id + 1U; descriptor___1.modname = "target_core_mod"; descriptor___1.function = "transport_complete_task_attr"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___1.format = "Incremented dev_cur_ordered_id: %u for ORDERED: %u\n"; descriptor___1.lineno = 1904U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "Incremented dev_cur_ordered_id: %u for ORDERED: %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id); } else { } } else { } target_restart_delayed_cmds(dev); return; } } static void transport_complete_qf(struct se_cmd *cmd ) { int ret ; { ret = 0; transport_complete_task_attr(cmd); if ((cmd->se_cmd_flags & 2U) != 0U) { trace_target_cmd_complete(cmd); ret = (*((cmd->se_tfo)->queue_status))(cmd); goto out; } else { } switch ((unsigned int )cmd->data_direction) { case 2U: trace_target_cmd_complete(cmd); ret = (*((cmd->se_tfo)->queue_data_in))(cmd); goto ldv_63603; case 1U: ; if ((cmd->se_cmd_flags & 1024U) != 0U) { ret = (*((cmd->se_tfo)->queue_data_in))(cmd); goto ldv_63603; } else { } case 3U: trace_target_cmd_complete(cmd); ret = 
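/* transport_complete_task_attr() above undoes the accounting taken in
 * target_handle_task_attr() (simple_cmds / dev_ordered_sync), advances
 * dev_cur_ordered_id and restarts delayed commands.  transport_complete_qf()
 * re-issues the queued response (queue_data_in or queue_status) for commands
 * that previously hit QUEUE_FULL. */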
(*((cmd->se_tfo)->queue_status))(cmd); goto ldv_63603; default: ; goto ldv_63603; } ldv_63603: ; out: ; if (ret < 0) { transport_handle_queue_full(cmd, cmd->se_dev); return; } else { } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } } static void transport_handle_queue_full(struct se_cmd *cmd , struct se_device *dev ) { { spin_lock_irq(& dev->qf_cmd_lock); list_add_tail(& cmd->se_qf_node, & (cmd->se_dev)->qf_cmd_list); atomic_inc_mb(& dev->dev_qf_count); spin_unlock_irq(& (cmd->se_dev)->qf_cmd_lock); schedule_work(& (cmd->se_dev)->qf_work_queue); return; } } static bool target_read_prot_action(struct se_cmd *cmd ) { u32 sectors ; int tmp ; { switch ((unsigned int )cmd->prot_op) { case 4U: ; if (((unsigned int )(cmd->se_sess)->sup_prot_ops & 4U) == 0U) { tmp = __ilog2_u32((cmd->se_dev)->dev_attrib.block_size); sectors = cmd->data_length >> tmp; cmd->pi_err = sbc_dif_verify(cmd, (sector_t )cmd->t_task_lba, sectors, 0U, cmd->t_prot_sg, 0); if (cmd->pi_err != 0U) { return (1); } else { } } else { } goto ldv_63616; case 1U: ; if ((int )(cmd->se_sess)->sup_prot_ops & 1) { goto ldv_63616; } else { } sbc_dif_generate(cmd); goto ldv_63616; default: ; goto ldv_63616; } ldv_63616: ; return (0); } } static void target_complete_ok_work(struct work_struct *work ) { struct se_cmd *cmd ; struct work_struct const *__mptr ; int ret ; int tmp ; int __ret_warn_on ; long tmp___0 ; sense_reason_t rc ; bool tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; { __mptr = (struct work_struct const *)work; cmd = (struct se_cmd *)__mptr + 0xfffffffffffffdf8UL; transport_complete_task_attr(cmd); tmp = atomic_read((atomic_t const *)(& (cmd->se_dev)->dev_qf_count)); if (tmp != 0) { schedule_work(& (cmd->se_dev)->qf_work_queue); } else { } if ((cmd->se_cmd_flags & 2U) != 0U) { __ret_warn_on = (unsigned int )cmd->scsi_status == 0U; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c", 2014); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); ret = transport_send_check_condition_and_sense(cmd, 0U, 1); if (ret == -11 || ret == -12) { goto queue_full; } else { } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } else { } if ((unsigned long )cmd->transport_complete_callback != (unsigned long )((sense_reason_t (*)(struct se_cmd * , bool ))0)) { rc = (*(cmd->transport_complete_callback))(cmd, 1); if (rc == 0U && (cmd->se_cmd_flags & 1048576U) == 0U) { if ((cmd->se_cmd_flags & 524288U) != 0U && cmd->data_length == 0U) { goto queue_rsp; } else { } return; } else if (rc != 0U) { ret = transport_send_check_condition_and_sense(cmd, rc, 0); if (ret == -11 || ret == -12) { goto queue_full; } else { } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } else { } } else { } queue_rsp: ; switch ((unsigned int )cmd->data_direction) { case 2U: atomic_long_add((long )cmd->data_length, & (cmd->se_lun)->lun_stats.tx_data_octets); tmp___1 = target_read_prot_action(cmd); if ((int )tmp___1) { ret = transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); if (ret == -11 || ret == -12) { goto queue_full; } else { } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } else { } trace_target_cmd_complete(cmd); ret = (*((cmd->se_tfo)->queue_data_in))(cmd); if 
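/* Throughout target_complete_ok_work(), return codes -11 and -12 from the
 * fabric queue_data_in/queue_status callbacks (i.e. -EAGAIN/-ENOMEM) branch to
 * the queue_full label, where the command is parked via
 * transport_handle_queue_full() instead of being failed. */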
(ret == -11 || ret == -12) { goto queue_full; } else { } goto ldv_63632; case 1U: atomic_long_add((long )cmd->data_length, & (cmd->se_lun)->lun_stats.rx_data_octets); if ((cmd->se_cmd_flags & 1024U) != 0U) { atomic_long_add((long )cmd->data_length, & (cmd->se_lun)->lun_stats.tx_data_octets); ret = (*((cmd->se_tfo)->queue_data_in))(cmd); if (ret == -11 || ret == -12) { goto queue_full; } else { } goto ldv_63632; } else { } case 3U: trace_target_cmd_complete(cmd); ret = (*((cmd->se_tfo)->queue_status))(cmd); if (ret == -11 || ret == -12) { goto queue_full; } else { } goto ldv_63632; default: ; goto ldv_63632; } ldv_63632: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; queue_full: descriptor.modname = "target_core_mod"; descriptor.function = "target_complete_ok_work"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Handling complete_ok QUEUE_FULL: se_cmd: %p, data_direction: %d\n"; descriptor.lineno = 2107U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor, "Handling complete_ok QUEUE_FULL: se_cmd: %p, data_direction: %d\n", cmd, (unsigned int )cmd->data_direction); } else { } cmd->t_state = 19; transport_handle_queue_full(cmd, cmd->se_dev); return; } } __inline static void transport_free_sgl(struct scatterlist *sgl , int nents ) { struct scatterlist *sg ; int count ; struct page *tmp ; { count = 0; sg = sgl; goto ldv_63645; ldv_63644: tmp = sg_page(sg); __free_pages(tmp, 0U); count = count + 1; sg = sg_next(sg); ldv_63645: ; if (count < nents) { goto ldv_63644; } else { } kfree((void const *)sgl); return; } } __inline static void transport_reset_sgl_orig(struct se_cmd *cmd ) { { if ((unsigned long )cmd->t_data_sg_orig == (unsigned long )((struct scatterlist *)0)) { return; } else { } kfree((void const *)cmd->t_data_sg); cmd->t_data_sg = cmd->t_data_sg_orig; cmd->t_data_sg_orig = (struct scatterlist *)0; cmd->t_data_nents = cmd->t_data_nents_orig; cmd->t_data_nents_orig = 0U; return; } } __inline static void transport_free_pages(struct se_cmd *cmd ) { { if ((cmd->se_cmd_flags & 2097152U) == 0U) { transport_free_sgl(cmd->t_prot_sg, (int )cmd->t_prot_nents); cmd->t_prot_sg = (struct scatterlist *)0; cmd->t_prot_nents = 0U; } else { } if ((cmd->se_cmd_flags & 131072U) != 0U) { if ((cmd->se_cmd_flags & 524288U) != 0U) { transport_free_sgl(cmd->t_bidi_data_sg, (int )cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = (struct scatterlist *)0; cmd->t_bidi_data_nents = 0U; } else { } transport_reset_sgl_orig(cmd); return; } else { } transport_reset_sgl_orig(cmd); transport_free_sgl(cmd->t_data_sg, (int )cmd->t_data_nents); cmd->t_data_sg = (struct scatterlist *)0; cmd->t_data_nents = 0U; transport_free_sgl(cmd->t_bidi_data_sg, (int )cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = (struct scatterlist *)0; cmd->t_bidi_data_nents = 0U; return; } } static int transport_release_cmd(struct se_cmd *cmd ) { long tmp ; int tmp___0 ; { tmp = ldv__builtin_expect((unsigned long )cmd->se_tfo == (unsigned long )((struct target_core_fabric_ops const *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (2181), "i" (12UL)); ldv_63656: ; goto ldv_63656; } else { } if ((cmd->se_cmd_flags & 16U) != 0U) { core_tmr_release_req(cmd->se_tmr_req); } else { } if ((unsigned long )cmd->t_task_cdb != (unsigned long )((unsigned char *)(& cmd->__t_task_cdb))) { kfree((void const *)cmd->t_task_cdb); } else { } tmp___0 = target_put_sess_cmd(cmd); return (tmp___0); } } static int transport_put_cmd(struct se_cmd *cmd ) { int tmp ; { transport_free_pages(cmd); tmp = transport_release_cmd(cmd); return (tmp); } } void *transport_kmap_data_sg(struct se_cmd *cmd ) { struct scatterlist *sg ; struct page **pages ; int i ; long tmp ; struct page *tmp___0 ; void *tmp___1 ; void *tmp___2 ; pgprot_t __constr_expr_0 ; { sg = cmd->t_data_sg; if (cmd->t_data_nents == 0U) { return ((void *)0); } else { } tmp = ldv__builtin_expect((unsigned long )sg == (unsigned long )((struct scatterlist *)0), 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"), "i" (2220), "i" (12UL)); ldv_63666: ; goto ldv_63666; } else { } if (cmd->t_data_nents == 1U) { tmp___0 = sg_page(sg); tmp___1 = kmap(tmp___0); return (tmp___1 + (unsigned long )sg->offset); } else { } tmp___2 = kmalloc((unsigned long )cmd->t_data_nents * 8UL, 208U); pages = (struct page **)tmp___2; if ((unsigned long )pages == (unsigned long )((struct page **)0)) { return ((void *)0); } else { } i = 0; sg = cmd->t_data_sg; goto ldv_63668; ldv_63667: *(pages + (unsigned long )i) = sg_page(sg); i = i + 1; sg = sg_next(sg); ldv_63668: ; if ((unsigned int )i < cmd->t_data_nents) { goto ldv_63667; } else { } __constr_expr_0.pgprot = 0x8000000000000163UL; cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, 4UL, __constr_expr_0); kfree((void const *)pages); if ((unsigned long )cmd->t_data_vmap == (unsigned long )((void *)0)) { return ((void *)0); } else { } return (cmd->t_data_vmap + (unsigned long )(cmd->t_data_sg)->offset); } } static char const __kstrtab_transport_kmap_data_sg[23U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'k', 'm', 'a', 'p', '_', 'd', 'a', 't', 'a', '_', 's', 'g', '\000'}; struct kernel_symbol const __ksymtab_transport_kmap_data_sg ; struct kernel_symbol const __ksymtab_transport_kmap_data_sg = {(unsigned long )(& transport_kmap_data_sg), (char const *)(& __kstrtab_transport_kmap_data_sg)}; void transport_kunmap_data_sg(struct se_cmd *cmd ) { struct page *tmp ; { if (cmd->t_data_nents == 0U) { return; } else if (cmd->t_data_nents == 1U) { tmp = sg_page(cmd->t_data_sg); kunmap(tmp); return; } else { } vunmap((void const *)cmd->t_data_vmap); cmd->t_data_vmap = (void *)0; return; } } static char const __kstrtab_transport_kunmap_data_sg[25U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'k', 'u', 'n', 'm', 'a', 'p', '_', 'd', 'a', 't', 'a', '_', 's', 'g', '\000'}; struct kernel_symbol const __ksymtab_transport_kunmap_data_sg ; struct kernel_symbol const __ksymtab_transport_kunmap_data_sg = {(unsigned long )(& transport_kunmap_data_sg), (char const *)(& __kstrtab_transport_kunmap_data_sg)}; int 
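/* target_alloc_sgl(): builds a scatterlist with DIV_ROUND_UP(length, 4096)
 * entries, each backed by a freshly allocated order-0 page (flag 208U is
 * GFP_KERNEL and 32768U the page-zeroing flag in the original gfp encoding).
 * On a failed page allocation it frees the pages attached so far and returns
 * -12 (-ENOMEM). */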
target_alloc_sgl(struct scatterlist **sgl , unsigned int *nents , u32 length , bool zero_page ) { struct scatterlist *sg ; struct page *page ; gfp_t zero_flag ; unsigned int nent ; int i ; void *tmp ; u32 page_len ; u32 __min1 ; u32 __min2 ; struct page *tmp___0 ; { zero_flag = (int )zero_page ? 32768U : 0U; i = 0; nent = (unsigned int )(((unsigned long )length + 4095UL) / 4096UL); tmp = kmalloc((unsigned long )nent * 40UL, 208U); sg = (struct scatterlist *)tmp; if ((unsigned long )sg == (unsigned long )((struct scatterlist *)0)) { return (-12); } else { } sg_init_table(sg, nent); goto ldv_63705; ldv_63704: __min1 = length; __min2 = 4096U; page_len = __min1 < __min2 ? __min1 : __min2; page = alloc_pages(zero_flag | 208U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto out; } else { } sg_set_page(sg + (unsigned long )i, page, page_len, 0U); length = length - page_len; i = i + 1; ldv_63705: ; if (length != 0U) { goto ldv_63704; } else { } *sgl = sg; *nents = nent; return (0); out: ; goto ldv_63708; ldv_63707: i = i - 1; tmp___0 = sg_page(sg + (unsigned long )i); __free_pages(tmp___0, 0U); ldv_63708: ; if (i > 0) { goto ldv_63707; } else { } kfree((void const *)sg); return (-12); } } sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd ) { int ret ; bool zero_flag ; u32 bidi_length ; u32 caw_length ; int __ret_warn_on ; long tmp ; struct _ddebug descriptor ; long tmp___0 ; { ret = 0; zero_flag = (cmd->se_cmd_flags & 8U) == 0U; if ((unsigned int )cmd->prot_op != 0U && (cmd->se_cmd_flags & 2097152U) == 0U) { ret = target_alloc_sgl(& cmd->t_prot_sg, & cmd->t_prot_nents, cmd->prot_length, 1); if (ret < 0) { return (10U); } else { } } else { } if ((cmd->se_cmd_flags & 131072U) == 0U && cmd->data_length != 0U) { if ((cmd->se_cmd_flags & 1024U) != 0U || (cmd->se_cmd_flags & 524288U) != 0U) { if ((cmd->se_cmd_flags & 524288U) != 0U) { bidi_length = cmd->t_task_nolb * (cmd->se_dev)->dev_attrib.block_size; } else { bidi_length = cmd->data_length; } ret = target_alloc_sgl(& cmd->t_bidi_data_sg, & cmd->t_bidi_data_nents, bidi_length, (int )zero_flag); if (ret < 0) { return (10U); } else { } } else { } ret = target_alloc_sgl(& cmd->t_data_sg, & cmd->t_data_nents, cmd->data_length, (int )zero_flag); if (ret < 0) { return (10U); } else { } } else if ((cmd->se_cmd_flags & 524288U) != 0U && cmd->data_length != 0U) { caw_length = cmd->t_task_nolb * (cmd->se_dev)->dev_attrib.block_size; ret = target_alloc_sgl(& cmd->t_bidi_data_sg, & cmd->t_bidi_data_nents, caw_length, (int )zero_flag); if (ret < 0) { return (10U); } else { } } else { } target_add_to_state_list(cmd); if ((unsigned int )cmd->data_direction != 1U || cmd->data_length == 0U) { target_execute_cmd(cmd); return (0U); } else { } transport_cmd_check_stop(cmd, 0, 1); ret = (*((cmd->se_tfo)->write_pending))(cmd); if (ret == -11 || ret == -12) { goto queue_full; } else { } __ret_warn_on = ret != 0; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c", 2377); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return (ret == 0 ? 
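/* 0U is TCM_NO_SENSE; 10U is the sense_reason_t this file returns for
 * allocation and queueing failures (most likely
 * TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE in the original enum, matching the
 * ASC 0x08 default case further below). */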
0U : 10U); queue_full: descriptor.modname = "target_core_mod"; descriptor.function = "transport_generic_new_cmd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Handling write_pending QUEUE__FULL: se_cmd: %p\n"; descriptor.lineno = 2382U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); } else { } cmd->t_state = 18; transport_handle_queue_full(cmd, cmd->se_dev); return (0U); } } static char const __kstrtab_transport_generic_new_cmd[26U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'g', 'e', 'n', 'e', 'r', 'i', 'c', '_', 'n', 'e', 'w', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_transport_generic_new_cmd ; struct kernel_symbol const __ksymtab_transport_generic_new_cmd = {(unsigned long )(& transport_generic_new_cmd), (char const *)(& __kstrtab_transport_generic_new_cmd)}; static void transport_write_pending_qf(struct se_cmd *cmd ) { int ret ; struct _ddebug descriptor ; long tmp ; { ret = (*((cmd->se_tfo)->write_pending))(cmd); if (ret == -11 || ret == -12) { descriptor.modname = "target_core_mod"; descriptor.function = "transport_write_pending_qf"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Handling write_pending QUEUE__FULL: se_cmd: %p\n"; descriptor.lineno = 2396U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); } else { } transport_handle_queue_full(cmd, cmd->se_dev); } else { } return; } } int transport_generic_free_cmd(struct se_cmd *cmd , int wait_for_tasks ) { unsigned long flags ; int ret ; raw_spinlock_t *tmp ; { ret = 0; if ((cmd->se_cmd_flags & 256U) == 0U) { if (wait_for_tasks != 0 && (cmd->se_cmd_flags & 16U) != 0U) { transport_wait_for_tasks(cmd); } else { } ret = transport_release_cmd(cmd); } else { if (wait_for_tasks != 0) { transport_wait_for_tasks(cmd); } else { } if ((int )cmd->state_active) { tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); target_remove_from_state_list(cmd); spin_unlock_irqrestore(& cmd->t_state_lock, flags); } else { } if ((unsigned long )cmd->se_lun != (unsigned long )((struct se_lun *)0)) { transport_lun_remove_cmd(cmd); } else { } ret = transport_put_cmd(cmd); } return (ret); } } static char const __kstrtab_transport_generic_free_cmd[27U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'g', 'e', 'n', 'e', 'r', 'i', 'c', '_', 'f', 'r', 'e', 'e', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_transport_generic_free_cmd ; struct kernel_symbol const __ksymtab_transport_generic_free_cmd = {(unsigned long )(& transport_generic_free_cmd), (char const *)(& __kstrtab_transport_generic_free_cmd)}; int target_get_sess_cmd(struct se_cmd *se_cmd , bool ack_kref ) { struct se_session *se_sess ; unsigned long flags ; int ret ; raw_spinlock_t *tmp ; { se_sess = se_cmd->se_sess; ret = 0; if ((int )ack_kref) { kref_get(& se_cmd->cmd_kref); } else { } tmp = 
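/* target_get_sess_cmd(): under se_sess->sess_cmd_lock, fail with -108
 * (-ESHUTDOWN) if the session is already tearing down, otherwise link the
 * command onto sess_cmd_list; the reference taken above is dropped again on
 * failure via target_put_sess_cmd(). */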
spinlock_check(& se_sess->sess_cmd_lock); flags = _raw_spin_lock_irqsave(tmp); if ((unsigned int )*((unsigned char *)se_sess + 0UL) != 0U) { ret = -108; goto out; } else { } list_add_tail(& se_cmd->se_cmd_list, & se_sess->sess_cmd_list); out: spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); if (ret != 0 && (int )ack_kref) { target_put_sess_cmd(se_cmd); } else { } return (ret); } } static char const __kstrtab_target_get_sess_cmd[20U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'g', 'e', 't', '_', 's', 'e', 's', 's', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_target_get_sess_cmd ; struct kernel_symbol const __ksymtab_target_get_sess_cmd = {(unsigned long )(& target_get_sess_cmd), (char const *)(& __kstrtab_target_get_sess_cmd)}; static void target_release_cmd_kref(struct kref *kref ) { struct se_cmd *se_cmd ; struct kref const *__mptr ; struct se_session *se_sess ; int tmp ; { __mptr = (struct kref const *)kref; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff00UL; se_sess = se_cmd->se_sess; tmp = list_empty((struct list_head const *)(& se_cmd->se_cmd_list)); if (tmp != 0) { spin_unlock(& se_sess->sess_cmd_lock); (*((se_cmd->se_tfo)->release_cmd))(se_cmd); return; } else { } if ((unsigned int )*((unsigned char *)se_sess + 0UL) != 0U && (unsigned int )*((unsigned char *)se_cmd + 36UL) != 0U) { spin_unlock(& se_sess->sess_cmd_lock); complete(& se_cmd->cmd_wait_comp); return; } else { } list_del(& se_cmd->se_cmd_list); spin_unlock(& se_sess->sess_cmd_lock); (*((se_cmd->se_tfo)->release_cmd))(se_cmd); return; } } int target_put_sess_cmd(struct se_cmd *se_cmd ) { struct se_session *se_sess ; int tmp ; { se_sess = se_cmd->se_sess; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0)) { (*((se_cmd->se_tfo)->release_cmd))(se_cmd); return (1); } else { } tmp = kref_put_spinlock_irqsave(& se_cmd->cmd_kref, & target_release_cmd_kref, & se_sess->sess_cmd_lock); return (tmp); } } static char const __kstrtab_target_put_sess_cmd[20U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'p', 'u', 't', '_', 's', 'e', 's', 's', '_', 'c', 'm', 'd', '\000'}; struct kernel_symbol const __ksymtab_target_put_sess_cmd ; struct kernel_symbol const __ksymtab_target_put_sess_cmd = {(unsigned long )(& target_put_sess_cmd), (char const *)(& __kstrtab_target_put_sess_cmd)}; void target_sess_cmd_list_set_waiting(struct se_session *se_sess ) { struct se_cmd *se_cmd ; unsigned long flags ; raw_spinlock_t *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = spinlock_check(& se_sess->sess_cmd_lock); flags = _raw_spin_lock_irqsave(tmp); if ((unsigned int )*((unsigned char *)se_sess + 0UL) != 0U) { spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); return; } else { } se_sess->sess_tearing_down = 1U; list_splice_init(& se_sess->sess_cmd_list, & se_sess->sess_wait_list); __mptr = (struct list_head const *)se_sess->sess_wait_list.next; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff70UL; goto ldv_63802; ldv_63801: se_cmd->cmd_wait_set = 1U; __mptr___0 = (struct list_head const *)se_cmd->se_cmd_list.next; se_cmd = (struct se_cmd *)__mptr___0 + 0xffffffffffffff70UL; ldv_63802: ; if ((unsigned long )(& se_cmd->se_cmd_list) != (unsigned long )(& se_sess->sess_wait_list)) { goto ldv_63801; } else { } spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); return; } } static char const __kstrtab_target_sess_cmd_list_set_waiting[33U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 's', 'e', 's', 's', '_', 'c', 'm', 'd', '_', 'l', 'i', 's', 't', '_', 's', 'e', 't', '_', 
'w', 'a', 'i', 't', 'i', 'n', 'g', '\000'}; struct kernel_symbol const __ksymtab_target_sess_cmd_list_set_waiting ; struct kernel_symbol const __ksymtab_target_sess_cmd_list_set_waiting = {(unsigned long )(& target_sess_cmd_list_set_waiting), (char const *)(& __kstrtab_target_sess_cmd_list_set_waiting)}; void target_wait_for_sess_cmds(struct se_session *se_sess ) { struct se_cmd *se_cmd ; struct se_cmd *tmp_cmd ; unsigned long flags ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct _ddebug descriptor ; int tmp ; long tmp___0 ; struct _ddebug descriptor___0 ; int tmp___1 ; long tmp___2 ; struct list_head const *__mptr___1 ; raw_spinlock_t *tmp___3 ; int __ret_warn_on ; int tmp___4 ; long tmp___5 ; { __mptr = (struct list_head const *)se_sess->sess_wait_list.next; se_cmd = (struct se_cmd *)__mptr + 0xffffffffffffff70UL; __mptr___0 = (struct list_head const *)se_cmd->se_cmd_list.next; tmp_cmd = (struct se_cmd *)__mptr___0 + 0xffffffffffffff70UL; goto ldv_63827; ldv_63826: list_del(& se_cmd->se_cmd_list); descriptor.modname = "target_core_mod"; descriptor.function = "target_wait_for_sess_cmds"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Waiting for se_cmd: %p t_state: %d, fabric state: %d\n"; descriptor.lineno = 2545U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { tmp = (*((se_cmd->se_tfo)->get_cmd_state))(se_cmd); __dynamic_pr_debug(& descriptor, "Waiting for se_cmd: %p t_state: %d, fabric state: %d\n", se_cmd, (unsigned int )se_cmd->t_state, tmp); } else { } wait_for_completion(& se_cmd->cmd_wait_comp); descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_wait_for_sess_cmds"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "After cmd_wait_comp: se_cmd: %p t_state: %d fabric state: %d\n"; descriptor___0.lineno = 2550U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { tmp___1 = (*((se_cmd->se_tfo)->get_cmd_state))(se_cmd); __dynamic_pr_debug(& descriptor___0, "After cmd_wait_comp: se_cmd: %p t_state: %d fabric state: %d\n", se_cmd, (unsigned int )se_cmd->t_state, tmp___1); } else { } (*((se_cmd->se_tfo)->release_cmd))(se_cmd); se_cmd = tmp_cmd; __mptr___1 = (struct list_head const *)tmp_cmd->se_cmd_list.next; tmp_cmd = (struct se_cmd *)__mptr___1 + 0xffffffffffffff70UL; ldv_63827: ; if ((unsigned long )(& se_cmd->se_cmd_list) != (unsigned long )(& se_sess->sess_wait_list)) { goto ldv_63826; } else { } tmp___3 = spinlock_check(& se_sess->sess_cmd_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = list_empty((struct list_head const *)(& se_sess->sess_cmd_list)); __ret_warn_on = tmp___4 == 0; tmp___5 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___5 != 0L) { warn_slowpath_null("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c", 2556); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); 
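/* The __ret_warn_on / warn_slowpath_null() sequence above is the expanded form
 * of WARN_ON(): it fires if sess_cmd_list is still non-empty after every
 * waited-on command has been released. */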
spin_unlock_irqrestore(& se_sess->sess_cmd_lock, flags); return; } } static char const __kstrtab_target_wait_for_sess_cmds[26U] = { 't', 'a', 'r', 'g', 'e', 't', '_', 'w', 'a', 'i', 't', '_', 'f', 'o', 'r', '_', 's', 'e', 's', 's', '_', 'c', 'm', 'd', 's', '\000'}; struct kernel_symbol const __ksymtab_target_wait_for_sess_cmds ; struct kernel_symbol const __ksymtab_target_wait_for_sess_cmds = {(unsigned long )(& target_wait_for_sess_cmds), (char const *)(& __kstrtab_target_wait_for_sess_cmds)}; void transport_clear_lun_ref(struct se_lun *lun ) { { percpu_ref_kill(& lun->lun_ref); wait_for_completion(& lun->lun_ref_comp); return; } } bool transport_wait_for_tasks(struct se_cmd *cmd ) { unsigned long flags ; raw_spinlock_t *tmp ; struct _ddebug descriptor ; int tmp___0 ; long tmp___1 ; raw_spinlock_t *tmp___2 ; struct _ddebug descriptor___0 ; long tmp___3 ; { tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); if ((cmd->se_cmd_flags & 256U) == 0U && (cmd->se_cmd_flags & 16U) == 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (0); } else { } if ((cmd->se_cmd_flags & 1U) == 0U && (cmd->se_cmd_flags & 16U) == 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (0); } else { } if ((cmd->transport_state & 2U) == 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (0); } else { } cmd->transport_state = cmd->transport_state | 32U; descriptor.modname = "target_core_mod"; descriptor.function = "transport_wait_for_tasks"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n"; descriptor.lineno = 2600U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*((cmd->se_tfo)->get_cmd_state))(cmd); __dynamic_pr_debug(& descriptor, "wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, tmp___0, (unsigned int )cmd->t_state); } else { } spin_unlock_irqrestore(& cmd->t_state_lock, flags); wait_for_completion(& cmd->t_transport_stop_comp); tmp___2 = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp___2); cmd->transport_state = cmd->transport_state & 4294967261U; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "transport_wait_for_tasks"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor___0.format = "wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n"; descriptor___0.lineno = 2610U; descriptor___0.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___0, "wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); } else { } spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (1); } } static char const __kstrtab_transport_wait_for_tasks[25U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'w', 'a', 'i', 't', '_', 'f', 'o', 'r', '_', 't', 'a', 's', 'k', 's', '\000'}; struct kernel_symbol const 
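/* The __kstrtab_* / __ksymtab_* pairs throughout this file are the expanded
 * form of EXPORT_SYMBOL(): a NUL-terminated symbol name plus a struct
 * kernel_symbol entry pointing at the exported function. */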
__ksymtab_transport_wait_for_tasks ; struct kernel_symbol const __ksymtab_transport_wait_for_tasks = {(unsigned long )(& transport_wait_for_tasks), (char const *)(& __kstrtab_transport_wait_for_tasks)}; static int transport_get_sense_codes(struct se_cmd *cmd , u8 *asc , u8 *ascq ) { { *asc = cmd->scsi_asc; *ascq = cmd->scsi_ascq; return (0); } } static void transport_err_sector_info(unsigned char *buffer , sector_t bad_sector ) { { *(buffer + 7UL) = 12U; *(buffer + 8UL) = 0U; *(buffer + 9UL) = 10U; *(buffer + 10UL) = 128U; put_unaligned_be64((u64 )bad_sector, (void *)buffer + 12U); return; } } int transport_send_check_condition_and_sense(struct se_cmd *cmd , sense_reason_t reason , int from_transport ) { unsigned char *buffer ; unsigned long flags ; u8 asc ; u8 ascq ; raw_spinlock_t *tmp ; int tmp___0 ; { buffer = (unsigned char *)cmd->sense_buffer; asc = 0U; ascq = 0U; tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); if ((cmd->se_cmd_flags & 2048U) != 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); return (0); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 2048U; spin_unlock_irqrestore(& cmd->t_state_lock, flags); if (reason == 0U && from_transport != 0) { goto after_reason; } else { } if (from_transport == 0) { cmd->se_cmd_flags = cmd->se_cmd_flags | 4U; } else { } switch (reason) { case 0U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 2U; *(buffer + 12UL) = 0U; *(buffer + 13UL) = 0U; goto ldv_63887; case 1U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 37U; goto ldv_63887; case 2U: ; case 7U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 32U; goto ldv_63887; case 11U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 36U; goto ldv_63887; case 13U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 11U; *(buffer + 12UL) = 41U; *(buffer + 13UL) = 3U; goto ldv_63887; case 3U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 11U; *(buffer + 12UL) = 12U; *(buffer + 13UL) = 13U; goto ldv_63887; case 8U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 36U; goto ldv_63887; case 9U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 38U; goto ldv_63887; case 19U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 26U; goto ldv_63887; case 4U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 11U; *(buffer + 12UL) = 12U; *(buffer + 13UL) = 12U; goto ldv_63887; case 5U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 11U; *(buffer + 12UL) = 71U; *(buffer + 13UL) = 5U; goto ldv_63887; case 6U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 11U; *(buffer + 12UL) = 17U; *(buffer + 13UL) = 19U; goto ldv_63887; case 12U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 7U; *(buffer + 12UL) = 39U; goto ldv_63887; case 17U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 33U; goto ldv_63887; case 14U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 6U; core_scsi3_ua_for_check_condition(cmd, & asc, & ascq); *(buffer + 12UL) = asc; *(buffer + 13UL) = ascq; goto ldv_63887; case 15U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 2U; transport_get_sense_codes(cmd, & asc, & ascq); *(buffer + 12UL) = asc; *(buffer + 13UL) = ascq; goto ldv_63887; case 20U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 14U; *(buffer + 12UL) = 29U; *(buffer + 13UL) = 0U; goto 
ldv_63887; case 21U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 16U; *(buffer + 13UL) = 1U; transport_err_sector_info(buffer, cmd->bad_sector); goto ldv_63887; case 22U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 16U; *(buffer + 13UL) = 2U; transport_err_sector_info(buffer, cmd->bad_sector); goto ldv_63887; case 23U: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 5U; *(buffer + 12UL) = 16U; *(buffer + 13UL) = 3U; transport_err_sector_info(buffer, cmd->bad_sector); goto ldv_63887; case 10U: ; default: *buffer = 112U; *(buffer + 7UL) = 10U; *(buffer + 2UL) = 2U; *(buffer + 12UL) = 8U; goto ldv_63887; } ldv_63887: cmd->scsi_status = 2U; cmd->scsi_sense_length = 96U; after_reason: trace_target_cmd_complete(cmd); tmp___0 = (*((cmd->se_tfo)->queue_status))(cmd); return (tmp___0); } } static char const __kstrtab_transport_send_check_condition_and_sense[41U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 's', 'e', 'n', 'd', '_', 'c', 'h', 'e', 'c', 'k', '_', 'c', 'o', 'n', 'd', 'i', 't', 'i', 'o', 'n', '_', 'a', 'n', 'd', '_', 's', 'e', 'n', 's', 'e', '\000'}; struct kernel_symbol const __ksymtab_transport_send_check_condition_and_sense ; struct kernel_symbol const __ksymtab_transport_send_check_condition_and_sense = {(unsigned long )(& transport_send_check_condition_and_sense), (char const *)(& __kstrtab_transport_send_check_condition_and_sense)}; int transport_check_aborted_status(struct se_cmd *cmd , int send_status ) { struct _ddebug descriptor ; long tmp ; { if ((cmd->transport_state & 1U) == 0U) { return (0); } else { } if (send_status == 0 || (cmd->se_cmd_flags & 16384U) == 0U) { return (1); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "transport_check_aborted_status"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n"; descriptor.lineno = 2913U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", (int )*(cmd->t_task_cdb), cmd->tag); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags & 4294950911U; cmd->scsi_status = 64U; trace_target_cmd_complete(cmd); (*((cmd->se_tfo)->queue_status))(cmd); return (1); } } static char const __kstrtab_transport_check_aborted_status[31U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'c', 'h', 'e', 'c', 'k', '_', 'a', 'b', 'o', 'r', 't', 'e', 'd', '_', 's', 't', 'a', 't', 'u', 's', '\000'}; struct kernel_symbol const __ksymtab_transport_check_aborted_status ; struct kernel_symbol const __ksymtab_transport_check_aborted_status = {(unsigned long )(& transport_check_aborted_status), (char const *)(& __kstrtab_transport_check_aborted_status)}; void transport_send_task_abort(struct se_cmd *cmd ) { unsigned long flags ; raw_spinlock_t *tmp ; int tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); if ((cmd->se_cmd_flags & 2048U) != 0U) { spin_unlock_irqrestore(& cmd->t_state_lock, flags); return; } else { } spin_unlock_irqrestore(& cmd->t_state_lock, flags); if ((unsigned int )cmd->data_direction == 1U) { tmp___0 = 
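/* transport_send_task_abort(): for a WRITE (data_direction == 1U) whose data is
 * still inbound according to write_pending_status(), the TASK ABORTED status is
 * deferred by setting transport_state bit 1U and se_cmd_flags bit 16384U
 * (aborted / delayed-TAS in the original flag names), to be emitted later by
 * transport_check_aborted_status(). */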
(*((cmd->se_tfo)->write_pending_status))(cmd); if (tmp___0 != 0) { cmd->transport_state = cmd->transport_state | 1U; cmd->se_cmd_flags = cmd->se_cmd_flags | 16384U; return; } else { } } else { } cmd->scsi_status = 64U; transport_lun_remove_cmd(cmd); descriptor.modname = "target_core_mod"; descriptor.function = "transport_send_task_abort"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_transport.c"; descriptor.format = "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n"; descriptor.lineno = 2953U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", (int )*(cmd->t_task_cdb), cmd->tag); } else { } trace_target_cmd_complete(cmd); (*((cmd->se_tfo)->queue_status))(cmd); return; } } static void target_tmr_work(struct work_struct *work ) { struct se_cmd *cmd ; struct work_struct const *__mptr ; struct se_device *dev ; struct se_tmr_req *tmr ; int ret ; { __mptr = (struct work_struct const *)work; cmd = (struct se_cmd *)__mptr + 0xfffffffffffffdf8UL; dev = cmd->se_dev; tmr = cmd->se_tmr_req; switch ((int )tmr->function) { case 1: core_tmr_abort_task(dev, tmr, cmd->se_sess); goto ldv_63952; case 2: ; case 3: ; case 4: tmr->response = 4U; goto ldv_63952; case 5: ret = core_tmr_lun_reset(dev, tmr, (struct list_head *)0, (struct se_cmd *)0); tmr->response = ret == 0 ? 1U : 5U; if ((unsigned int )tmr->response == 1U) { target_ua_allocate_lun((cmd->se_sess)->se_node_acl, (u32 )cmd->orig_fe_lun, 41, 3); } else { } goto ldv_63952; case 6: tmr->response = 5U; goto ldv_63952; case 7: tmr->response = 5U; goto ldv_63952; default: printk("\vUnknown TMR function: 0x%02x.\n", (int )tmr->function); tmr->response = 5U; goto ldv_63952; } ldv_63952: cmd->t_state = 11; (*((cmd->se_tfo)->queue_tm_rsp))(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } } int transport_generic_handle_tmr(struct se_cmd *cmd ) { unsigned long flags ; raw_spinlock_t *tmp ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { tmp = spinlock_check(& cmd->t_state_lock); flags = _raw_spin_lock_irqsave(tmp); cmd->transport_state = cmd->transport_state | 2U; spin_unlock_irqrestore(& cmd->t_state_lock, flags); __init_work(& cmd->work, 0); __constr_expr_0.counter = 137438953408L; cmd->work.data = __constr_expr_0; lockdep_init_map(& cmd->work.lockdep_map, "(&cmd->work)", & __key, 0); INIT_LIST_HEAD(& cmd->work.entry); cmd->work.func = & target_tmr_work; queue_work((cmd->se_dev)->tmr_wq, & cmd->work); return (0); } } static char const __kstrtab_transport_generic_handle_tmr[29U] = { 't', 'r', 'a', 'n', 's', 'p', 'o', 'r', 't', '_', 'g', 'e', 'n', 'e', 'r', 'i', 'c', '_', 'h', 'a', 'n', 'd', 'l', 'e', '_', 't', 'm', 'r', '\000'}; struct kernel_symbol const __ksymtab_transport_generic_handle_tmr ; struct kernel_symbol const __ksymtab_transport_generic_handle_tmr = {(unsigned long )(& transport_generic_handle_tmr), (char const *)(& __kstrtab_transport_generic_handle_tmr)}; bool target_check_wce(struct se_device *dev ) { bool wce ; { wce = 0; if ((unsigned long )(dev->transport)->get_write_cache != (unsigned long )((bool (*/* const */)(struct se_device * ))0)) { wce = (*((dev->transport)->get_write_cache))(dev); } else if (dev->dev_attrib.emulate_write_cache > 0) { wce = 1; } else
{ } return (wce); } } bool target_check_fua(struct se_device *dev ) { bool tmp ; { tmp = target_check_wce(dev); return ((bool )((int )tmp && dev->dev_attrib.emulate_fua_write > 0)); } } void activate_work_5(struct work_struct *work , int state ) { { if (ldv_work_5_0 == 0) { ldv_work_struct_5_0 = work; ldv_work_5_0 = state; return; } else { } if (ldv_work_5_1 == 0) { ldv_work_struct_5_1 = work; ldv_work_5_1 = state; return; } else { } if (ldv_work_5_2 == 0) { ldv_work_struct_5_2 = work; ldv_work_5_2 = state; return; } else { } if (ldv_work_5_3 == 0) { ldv_work_struct_5_3 = work; ldv_work_5_3 = state; return; } else { } return; } } void call_and_disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 2 || ldv_work_3_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_0) { target_complete_failure_work(work); ldv_work_3_0 = 1; return; } else { } if ((ldv_work_3_1 == 2 || ldv_work_3_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_1) { target_complete_failure_work(work); ldv_work_3_1 = 1; return; } else { } if ((ldv_work_3_2 == 2 || ldv_work_3_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_2) { target_complete_failure_work(work); ldv_work_3_2 = 1; return; } else { } if ((ldv_work_3_3 == 2 || ldv_work_3_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_3) { target_complete_failure_work(work); ldv_work_3_3 = 1; return; } else { } return; } } void disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 3 || ldv_work_3_0 == 2) && (unsigned long )ldv_work_struct_3_0 == (unsigned long )work) { ldv_work_3_0 = 1; } else { } if ((ldv_work_3_1 == 3 || ldv_work_3_1 == 2) && (unsigned long )ldv_work_struct_3_1 == (unsigned long )work) { ldv_work_3_1 = 1; } else { } if ((ldv_work_3_2 == 3 || ldv_work_3_2 == 2) && (unsigned long )ldv_work_struct_3_2 == (unsigned long )work) { ldv_work_3_2 = 1; } else { } if ((ldv_work_3_3 == 3 || ldv_work_3_3 == 2) && (unsigned long )ldv_work_struct_3_3 == (unsigned long )work) { ldv_work_3_3 = 1; } else { } return; } } void invoke_work_4(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_4_0 == 2 || ldv_work_4_0 == 3) { ldv_work_4_0 = 4; target_complete_ok_work(ldv_work_struct_4_0); ldv_work_4_0 = 1; } else { } goto ldv_64001; case 1: ; if (ldv_work_4_1 == 2 || ldv_work_4_1 == 3) { ldv_work_4_1 = 4; target_complete_ok_work(ldv_work_struct_4_0); ldv_work_4_1 = 1; } else { } goto ldv_64001; case 2: ; if (ldv_work_4_2 == 2 || ldv_work_4_2 == 3) { ldv_work_4_2 = 4; target_complete_ok_work(ldv_work_struct_4_0); ldv_work_4_2 = 1; } else { } goto ldv_64001; case 3: ; if (ldv_work_4_3 == 2 || ldv_work_4_3 == 3) { ldv_work_4_3 = 4; target_complete_ok_work(ldv_work_struct_4_0); ldv_work_4_3 = 1; } else { } goto ldv_64001; default: ldv_stop(); } ldv_64001: ; return; } } void activate_work_6(struct work_struct *work , int state ) { { if (ldv_work_6_0 == 0) { ldv_work_struct_6_0 = work; ldv_work_6_0 = state; return; } else { } if (ldv_work_6_1 == 0) { ldv_work_struct_6_1 = work; ldv_work_6_1 = state; return; } else { } if (ldv_work_6_2 == 0) { ldv_work_struct_6_2 = work; ldv_work_6_2 = state; return; } else { } if (ldv_work_6_3 == 0) { ldv_work_struct_6_3 = work; ldv_work_6_3 = state; return; } else { } return; } } void invoke_work_5(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_5_0 == 2 || ldv_work_5_0 == 3) { ldv_work_5_0 = 4; target_complete_tmr_failure(ldv_work_struct_5_0); ldv_work_5_0 = 1; } else { } 
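/* The activate_work_N(), invoke_work_N(), disable_work_N() and
 * call_and_disable_work_N() functions appear to be the LDV-generated
 * environment model for workqueues: queued work items are tracked in the
 * ldv_work_N_M state variables and their handlers (target_complete_ok_work,
 * target_complete_tmr_failure, target_tmr_work, ...) are run
 * nondeterministically via __VERIFIER_nondet_int(). */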
goto ldv_64016; case 1: ; if (ldv_work_5_1 == 2 || ldv_work_5_1 == 3) { ldv_work_5_1 = 4; target_complete_tmr_failure(ldv_work_struct_5_0); ldv_work_5_1 = 1; } else { } goto ldv_64016; case 2: ; if (ldv_work_5_2 == 2 || ldv_work_5_2 == 3) { ldv_work_5_2 = 4; target_complete_tmr_failure(ldv_work_struct_5_0); ldv_work_5_2 = 1; } else { } goto ldv_64016; case 3: ; if (ldv_work_5_3 == 2 || ldv_work_5_3 == 3) { ldv_work_5_3 = 4; target_complete_tmr_failure(ldv_work_struct_5_0); ldv_work_5_3 = 1; } else { } goto ldv_64016; default: ldv_stop(); } ldv_64016: ; return; } } void ldv_initialize_trace_event_class_86(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_target_sequencer_start_group0 = (struct trace_event_call *)tmp; return; } } void call_and_disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 2 || ldv_work_4_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_0) { target_complete_ok_work(work); ldv_work_4_0 = 1; return; } else { } if ((ldv_work_4_1 == 2 || ldv_work_4_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_1) { target_complete_ok_work(work); ldv_work_4_1 = 1; return; } else { } if ((ldv_work_4_2 == 2 || ldv_work_4_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_2) { target_complete_ok_work(work); ldv_work_4_2 = 1; return; } else { } if ((ldv_work_4_3 == 2 || ldv_work_4_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_4_3) { target_complete_ok_work(work); ldv_work_4_3 = 1; return; } else { } return; } } void invoke_work_6(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_6_0 == 2 || ldv_work_6_0 == 3) { ldv_work_6_0 = 4; target_tmr_work(ldv_work_struct_6_0); ldv_work_6_0 = 1; } else { } goto ldv_64035; case 1: ; if (ldv_work_6_1 == 2 || ldv_work_6_1 == 3) { ldv_work_6_1 = 4; target_tmr_work(ldv_work_struct_6_0); ldv_work_6_1 = 1; } else { } goto ldv_64035; case 2: ; if (ldv_work_6_2 == 2 || ldv_work_6_2 == 3) { ldv_work_6_2 = 4; target_tmr_work(ldv_work_struct_6_0); ldv_work_6_2 = 1; } else { } goto ldv_64035; case 3: ; if (ldv_work_6_3 == 2 || ldv_work_6_3 == 3) { ldv_work_6_3 = 4; target_tmr_work(ldv_work_struct_6_0); ldv_work_6_3 = 1; } else { } goto ldv_64035; default: ldv_stop(); } ldv_64035: ; return; } } void activate_work_3(struct work_struct *work , int state ) { { if (ldv_work_3_0 == 0) { ldv_work_struct_3_0 = work; ldv_work_3_0 = state; return; } else { } if (ldv_work_3_1 == 0) { ldv_work_struct_3_1 = work; ldv_work_3_1 = state; return; } else { } if (ldv_work_3_2 == 0) { ldv_work_struct_3_2 = work; ldv_work_3_2 = state; return; } else { } if (ldv_work_3_3 == 0) { ldv_work_struct_3_3 = work; ldv_work_3_3 = state; return; } else { } return; } } void disable_work_5(struct work_struct *work ) { { if ((ldv_work_5_0 == 3 || ldv_work_5_0 == 2) && (unsigned long )ldv_work_struct_5_0 == (unsigned long )work) { ldv_work_5_0 = 1; } else { } if ((ldv_work_5_1 == 3 || ldv_work_5_1 == 2) && (unsigned long )ldv_work_struct_5_1 == (unsigned long )work) { ldv_work_5_1 = 1; } else { } if ((ldv_work_5_2 == 3 || ldv_work_5_2 == 2) && (unsigned long )ldv_work_struct_5_2 == (unsigned long )work) { ldv_work_5_2 = 1; } else { } if ((ldv_work_5_3 == 3 || ldv_work_5_3 == 2) && (unsigned long )ldv_work_struct_5_3 == (unsigned long )work) { ldv_work_5_3 = 1; } else { } return; } } void call_and_disable_all_6(int state ) { { if (ldv_work_6_0 == state) { call_and_disable_work_6(ldv_work_struct_6_0); } else { } if (ldv_work_6_1 == state) { 
call_and_disable_work_6(ldv_work_struct_6_1); } else { } if (ldv_work_6_2 == state) { call_and_disable_work_6(ldv_work_struct_6_2); } else { } if (ldv_work_6_3 == state) { call_and_disable_work_6(ldv_work_struct_6_3); } else { } return; } } void activate_work_4(struct work_struct *work , int state ) { { if (ldv_work_4_0 == 0) { ldv_work_struct_4_0 = work; ldv_work_4_0 = state; return; } else { } if (ldv_work_4_1 == 0) { ldv_work_struct_4_1 = work; ldv_work_4_1 = state; return; } else { } if (ldv_work_4_2 == 0) { ldv_work_struct_4_2 = work; ldv_work_4_2 = state; return; } else { } if (ldv_work_4_3 == 0) { ldv_work_struct_4_3 = work; ldv_work_4_3 = state; return; } else { } return; } } void call_and_disable_all_5(int state ) { { if (ldv_work_5_0 == state) { call_and_disable_work_5(ldv_work_struct_5_0); } else { } if (ldv_work_5_1 == state) { call_and_disable_work_5(ldv_work_struct_5_1); } else { } if (ldv_work_5_2 == state) { call_and_disable_work_5(ldv_work_struct_5_2); } else { } if (ldv_work_5_3 == state) { call_and_disable_work_5(ldv_work_struct_5_3); } else { } return; } } void ldv_initialize_trace_event_class_85(void) { void *tmp ; { tmp = ldv_init_zalloc(144UL); event_class_target_cmd_complete_group0 = (struct trace_event_call *)tmp; return; } } void invoke_work_3(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_3_0 == 2 || ldv_work_3_0 == 3) { ldv_work_3_0 = 4; target_complete_failure_work(ldv_work_struct_3_0); ldv_work_3_0 = 1; } else { } goto ldv_64066; case 1: ; if (ldv_work_3_1 == 2 || ldv_work_3_1 == 3) { ldv_work_3_1 = 4; target_complete_failure_work(ldv_work_struct_3_0); ldv_work_3_1 = 1; } else { } goto ldv_64066; case 2: ; if (ldv_work_3_2 == 2 || ldv_work_3_2 == 3) { ldv_work_3_2 = 4; target_complete_failure_work(ldv_work_struct_3_0); ldv_work_3_2 = 1; } else { } goto ldv_64066; case 3: ; if (ldv_work_3_3 == 2 || ldv_work_3_3 == 3) { ldv_work_3_3 = 4; target_complete_failure_work(ldv_work_struct_3_0); ldv_work_3_3 = 1; } else { } goto ldv_64066; default: ldv_stop(); } ldv_64066: ; return; } } void work_init_5(void) { { ldv_work_5_0 = 0; ldv_work_5_1 = 0; ldv_work_5_2 = 0; ldv_work_5_3 = 0; return; } } void call_and_disable_all_4(int state ) { { if (ldv_work_4_0 == state) { call_and_disable_work_4(ldv_work_struct_4_0); } else { } if (ldv_work_4_1 == state) { call_and_disable_work_4(ldv_work_struct_4_1); } else { } if (ldv_work_4_2 == state) { call_and_disable_work_4(ldv_work_struct_4_2); } else { } if (ldv_work_4_3 == state) { call_and_disable_work_4(ldv_work_struct_4_3); } else { } return; } } void disable_work_4(struct work_struct *work ) { { if ((ldv_work_4_0 == 3 || ldv_work_4_0 == 2) && (unsigned long )ldv_work_struct_4_0 == (unsigned long )work) { ldv_work_4_0 = 1; } else { } if ((ldv_work_4_1 == 3 || ldv_work_4_1 == 2) && (unsigned long )ldv_work_struct_4_1 == (unsigned long )work) { ldv_work_4_1 = 1; } else { } if ((ldv_work_4_2 == 3 || ldv_work_4_2 == 2) && (unsigned long )ldv_work_struct_4_2 == (unsigned long )work) { ldv_work_4_2 = 1; } else { } if ((ldv_work_4_3 == 3 || ldv_work_4_3 == 2) && (unsigned long )ldv_work_struct_4_3 == (unsigned long )work) { ldv_work_4_3 = 1; } else { } return; } } void work_init_4(void) { { ldv_work_4_0 = 0; ldv_work_4_1 = 0; ldv_work_4_2 = 0; ldv_work_4_3 = 0; return; } } void call_and_disable_all_3(int state ) { { if (ldv_work_3_0 == state) { call_and_disable_work_3(ldv_work_struct_3_0); } else { } if (ldv_work_3_1 == state) { call_and_disable_work_3(ldv_work_struct_3_1); } else { } if 
(ldv_work_3_2 == state) { call_and_disable_work_3(ldv_work_struct_3_2); } else { } if (ldv_work_3_3 == state) { call_and_disable_work_3(ldv_work_struct_3_3); } else { } return; } } void work_init_3(void) { { ldv_work_3_0 = 0; ldv_work_3_1 = 0; ldv_work_3_2 = 0; ldv_work_3_3 = 0; return; } } void call_and_disable_work_5(struct work_struct *work ) { { if ((ldv_work_5_0 == 2 || ldv_work_5_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_0) { target_complete_tmr_failure(work); ldv_work_5_0 = 1; return; } else { } if ((ldv_work_5_1 == 2 || ldv_work_5_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_1) { target_complete_tmr_failure(work); ldv_work_5_1 = 1; return; } else { } if ((ldv_work_5_2 == 2 || ldv_work_5_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_2) { target_complete_tmr_failure(work); ldv_work_5_2 = 1; return; } else { } if ((ldv_work_5_3 == 2 || ldv_work_5_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_5_3) { target_complete_tmr_failure(work); ldv_work_5_3 = 1; return; } else { } return; } } void work_init_6(void) { { ldv_work_6_0 = 0; ldv_work_6_1 = 0; ldv_work_6_2 = 0; ldv_work_6_3 = 0; return; } } void disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 3 || ldv_work_6_0 == 2) && (unsigned long )ldv_work_struct_6_0 == (unsigned long )work) { ldv_work_6_0 = 1; } else { } if ((ldv_work_6_1 == 3 || ldv_work_6_1 == 2) && (unsigned long )ldv_work_struct_6_1 == (unsigned long )work) { ldv_work_6_1 = 1; } else { } if ((ldv_work_6_2 == 3 || ldv_work_6_2 == 2) && (unsigned long )ldv_work_struct_6_2 == (unsigned long )work) { ldv_work_6_2 = 1; } else { } if ((ldv_work_6_3 == 3 || ldv_work_6_3 == 2) && (unsigned long )ldv_work_struct_6_3 == (unsigned long )work) { ldv_work_6_3 = 1; } else { } return; } } void call_and_disable_work_6(struct work_struct *work ) { { if ((ldv_work_6_0 == 2 || ldv_work_6_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_0) { target_tmr_work(work); ldv_work_6_0 = 1; return; } else { } if ((ldv_work_6_1 == 2 || ldv_work_6_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_1) { target_tmr_work(work); ldv_work_6_1 = 1; return; } else { } if ((ldv_work_6_2 == 2 || ldv_work_6_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_2) { target_tmr_work(work); ldv_work_6_2 = 1; return; } else { } if ((ldv_work_6_3 == 2 || ldv_work_6_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_6_3) { target_tmr_work(work); ldv_work_6_3 = 1; return; } else { } return; } } void ldv_main_exported_85(void) { enum trace_reg ldvarg454 ; void *ldvarg453 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg453 = tmp; ldv_memset((void *)(& ldvarg454), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_85 == 1) { trace_event_define_fields_target_cmd_complete(event_class_target_cmd_complete_group0); ldv_state_variable_85 = 1; } else { } goto ldv_64111; case 1: ; if (ldv_state_variable_85 == 1) { trace_event_raw_init(event_class_target_cmd_complete_group0); ldv_state_variable_85 = 1; } else { } goto ldv_64111; case 2: ; if (ldv_state_variable_85 == 1) { trace_event_reg(event_class_target_cmd_complete_group0, ldvarg454, ldvarg453); ldv_state_variable_85 = 1; } else { } goto ldv_64111; default: ldv_stop(); } ldv_64111: ; return; } } void ldv_main_exported_87(void) { struct trace_event *ldvarg318 ; void *tmp ; struct trace_iterator *ldvarg317 ; void *tmp___0 ; int ldvarg319 ; int 
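/* ldv_main_exported_85() .. ldv_main_exported_88(): harness entry points that
 * drive the callbacks of the target_cmd_complete and target_sequencer_start
 * trace event classes (trace_event_reg, trace_event_raw_init,
 * trace_raw_output_*) with nondeterministically chosen operations. */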
tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg318 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg317 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg319), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_87 == 1) { trace_raw_output_target_cmd_complete(ldvarg317, ldvarg319, ldvarg318); ldv_state_variable_87 = 1; } else { } goto ldv_64122; default: ldv_stop(); } ldv_64122: ; return; } } void ldv_main_exported_88(void) { struct trace_event *ldvarg378 ; void *tmp ; struct trace_iterator *ldvarg377 ; void *tmp___0 ; int ldvarg379 ; int tmp___1 ; { tmp = ldv_init_zalloc(48UL); ldvarg378 = (struct trace_event *)tmp; tmp___0 = ldv_init_zalloc(8560UL); ldvarg377 = (struct trace_iterator *)tmp___0; ldv_memset((void *)(& ldvarg379), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_88 == 1) { trace_raw_output_target_sequencer_start(ldvarg377, ldvarg379, ldvarg378); ldv_state_variable_88 = 1; } else { } goto ldv_64131; default: ldv_stop(); } ldv_64131: ; return; } } void ldv_main_exported_86(void) { enum trace_reg ldvarg126 ; void *ldvarg125 ; void *tmp ; int tmp___0 ; { tmp = ldv_init_zalloc(1UL); ldvarg125 = tmp; ldv_memset((void *)(& ldvarg126), 0, 4UL); tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (ldv_state_variable_86 == 1) { trace_event_define_fields_target_sequencer_start(event_class_target_sequencer_start_group0); ldv_state_variable_86 = 1; } else { } goto ldv_64139; case 1: ; if (ldv_state_variable_86 == 1) { trace_event_raw_init(event_class_target_sequencer_start_group0); ldv_state_variable_86 = 1; } else { } goto ldv_64139; case 2: ; if (ldv_state_variable_86 == 1) { trace_event_reg(event_class_target_sequencer_start_group0, ldvarg126, ldvarg125); ldv_state_variable_86 = 1; } else { } goto ldv_64139; default: ldv_stop(); } ldv_64139: ; return; } } bool ldv_queue_work_on_417(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_418(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_419(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_420(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_421(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_unlock_422(struct mutex 
*ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_423(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_424(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_425(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_426(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_427(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_428(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_destroy_workqueue_429(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } void ldv_mutex_lock_430(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_431(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; __inline static __u32 __swab32p(__u32 const *p ) { __u32 tmp ; { tmp = __fswab32(*p); return (tmp); } } __inline static __u32 __be32_to_cpup(__be32 const *p ) { __u32 tmp ; { tmp = __swab32p(p); return (tmp); } } extern int memcmp(void const * , void const * , size_t ) ; int ldv_mutex_trylock_459(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_457(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_460(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_461(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_456(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_458(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_462(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_451(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_453(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_452(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_455(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_454(struct workqueue_struct *ldv_func_arg1 ) ; extern __u16 crc_t10dif(unsigned char const * , size_t ) ; extern __u16 crc_t10dif_update(__u16 , unsigned char const * , size_t ) ; __inline static u32 get_unaligned_be32(void const *p ) { __u32 tmp ; { tmp = __be32_to_cpup((__be32 const *)p); return (tmp); } } extern int down_interruptible(struct semaphore * ) ; extern void up(struct semaphore * ) ; __inline static void pagefault_disabled_inc(void) { struct task_struct *tmp ; { tmp = get_current(); tmp->pagefault_disabled = tmp->pagefault_disabled + 1; return; } } __inline static void pagefault_disabled_dec(void) { struct 
task_struct *tmp ; int __ret_warn_on ; struct task_struct *tmp___0 ; long tmp___1 ; { tmp = get_current(); tmp->pagefault_disabled = tmp->pagefault_disabled - 1; tmp___0 = get_current(); __ret_warn_on = tmp___0->pagefault_disabled < 0; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("include/linux/uaccess.h", 15); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); return; } } __inline static void pagefault_disable(void) { { pagefault_disabled_inc(); __asm__ volatile ("": : : "memory"); return; } } __inline static void pagefault_enable(void) { { __asm__ volatile ("": : : "memory"); pagefault_disabled_dec(); return; } } __inline static void *kmap_atomic(struct page *page ) { void *tmp ; { __preempt_count_add___0(1); __asm__ volatile ("": : : "memory"); pagefault_disable(); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void __kunmap_atomic(void *addr ) { { pagefault_enable(); __asm__ volatile ("": : : "memory"); __preempt_count_sub___0(1); return; } } __inline static void sg_assign_page___0(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (90), "i" (12UL)); ldv_32067: ; goto ldv_32067; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (92), "i" (12UL)); ldv_32068: ; goto ldv_32068; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (93), "i" (12UL)); ldv_32069: ; goto ldv_32069; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page___0(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page___0(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static struct page *sg_page___0(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_32079: ; goto ldv_32079; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_32080: ; goto ldv_32080; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } extern size_t sg_copy_to_buffer(struct scatterlist * , unsigned int , void * , size_t ) ; extern void sg_miter_start(struct sg_mapping_iter * , struct scatterlist * , unsigned int , unsigned int ) ; extern bool 
sg_miter_next(struct sg_mapping_iter * ) ; extern void sg_miter_stop(struct sg_mapping_iter * ) ; sense_reason_t spc_parse_cdb(struct se_cmd *cmd , unsigned int *size ) ; sense_reason_t sbc_parse_cdb(struct se_cmd *cmd , struct sbc_ops *ops ) ; u32 sbc_get_device_type(struct se_device *dev ) ; sector_t sbc_get_write_same_sectors(struct se_cmd *cmd ) ; void sbc_dif_copy_prot(struct se_cmd *cmd , unsigned int sectors , bool read , struct scatterlist *sg , int sg_off ) ; static sense_reason_t sbc_check_prot(struct se_device *dev , struct se_cmd *cmd , unsigned char *cdb , u32 sectors , bool is_write ) ; static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd ) ; static sense_reason_t sbc_emulate_readcapacity(struct se_cmd *cmd ) { struct se_device *dev ; unsigned char *cdb ; unsigned long long blocks_long ; sector_t tmp ; unsigned char *rbuf ; unsigned char buf[8U] ; u32 blocks ; void *tmp___0 ; u32 __min1 ; u32 __min2 ; { dev = cmd->se_dev; cdb = cmd->t_task_cdb; tmp = (*((dev->transport)->get_blocks))(dev); blocks_long = (unsigned long long )tmp; if (((int )*(cdb + 8UL) & 1) == 0 && (unsigned int )((((int )*(cdb + 2UL) | (int )*(cdb + 3UL)) | (int )*(cdb + 4UL)) | (int )*(cdb + 5UL)) != 0U) { return (8U); } else { } if (blocks_long > 4294967294ULL) { blocks = 4294967295U; } else { blocks = (unsigned int )blocks_long; } buf[0] = (unsigned char )(blocks >> 24); buf[1] = (unsigned char )(blocks >> 16); buf[2] = (unsigned char )(blocks >> 8); buf[3] = (unsigned char )blocks; buf[4] = (unsigned char )(dev->dev_attrib.block_size >> 24); buf[5] = (unsigned char )(dev->dev_attrib.block_size >> 16); buf[6] = (unsigned char )(dev->dev_attrib.block_size >> 8); buf[7] = (unsigned char )dev->dev_attrib.block_size; tmp___0 = transport_kmap_data_sg(cmd); rbuf = (unsigned char *)tmp___0; if ((unsigned long )rbuf != (unsigned long )((unsigned char *)0U)) { __min1 = 8U; __min2 = cmd->data_length; memcpy((void *)rbuf, (void const *)(& buf), (size_t )(__min1 < __min2 ? 
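/*
 * Annotation: the conditional expression interrupted by this comment is
 * CIL's expansion of min(sizeof(buf), cmd->data_length); only that many
 * bytes of the READ CAPACITY (10) response are copied into the mapped data
 * buffer. The 8-byte response assembled in buf[] above is, in big-endian
 * order:
 *   bytes 0-3  LBA of the last logical block, clamped to 0xffffffff when the
 *              device holds more than 2^32 blocks (telling the initiator to
 *              issue READ CAPACITY (16) instead);
 *   bytes 4-7  logical block length in bytes (dev_attrib.block_size).
 * A rough, self-contained sketch of the same packing (illustrative only,
 * using <stdint.h> types rather than the kernel's helpers):
 *
 *   static void pack_readcap10(uint8_t buf[8], uint64_t last_lba,
 *                              uint32_t block_size)
 *   {
 *       uint32_t lba32 = last_lba > 0xfffffffeULL ? 0xffffffffU
 *                                                 : (uint32_t)last_lba;
 *       for (int i = 0; i < 4; i++) {            // big-endian byte stores
 *           buf[i]     = (uint8_t)(lba32      >> (24 - 8 * i));
 *           buf[4 + i] = (uint8_t)(block_size >> (24 - 8 * i));
 *       }
 *   }
 */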
__min1 : __min2)); transport_kunmap_data_sg(cmd); } else { } target_complete_cmd_with_length(cmd, 0, 8); return (0U); } } static sense_reason_t sbc_emulate_readcapacity_16(struct se_cmd *cmd ) { struct se_device *dev ; struct se_session *sess ; int pi_prot_type ; unsigned char *rbuf ; unsigned char buf[32U] ; unsigned long long blocks ; sector_t tmp ; unsigned int tmp___0 ; u16 lalba ; sector_t tmp___1 ; void *tmp___2 ; u32 __min1 ; u32 __min2 ; { dev = cmd->se_dev; sess = cmd->se_sess; pi_prot_type = (int )dev->dev_attrib.pi_prot_type; tmp = (*((dev->transport)->get_blocks))(dev); blocks = (unsigned long long )tmp; memset((void *)(& buf), 0, 32UL); buf[0] = (unsigned char )(blocks >> 56); buf[1] = (unsigned char )(blocks >> 48); buf[2] = (unsigned char )(blocks >> 40); buf[3] = (unsigned char )(blocks >> 32); buf[4] = (unsigned char )(blocks >> 24); buf[5] = (unsigned char )(blocks >> 16); buf[6] = (unsigned char )(blocks >> 8); buf[7] = (unsigned char )blocks; buf[8] = (unsigned char )(dev->dev_attrib.block_size >> 24); buf[9] = (unsigned char )(dev->dev_attrib.block_size >> 16); buf[10] = (unsigned char )(dev->dev_attrib.block_size >> 8); buf[11] = (unsigned char )dev->dev_attrib.block_size; if (((unsigned int )sess->sup_prot_ops & 48U) != 0U) { if (pi_prot_type == 0) { pi_prot_type = (int )sess->sess_prot_type; } else { } if (pi_prot_type != 0) { buf[12] = (unsigned char )((int )((signed char )((pi_prot_type + -1) << 1)) | 1); } else { } } else { } if ((unsigned long )(dev->transport)->get_lbppbe != (unsigned long )((unsigned int (*/* const */)(struct se_device * ))0)) { tmp___0 = (*((dev->transport)->get_lbppbe))(dev); buf[13] = (unsigned int )((unsigned char )tmp___0) & 15U; } else { } if ((unsigned long )(dev->transport)->get_alignment_offset_lbas != (unsigned long )((sector_t (*/* const */)(struct se_device * ))0)) { tmp___1 = (*((dev->transport)->get_alignment_offset_lbas))(dev); lalba = (u16 )tmp___1; buf[14] = (unsigned int )((unsigned char )((int )lalba >> 8)) & 63U; buf[15] = (unsigned char )lalba; } else { } if (dev->dev_attrib.emulate_tpu != 0 || dev->dev_attrib.emulate_tpws != 0) { buf[14] = (unsigned int )buf[14] | 128U; } else { } tmp___2 = transport_kmap_data_sg(cmd); rbuf = (unsigned char *)tmp___2; if ((unsigned long )rbuf != (unsigned long )((unsigned char *)0U)) { __min1 = 32U; __min2 = cmd->data_length; memcpy((void *)rbuf, (void const *)(& buf), (size_t )(__min1 < __min2 ? 
__min1 : __min2)); transport_kunmap_data_sg(cmd); } else { } target_complete_cmd_with_length(cmd, 0, 32); return (0U); } } sector_t sbc_get_write_same_sectors(struct se_cmd *cmd ) { u32 num_blocks ; u16 tmp ; sector_t tmp___0 ; { if ((unsigned int )*(cmd->t_task_cdb) == 65U) { tmp = get_unaligned_be16((void const *)cmd->t_task_cdb + 7U); num_blocks = (u32 )tmp; } else if ((unsigned int )*(cmd->t_task_cdb) == 147U) { num_blocks = get_unaligned_be32((void const *)cmd->t_task_cdb + 10U); } else { num_blocks = get_unaligned_be32((void const *)cmd->t_task_cdb + 28U); } if (num_blocks != 0U) { return ((sector_t )num_blocks); } else { } tmp___0 = (*(((cmd->se_dev)->transport)->get_blocks))(cmd->se_dev); return ((sector_t )(((unsigned long long )tmp___0 - cmd->t_task_lba) + 1ULL)); } } static char const __kstrtab_sbc_get_write_same_sectors[27U] = { 's', 'b', 'c', '_', 'g', 'e', 't', '_', 'w', 'r', 'i', 't', 'e', '_', 's', 'a', 'm', 'e', '_', 's', 'e', 'c', 't', 'o', 'r', 's', '\000'}; struct kernel_symbol const __ksymtab_sbc_get_write_same_sectors ; struct kernel_symbol const __ksymtab_sbc_get_write_same_sectors = {(unsigned long )(& sbc_get_write_same_sectors), (char const *)(& __kstrtab_sbc_get_write_same_sectors)}; static sense_reason_t sbc_execute_write_same_unmap(struct se_cmd *cmd ) { struct sbc_ops *ops ; sector_t nolb ; sector_t tmp ; sense_reason_t ret ; { ops = (struct sbc_ops *)cmd->protocol_data; tmp = sbc_get_write_same_sectors(cmd); nolb = tmp; if (nolb != 0UL) { ret = (*(ops->execute_unmap))(cmd, (sector_t )cmd->t_task_lba, nolb); if (ret != 0U) { return (ret); } else { } } else { } target_complete_cmd(cmd, 0); return (0U); } } static sense_reason_t sbc_emulate_noop(struct se_cmd *cmd ) { { target_complete_cmd(cmd, 0); return (0U); } } __inline static u32 sbc_get_size(struct se_cmd *cmd , u32 sectors ) { { return ((cmd->se_dev)->dev_attrib.block_size * sectors); } } __inline static u32 transport_get_sectors_6(unsigned char *cdb ) { { return ((int )*(cdb + 4UL) != 0 ? 
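/*
 * Annotation: the conditional interrupted here implements the SBC rule that
 * a READ(6)/WRITE(6) TRANSFER LENGTH of zero means 256 blocks. The
 * transport_get_sectors_{10,12,16,32} and transport_lba_{21,32,64,64_ext}
 * helpers that follow are open-coded big-endian loads from fixed CDB byte
 * offsets (for example, the 10-byte CDBs keep the LBA in bytes 2-5 and the
 * transfer length in bytes 7-8). A minimal sketch of the same decoding,
 * assuming plain <stdint.h> types (illustrative, not the kernel's
 * get_unaligned_be* helpers):
 *
 *   static uint32_t cdb_be32(const uint8_t *p)
 *   {
 *       return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
 *              ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
 *   }
 *   // READ(10)/WRITE(10): lba = cdb_be32(cdb + 2),
 *   //                     sectors = (cdb[7] << 8) | cdb[8]
 */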
(u32 )((int )*(cdb + 4UL)) : 256U); } } __inline static u32 transport_get_sectors_10(unsigned char *cdb ) { { return ((unsigned int )((int )*(cdb + 7UL) << 8) + (unsigned int )*(cdb + 8UL)); } } __inline static u32 transport_get_sectors_12(unsigned char *cdb ) { { return ((((unsigned int )((int )*(cdb + 6UL) << 24) + (unsigned int )((int )*(cdb + 7UL) << 16)) + (unsigned int )((int )*(cdb + 8UL) << 8)) + (unsigned int )*(cdb + 9UL)); } } __inline static u32 transport_get_sectors_16(unsigned char *cdb ) { { return ((((unsigned int )((int )*(cdb + 10UL) << 24) + (unsigned int )((int )*(cdb + 11UL) << 16)) + (unsigned int )((int )*(cdb + 12UL) << 8)) + (unsigned int )*(cdb + 13UL)); } } __inline static u32 transport_get_sectors_32(unsigned char *cdb ) { { return ((((unsigned int )((int )*(cdb + 28UL) << 24) + (unsigned int )((int )*(cdb + 29UL) << 16)) + (unsigned int )((int )*(cdb + 30UL) << 8)) + (unsigned int )*(cdb + 31UL)); } } __inline static u32 transport_lba_21(unsigned char *cdb ) { { return ((u32 )(((((int )*(cdb + 1UL) & 31) << 16) | ((int )*(cdb + 2UL) << 8)) | (int )*(cdb + 3UL))); } } __inline static u32 transport_lba_32(unsigned char *cdb ) { { return ((u32 )(((((int )*(cdb + 2UL) << 24) | ((int )*(cdb + 3UL) << 16)) | ((int )*(cdb + 4UL) << 8)) | (int )*(cdb + 5UL))); } } __inline static unsigned long long transport_lba_64(unsigned char *cdb ) { unsigned int __v1 ; unsigned int __v2 ; { __v1 = (unsigned int )(((((int )*(cdb + 2UL) << 24) | ((int )*(cdb + 3UL) << 16)) | ((int )*(cdb + 4UL) << 8)) | (int )*(cdb + 5UL)); __v2 = (unsigned int )(((((int )*(cdb + 6UL) << 24) | ((int )*(cdb + 7UL) << 16)) | ((int )*(cdb + 8UL) << 8)) | (int )*(cdb + 9UL)); return ((unsigned long long )__v2 | ((unsigned long long )__v1 << 32)); } } __inline static unsigned long long transport_lba_64_ext(unsigned char *cdb ) { unsigned int __v1 ; unsigned int __v2 ; { __v1 = (unsigned int )(((((int )*(cdb + 12UL) << 24) | ((int )*(cdb + 13UL) << 16)) | ((int )*(cdb + 14UL) << 8)) | (int )*(cdb + 15UL)); __v2 = (unsigned int )(((((int )*(cdb + 16UL) << 24) | ((int )*(cdb + 17UL) << 16)) | ((int )*(cdb + 18UL) << 8)) | (int )*(cdb + 19UL)); return ((unsigned long long )__v2 | ((unsigned long long )__v1 << 32)); } } static sense_reason_t sbc_setup_write_same(struct se_cmd *cmd , unsigned char *flags , struct sbc_ops *ops ) { struct se_device *dev ; sector_t end_lba ; sector_t tmp ; unsigned int sectors ; sector_t tmp___0 ; sense_reason_t ret ; { dev = cmd->se_dev; tmp = (*((dev->transport)->get_blocks))(dev); end_lba = tmp + 1UL; tmp___0 = sbc_get_write_same_sectors(cmd); sectors = (unsigned int )tmp___0; if (((int )*flags & 4) != 0 || ((int )*flags & 2) != 0) { printk("\vWRITE_SAME PBDATA and LBDATA bits not supported for Block Discard Emulation\n"); return (2U); } else { } if ((cmd->se_dev)->dev_attrib.max_write_same_len < sectors) { printk("\fWRITE_SAME sectors: %u exceeds max_write_same_len: %u\n", sectors, (cmd->se_dev)->dev_attrib.max_write_same_len); return (8U); } else { } if (cmd->t_task_lba + (unsigned long long )sectors < cmd->t_task_lba || cmd->t_task_lba + (unsigned long long )sectors > (unsigned long long )end_lba) { printk("\vWRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", (unsigned long long )end_lba, cmd->t_task_lba, sectors); return (17U); } else { } if (((int )*flags & 16) != 0) { printk("\fWRITE SAME with ANCHOR not supported\n"); return (8U); } else { } if (((int )*flags & 8) != 0) { if ((unsigned long )ops->execute_unmap == (unsigned long )((sense_reason_t (*)(struct 
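/*
 * Annotation for sbc_setup_write_same(), the function this comment sits
 * inside: "flags" points at CDB byte 1 of WRITE SAME(10/16/32). The bit
 * tests above appear to correspond to the SBC-3 flag bits 0x02 LBDATA and
 * 0x04 PBDATA (both rejected), 0x10 ANCHOR (rejected), and 0x08 UNMAP, which
 * routes the command to ops->execute_unmap when the backend advertises
 * emulate_tpws; the sector count is additionally bounded by
 * max_write_same_len and by the device's last LBA before the command is
 * accepted.
 */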
se_cmd * , sector_t , sector_t ))0)) { return (2U); } else { } if (dev->dev_attrib.emulate_tpws == 0) { printk("\vGot WRITE_SAME w/ UNMAP=1, but backend device has emulate_tpws disabled\n"); return (2U); } else { } cmd->execute_cmd = & sbc_execute_write_same_unmap; return (0U); } else { } if ((unsigned long )ops->execute_write_same == (unsigned long )((sense_reason_t (*)(struct se_cmd * ))0)) { return (2U); } else { } ret = sbc_check_prot(dev, cmd, cmd->t_task_cdb, sectors, 1); if (ret != 0U) { return (ret); } else { } cmd->execute_cmd = ops->execute_write_same; return (0U); } } static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd , bool success ) { unsigned char *buf ; unsigned char *addr ; struct scatterlist *sg ; unsigned int offset ; sense_reason_t ret ; int i ; int count ; void *tmp ; struct page *tmp___0 ; void *tmp___1 ; { ret = 0U; tmp = kmalloc((size_t )cmd->data_length, 208U); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { printk("\vUnable to allocate xor_callback buf\n"); return (18U); } else { } sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, (void *)buf, (size_t )cmd->data_length); offset = 0U; count = 0; sg = cmd->t_bidi_data_sg; goto ldv_59454; ldv_59453: tmp___0 = sg_page___0(sg); tmp___1 = kmap_atomic(tmp___0); addr = (unsigned char *)tmp___1; if ((unsigned long )addr == (unsigned long )((unsigned char *)0U)) { ret = 18U; goto out; } else { } i = 0; goto ldv_59451; ldv_59450: *(addr + ((unsigned long )sg->offset + (unsigned long )i)) = (int )*(addr + ((unsigned long )sg->offset + (unsigned long )i)) ^ (int )*(buf + ((unsigned long )offset + (unsigned long )i)); i = i + 1; ldv_59451: ; if ((unsigned int )i < sg->length) { goto ldv_59450; } else { } offset = sg->length + offset; __kunmap_atomic((void *)addr); count = count + 1; sg = sg_next(sg); ldv_59454: ; if ((unsigned int )count < cmd->t_bidi_data_nents) { goto ldv_59453; } else { } out: kfree((void const *)buf); return (ret); } } static sense_reason_t sbc_execute_rw(struct se_cmd *cmd ) { struct sbc_ops *ops ; sense_reason_t tmp ; { ops = (struct sbc_ops *)cmd->protocol_data; tmp = (*(ops->execute_rw))(cmd, cmd->t_data_sg, cmd->t_data_nents, cmd->data_direction); return (tmp); } } static sense_reason_t compare_and_write_post(struct se_cmd *cmd , bool success ) { struct se_device *dev ; { dev = cmd->se_dev; spin_lock_irq(& cmd->t_state_lock); if ((cmd->transport_state & 16U) != 0U && (unsigned int )cmd->scsi_status == 0U) { cmd->se_cmd_flags = cmd->se_cmd_flags | 1048576U; } else { } spin_unlock_irq(& cmd->t_state_lock); up(& dev->caw_sem); return (0U); } } static sense_reason_t compare_and_write_callback(struct se_cmd *cmd , bool success ) { struct se_device *dev ; struct scatterlist *write_sg ; struct scatterlist *sg ; unsigned char *buf ; unsigned char *addr ; struct sg_mapping_iter m ; unsigned int offset ; unsigned int len ; unsigned int nlbas ; unsigned int block_size___0 ; unsigned int compare_len ; sense_reason_t ret ; int rc ; int i ; void *tmp ; void *tmp___0 ; size_t tmp___1 ; struct page *tmp___2 ; void *tmp___3 ; unsigned int _min1 ; unsigned int _min2 ; int tmp___4 ; { dev = cmd->se_dev; write_sg = (struct scatterlist *)0; buf = (unsigned char *)0U; offset = 0U; nlbas = cmd->t_task_nolb; block_size___0 = dev->dev_attrib.block_size; compare_len = nlbas * block_size___0; ret = 0U; if (! 
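/*
 * Annotation: xdreadwrite_callback() above implements the data phase of
 * XDWRITEREAD_10/32. The write payload is first copied out of t_data_sg
 * into a temporary buffer; once the backend I/O completes, the callback
 * walks t_bidi_data_sg page by page with kmap_atomic() and XORs the saved
 * payload into it, so the bidi buffer returned to the initiator appears to
 * carry the exclusive-or of the two data streams.
 * compare_and_write_callback(), which begins here, is summarised in the next
 * annotation below.
 */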
success && ((unsigned long )cmd->t_data_sg == (unsigned long )((struct scatterlist *)0) || (unsigned long )cmd->t_bidi_data_sg == (unsigned long )((struct scatterlist *)0))) { return (0U); } else { } if (cmd->data_length == 0U) { goto out; } else { } if ((unsigned int )cmd->scsi_status != 0U) { printk("\vcompare_and_write_callback: non zero scsi_status: 0x%02x\n", (int )cmd->scsi_status); goto out; } else { } tmp = kzalloc((size_t )cmd->data_length, 208U); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { printk("\vUnable to allocate compare_and_write buf\n"); ret = 18U; goto out; } else { } tmp___0 = kmalloc((unsigned long )cmd->t_data_nents * 40UL, 208U); write_sg = (struct scatterlist *)tmp___0; if ((unsigned long )write_sg == (unsigned long )((struct scatterlist *)0)) { printk("\vUnable to allocate compare_and_write sg\n"); ret = 18U; goto out; } else { } sg_init_table(write_sg, cmd->t_data_nents); tmp___1 = sg_copy_to_buffer(cmd->t_data_sg, cmd->t_data_nents, (void *)buf, (size_t )cmd->data_length); rc = (int )tmp___1; if (rc == 0) { printk("\vsg_copy_to_buffer() failed for compare_and_write\n"); ret = 18U; goto out; } else { } i = 0; sg = cmd->t_bidi_data_sg; goto ldv_59490; ldv_59489: tmp___2 = sg_page___0(sg); tmp___3 = kmap_atomic(tmp___2); addr = (unsigned char *)tmp___3; if ((unsigned long )addr == (unsigned long )((unsigned char *)0U)) { ret = 18U; goto out; } else { } _min1 = sg->length; _min2 = compare_len; len = _min1 < _min2 ? _min1 : _min2; tmp___4 = memcmp((void const *)addr, (void const *)buf + (unsigned long )offset, (size_t )len); if (tmp___4 != 0) { printk("\fDetected MISCOMPARE for addr: %p buf: %p\n", addr, buf + (unsigned long )offset); __kunmap_atomic((void *)addr); goto miscompare; } else { } __kunmap_atomic((void *)addr); offset = offset + len; compare_len = compare_len - len; if (compare_len == 0U) { goto ldv_59488; } else { } i = i + 1; sg = sg_next(sg); ldv_59490: ; if ((unsigned int )i < cmd->t_bidi_data_nents) { goto ldv_59489; } else { } ldv_59488: i = 0; len = cmd->t_task_nolb * block_size___0; sg_miter_start(& m, cmd->t_data_sg, cmd->t_data_nents, 2U); goto ldv_59492; ldv_59491: sg_miter_next(& m); if (block_size___0 <= 4095U) { sg_set_page___0(write_sg + (unsigned long )i, m.page, block_size___0, block_size___0); } else { sg_miter_next(& m); sg_set_page___0(write_sg + (unsigned long )i, m.page, block_size___0, 0U); } len = len - block_size___0; i = i + 1; ldv_59492: ; if (len != 0U) { goto ldv_59491; } else { } sg_miter_stop(& m); cmd->t_data_sg_orig = cmd->t_data_sg; cmd->t_data_sg = write_sg; cmd->t_data_nents_orig = cmd->t_data_nents; cmd->t_data_nents = 1U; cmd->sam_task_attr = 33; cmd->transport_complete_callback = & compare_and_write_post; cmd->execute_cmd = & sbc_execute_rw; spin_lock_irq(& cmd->t_state_lock); cmd->t_state = 5; cmd->transport_state = cmd->transport_state | 530U; spin_unlock_irq(& cmd->t_state_lock); __target_execute_cmd(cmd); kfree((void const *)buf); return (ret); miscompare: printk("\fTarget/%s: Send MISCOMPARE check condition and sense\n", (char const *)(& (dev->transport)->name)); ret = 20U; out: up(& dev->caw_sem); kfree((void const *)write_sg); kfree((void const *)buf); return (ret); } } static sense_reason_t sbc_compare_and_write(struct se_cmd *cmd ) { struct sbc_ops *ops ; struct se_device *dev ; sense_reason_t ret ; int rc ; { ops = (struct sbc_ops *)cmd->protocol_data; dev = cmd->se_dev; rc = down_interruptible(& dev->caw_sem); if (rc != 0) { cmd->transport_complete_callback 
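/*
 * Annotation: compare_and_write_callback() above and sbc_compare_and_write()
 * (in progress at this point) implement COMPARE AND WRITE in two phases
 * serialised by dev->caw_sem. The backend first reads the target blocks into
 * t_bidi_data_sg; the callback then memcmp()s them against the verify half
 * of the payload copied out of t_data_sg. On a mismatch it returns 20 (which
 * appears to be TCM_MISCOMPARE_VERIFY with the enum name flattened away) and
 * releases caw_sem; on a match it builds write_sg pointing at the write half
 * of the payload, swaps it in as t_data_sg, and re-drives the command as a
 * write, with compare_and_write_post() releasing caw_sem on completion.
 */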
= (sense_reason_t (*)(struct se_cmd * , bool ))0; return (10U); } else { } cmd->data_length = cmd->t_task_nolb * dev->dev_attrib.block_size; ret = (*(ops->execute_rw))(cmd, cmd->t_bidi_data_sg, cmd->t_bidi_data_nents, 2); if (ret != 0U) { cmd->transport_complete_callback = (sense_reason_t (*)(struct se_cmd * , bool ))0; up(& dev->caw_sem); return (ret); } else { } return (0U); } } static int sbc_set_prot_op_checks(u8 protect , bool fabric_prot , enum target_prot_type prot_type , bool is_write , struct se_cmd *cmd ) { { if ((int )is_write) { cmd->prot_op = (int )fabric_prot ? 8 : ((unsigned int )protect != 0U ? 32 : 2); switch ((int )protect) { case 0: ; case 3: cmd->prot_checks = 0U; goto ldv_59510; case 1: ; case 5: cmd->prot_checks = 1U; if ((unsigned int )prot_type == 1U) { cmd->prot_checks = (u8 )((unsigned int )cmd->prot_checks | 4U); } else { } goto ldv_59510; case 2: ; if ((unsigned int )prot_type == 1U) { cmd->prot_checks = 4U; } else { } goto ldv_59510; case 4: cmd->prot_checks = 1U; goto ldv_59510; default: printk("\vUnsupported protect field %d\n", (int )protect); return (-22); } ldv_59510: ; } else { cmd->prot_op = (int )fabric_prot ? 1 : ((unsigned int )protect != 0U ? 16 : 4); switch ((int )protect) { case 0: ; case 1: ; case 5: cmd->prot_checks = 1U; if ((unsigned int )prot_type == 1U) { cmd->prot_checks = (u8 )((unsigned int )cmd->prot_checks | 4U); } else { } goto ldv_59519; case 2: ; if ((unsigned int )prot_type == 1U) { cmd->prot_checks = 4U; } else { } goto ldv_59519; case 3: cmd->prot_checks = 0U; goto ldv_59519; case 4: cmd->prot_checks = 1U; goto ldv_59519; default: printk("\vUnsupported protect field %d\n", (int )protect); return (-22); } ldv_59519: ; } return (0); } } static sense_reason_t sbc_check_prot(struct se_device *dev , struct se_cmd *cmd , unsigned char *cdb , u32 sectors , bool is_write ) { u8 protect ; int sp_ops ; int pi_prot_type ; bool fabric_prot ; long tmp ; int tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; { protect = (u8 )((int )*(cdb + 1UL) >> 5); sp_ops = (int )(cmd->se_sess)->sup_prot_ops; pi_prot_type = (int )dev->dev_attrib.pi_prot_type; fabric_prot = 0; if ((unsigned long )cmd->t_prot_sg == (unsigned long )((struct scatterlist *)0) || cmd->t_prot_nents == 0U) { tmp = ldv__builtin_expect((long )(((unsigned int )protect != 0U && (unsigned int )dev->dev_attrib.pi_prot_type == 0U) && (unsigned int )(cmd->se_sess)->sess_prot_type == 0U), 0L); if (tmp != 0L) { printk("\vCDB contains protect bit, but device + fabric does not advertise PROTECT=1 feature bit\n"); return (8U); } else { } if ((int )cmd->prot_pto) { return (0U); } else { } } else { } switch ((unsigned int )dev->dev_attrib.pi_prot_type) { case 3U: cmd->reftag_seed = 4294967295U; goto ldv_59536; case 2U: ; if ((unsigned int )protect != 0U) { return (8U); } else { } cmd->reftag_seed = (u32 )cmd->t_task_lba; goto ldv_59536; case 1U: cmd->reftag_seed = (u32 )cmd->t_task_lba; goto ldv_59536; case 0U: fabric_prot = (int )is_write ? 
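/*
 * Annotation for sbc_check_prot(), the function this comment sits inside:
 * "protect" is the RDPROTECT/WRPROTECT field, cdb[1] >> 5. The numeric
 * constants here appear to mirror the upstream enum target_prot_op values
 * (1 DIN_INSERT, 2 DOUT_INSERT, 4 DIN_STRIP, 8 DOUT_STRIP, 16 DIN_PASS,
 * 32 DOUT_PASS), so "sp_ops & 40" asks whether the fabric can pass or strip
 * protection on writes and "sp_ops & 17" whether it can pass or insert it on
 * reads, while prot_checks accumulates the guard-tag (value 1) and
 * reference-tag (value 4) checks that sbc_dif_v1_verify() honours later on.
 */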
(sp_ops & 40) != 0 : (sp_ops & 17) != 0; if ((int )fabric_prot && (unsigned int )(cmd->se_sess)->sess_prot_type != 0U) { pi_prot_type = (int )(cmd->se_sess)->sess_prot_type; goto ldv_59536; } else { } if ((unsigned int )protect == 0U) { return (0U); } else { } default: printk("\vUnable to determine pi_prot_type for CDB: 0x%02x PROTECT: 0x%02x\n", (int )*cdb, (int )protect); return (8U); } ldv_59536: tmp___0 = sbc_set_prot_op_checks((int )protect, (int )fabric_prot, (enum target_prot_type )pi_prot_type, (int )is_write, cmd); if (tmp___0 != 0) { return (8U); } else { } cmd->prot_type = (enum target_prot_type )pi_prot_type; cmd->prot_length = (u32 )dev->prot_length * sectors; if ((unsigned int )protect != 0U) { cmd->data_length = dev->dev_attrib.block_size * sectors; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "sbc_check_prot"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_sbc.c"; descriptor.format = "%s: prot_type=%d, data_length=%d, prot_length=%d prot_op=%d prot_checks=%d\n"; descriptor.lineno = 753U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "%s: prot_type=%d, data_length=%d, prot_length=%d prot_op=%d prot_checks=%d\n", "sbc_check_prot", (unsigned int )cmd->prot_type, cmd->data_length, cmd->prot_length, (unsigned int )cmd->prot_op, (int )cmd->prot_checks); } else { } return (0U); } } static int sbc_check_dpofua(struct se_device *dev , struct se_cmd *cmd , unsigned char *cdb ) { bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { if (((int )*(cdb + 1UL) & 16) != 0) { tmp = target_check_fua(dev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { printk("\vGot CDB: 0x%02x with DPO bit set, but device does not advertise support for DPO\n", (int )*cdb); return (-22); } else { } } else { } if (((int )*(cdb + 1UL) & 8) != 0) { tmp___1 = target_check_fua(dev); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { printk("\vGot CDB: 0x%02x with FUA bit set, but device does not advertise support for FUA write\n", (int )*cdb); return (-22); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 128U; } else { } return (0); } } sense_reason_t sbc_parse_cdb(struct se_cmd *cmd , struct sbc_ops *ops ) { struct se_device *dev ; unsigned char *cdb ; unsigned int size ; u32 sectors ; sense_reason_t ret ; u32 tmp ; u32 tmp___0 ; int tmp___1 ; u32 tmp___2 ; int tmp___3 ; int tmp___4 ; u32 tmp___5 ; u32 tmp___6 ; int tmp___7 ; u32 tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; u32 tmp___12 ; u16 service_action ; u16 tmp___13 ; int tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; u16 tmp___17 ; u32 tmp___18 ; u32 tmp___19 ; unsigned long long end_lba ; sector_t tmp___20 ; sense_reason_t tmp___21 ; { dev = cmd->se_dev; cdb = cmd->t_task_cdb; sectors = 0U; cmd->protocol_data = (void *)ops; switch ((int )*cdb) { case 8: sectors = transport_get_sectors_6(cdb); tmp = transport_lba_21(cdb); cmd->t_task_lba = (unsigned long long )tmp; cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 40: sectors = transport_get_sectors_10(cdb); tmp___0 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___0; tmp___1 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___1 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 
0); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 168: sectors = transport_get_sectors_12(cdb); tmp___2 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___2; tmp___3 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___3 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 0); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 136: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); tmp___4 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___4 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 0); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 10: sectors = transport_get_sectors_6(cdb); tmp___5 = transport_lba_21(cdb); cmd->t_task_lba = (unsigned long long )tmp___5; cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 42: ; case 46: sectors = transport_get_sectors_10(cdb); tmp___6 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___6; tmp___7 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___7 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 1); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 170: sectors = transport_get_sectors_12(cdb); tmp___8 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___8; tmp___9 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___9 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 1); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 138: sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); tmp___10 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___10 != 0) { return (8U); } else { } ret = sbc_check_prot(dev, cmd, cdb, sectors, 1); if (ret != 0U) { return (ret); } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; goto ldv_59558; case 83: ; if ((unsigned int )cmd->data_direction != 1U || (cmd->se_cmd_flags & 1024U) == 0U) { return (8U); } else { } sectors = transport_get_sectors_10(cdb); tmp___11 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___11 != 0) { return (8U); } else { } tmp___12 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___12; cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; cmd->transport_complete_callback = & xdreadwrite_callback; goto ldv_59558; case 127: tmp___13 = get_unaligned_be16((void const *)cdb + 8U); service_action = tmp___13; switch ((int )service_action) { case 7: sectors = transport_get_sectors_32(cdb); tmp___14 = sbc_check_dpofua(dev, cmd, cdb); if (tmp___14 != 0) { return (8U); } else { } cmd->t_task_lba = transport_lba_64_ext(cdb); cmd->se_cmd_flags = cmd->se_cmd_flags | 8U; cmd->execute_cmd = & sbc_execute_rw; cmd->transport_complete_callback = & xdreadwrite_callback; goto ldv_59571; case 13: sectors = transport_get_sectors_32(cdb); if (sectors == 0U) { printk("\vWSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); return (8U); } else { } size = sbc_get_size(cmd, 1U); cmd->t_task_lba = get_unaligned_be64((void const *)cdb + 12U); ret = 
sbc_setup_write_same(cmd, cdb + 10UL, ops); if (ret != 0U) { return (ret); } else { } goto ldv_59571; default: printk("\vVARIABLE_LENGTH_CMD service action 0x%04x not supported\n", (int )service_action); return (2U); } ldv_59571: ; goto ldv_59558; case 137: sectors = (u32 )*(cdb + 13UL); if (sectors > 1U) { printk("\vCOMPARE_AND_WRITE contains NoLB: %u greater than 1\n", sectors); return (8U); } else { } tmp___15 = sbc_get_size(cmd, sectors); size = tmp___15 * 2U; cmd->t_task_lba = get_unaligned_be64((void const *)cdb + 2U); cmd->t_task_nolb = sectors; cmd->se_cmd_flags = cmd->se_cmd_flags | 524296U; cmd->execute_cmd = & sbc_compare_and_write; cmd->transport_complete_callback = & compare_and_write_callback; goto ldv_59558; case 37: size = 8U; cmd->execute_cmd = & sbc_emulate_readcapacity; goto ldv_59558; case 158: ; switch ((int )*(cmd->t_task_cdb + 1UL) & 31) { case 16: cmd->execute_cmd = & sbc_emulate_readcapacity_16; goto ldv_59578; case 19: cmd->execute_cmd = & target_emulate_report_referrals; goto ldv_59578; default: printk("\vUnsupported SA: 0x%02x\n", (int )*(cmd->t_task_cdb + 1UL) & 31); return (8U); } ldv_59578: size = (unsigned int )(((((int )*(cdb + 10UL) << 24) | ((int )*(cdb + 11UL) << 16)) | ((int )*(cdb + 12UL) << 8)) | (int )*(cdb + 13UL)); goto ldv_59558; case 53: ; case 145: ; if ((unsigned int )*cdb == 53U) { sectors = transport_get_sectors_10(cdb); tmp___16 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___16; } else { sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); } if ((unsigned long )ops->execute_sync_cache != (unsigned long )((sense_reason_t (*)(struct se_cmd * ))0)) { cmd->execute_cmd = ops->execute_sync_cache; goto check_lba; } else { } size = 0U; cmd->execute_cmd = & sbc_emulate_noop; goto ldv_59558; case 66: ; if ((unsigned long )ops->execute_unmap == (unsigned long )((sense_reason_t (*)(struct se_cmd * , sector_t , sector_t ))0)) { return (2U); } else { } if (dev->dev_attrib.emulate_tpu == 0) { printk("\vGot UNMAP, but backend device has emulate_tpu disabled\n"); return (2U); } else { } tmp___17 = get_unaligned_be16((void const *)cdb + 7U); size = (unsigned int )tmp___17; cmd->execute_cmd = & sbc_execute_unmap; goto ldv_59558; case 147: sectors = transport_get_sectors_16(cdb); if (sectors == 0U) { printk("\vWSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); return (8U); } else { } size = sbc_get_size(cmd, 1U); cmd->t_task_lba = get_unaligned_be64((void const *)cdb + 2U); ret = sbc_setup_write_same(cmd, cdb + 1UL, ops); if (ret != 0U) { return (ret); } else { } goto ldv_59558; case 65: sectors = transport_get_sectors_10(cdb); if (sectors == 0U) { printk("\vWSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); return (8U); } else { } size = sbc_get_size(cmd, 1U); tmp___18 = get_unaligned_be32((void const *)cdb + 2U); cmd->t_task_lba = (unsigned long long )tmp___18; ret = sbc_setup_write_same(cmd, cdb + 1UL, ops); if (ret != 0U) { return (ret); } else { } goto ldv_59558; case 47: size = 0U; sectors = transport_get_sectors_10(cdb); tmp___19 = transport_lba_32(cdb); cmd->t_task_lba = (unsigned long long )tmp___19; cmd->execute_cmd = & sbc_emulate_noop; goto check_lba; case 1: ; case 11: ; case 43: size = 0U; cmd->execute_cmd = & sbc_emulate_noop; goto ldv_59558; default: ret = spc_parse_cdb(cmd, & size); if (ret != 0U) { return (ret); } else { } } ldv_59558: ; if ((unsigned long )cmd->execute_cmd == (unsigned long )((sense_reason_t (*)(struct se_cmd * ))0)) { return (2U); } else { } if ((cmd->se_cmd_flags & 8U) != 
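/*
 * Annotation: the decimal case labels in the sbc_parse_cdb() switch above
 * are SCSI opcodes with their symbolic names stripped by CIL. They appear to
 * decode as: 8 READ_6, 40 READ_10, 168 READ_12, 136 READ_16, 10 WRITE_6,
 * 42 WRITE_10, 46 WRITE_VERIFY, 170 WRITE_12, 138 WRITE_16,
 * 83 XDWRITEREAD_10, 127 VARIABLE_LENGTH_CMD (service actions 7
 * XDWRITEREAD_32 and 13 WRITE_SAME_32), 137 COMPARE_AND_WRITE,
 * 37 READ_CAPACITY, 158 SERVICE_ACTION_IN_16 (service actions 16
 * READ_CAPACITY_16 and 19 REPORT_REFERRALS), 53 and 145
 * SYNCHRONIZE_CACHE(10/16), 66 UNMAP, 147 WRITE_SAME_16, 65 WRITE_SAME,
 * 47 VERIFY, and 1/11/43 REZERO_UNIT/SEEK_6/SEEK_10 emulated as no-ops;
 * anything else falls through to spc_parse_cdb(). Likewise the small
 * sense_reason_t return values (2, 8, 17, ...) appear to be
 * TCM_UNSUPPORTED_SCSI_OPCODE, TCM_INVALID_CDB_FIELD,
 * TCM_ADDRESS_OUT_OF_RANGE and related codes with the enum names flattened
 * away.
 */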
0U) { check_lba: tmp___20 = (*((dev->transport)->get_blocks))(dev); end_lba = (unsigned long long )(tmp___20 + 1UL); if (cmd->t_task_lba + (unsigned long long )sectors < cmd->t_task_lba || cmd->t_task_lba + (unsigned long long )sectors > end_lba) { printk("\vcmd exceeds last lba %llu (lba %llu, sectors %u)\n", end_lba, cmd->t_task_lba, sectors); return (17U); } else { } if ((cmd->se_cmd_flags & 524288U) == 0U) { size = sbc_get_size(cmd, sectors); } else { } } else { } tmp___21 = target_cmd_size_check(cmd, size); return (tmp___21); } } static char const __kstrtab_sbc_parse_cdb[14U] = { 's', 'b', 'c', '_', 'p', 'a', 'r', 's', 'e', '_', 'c', 'd', 'b', '\000'}; struct kernel_symbol const __ksymtab_sbc_parse_cdb ; struct kernel_symbol const __ksymtab_sbc_parse_cdb = {(unsigned long )(& sbc_parse_cdb), (char const *)(& __kstrtab_sbc_parse_cdb)}; u32 sbc_get_device_type(struct se_device *dev ) { { return (0U); } } static char const __kstrtab_sbc_get_device_type[20U] = { 's', 'b', 'c', '_', 'g', 'e', 't', '_', 'd', 'e', 'v', 'i', 'c', 'e', '_', 't', 'y', 'p', 'e', '\000'}; struct kernel_symbol const __ksymtab_sbc_get_device_type ; struct kernel_symbol const __ksymtab_sbc_get_device_type = {(unsigned long )(& sbc_get_device_type), (char const *)(& __kstrtab_sbc_get_device_type)}; static sense_reason_t sbc_execute_unmap(struct se_cmd *cmd ) { struct sbc_ops *ops ; struct se_device *dev ; unsigned char *buf ; unsigned char *ptr ; sector_t lba ; int size ; u32 range ; sense_reason_t ret ; int dl ; int bd_dl ; void *tmp ; u16 tmp___0 ; u16 tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; u64 tmp___3 ; struct _ddebug descriptor___0 ; long tmp___4 ; sector_t tmp___5 ; { ops = (struct sbc_ops *)cmd->protocol_data; dev = cmd->se_dev; ptr = (unsigned char *)0U; ret = 0U; if ((unsigned int )*(cmd->t_task_cdb + 1UL) != 0U) { return (8U); } else { } if (cmd->data_length == 0U) { target_complete_cmd(cmd, 0); return (0U); } else { } if (cmd->data_length <= 7U) { printk("\fUNMAP parameter list length %u too small\n", cmd->data_length); return (19U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } tmp___0 = get_unaligned_be16((void const *)buf); dl = (int )tmp___0; tmp___1 = get_unaligned_be16((void const *)buf + 2U); bd_dl = (int )tmp___1; size = (int )(cmd->data_length - 8U); if (bd_dl > size) { printk("\fUNMAP parameter list length %u too small, ignoring bd_dl %u\n", cmd->data_length, bd_dl); } else { size = bd_dl; } if ((u32 )(size / 16) > dev->dev_attrib.max_unmap_block_desc_count) { ret = 9U; goto err; } else { } ptr = buf + 8UL; descriptor.modname = "target_core_mod"; descriptor.function = "sbc_execute_unmap"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_sbc.c"; descriptor.format = "UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u ptr: %p\n"; descriptor.lineno = 1157U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor, "UNMAP: Sub: %s Using dl: %u bd_dl: %u size: %u ptr: %p\n", (char const *)(& (dev->transport)->name), dl, bd_dl, size, ptr); } else { } goto ldv_59629; ldv_59628: tmp___3 = get_unaligned_be64((void const *)ptr); lba = (sector_t )tmp___3; range = get_unaligned_be32((void const *)ptr + 8U); 
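/*
 * Annotation: the loop this comment sits in walks the UNMAP parameter list.
 * The layout decoded above is an 8-byte header (UNMAP data length "dl" at
 * offset 0, block-descriptor data length "bd_dl" at offset 2, both
 * big-endian 16-bit) followed by 16-byte block descriptors, each holding a
 * big-endian 64-bit starting LBA, a big-endian 32-bit block count ("range")
 * and four reserved bytes -- hence ptr advancing by 16 and size dropping by
 * 16 per iteration. A rough sketch of one descriptor, assuming <stdint.h>
 * types (illustrative only):
 *
 *   struct unmap_block_descriptor {
 *       uint64_t lba;          // big-endian on the wire
 *       uint32_t num_blocks;   // big-endian on the wire
 *       uint32_t reserved;
 *   };
 */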
descriptor___0.modname = "target_core_mod"; descriptor___0.function = "sbc_execute_unmap"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_sbc.c"; descriptor___0.format = "UNMAP: Using lba: %llu and range: %u\n"; descriptor___0.lineno = 1163U; descriptor___0.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_pr_debug(& descriptor___0, "UNMAP: Using lba: %llu and range: %u\n", (unsigned long long )lba, range); } else { } if (dev->dev_attrib.max_unmap_lba_count < range) { ret = 9U; goto err; } else { } tmp___5 = (*((dev->transport)->get_blocks))(dev); if ((sector_t )range + lba > tmp___5 + 1UL) { ret = 17U; goto err; } else { } ret = (*(ops->execute_unmap))(cmd, lba, (sector_t )range); if (ret != 0U) { goto err; } else { } ptr = ptr + 16UL; size = size + -16; ldv_59629: ; if (size > 15) { goto ldv_59628; } else { } err: transport_kunmap_data_sg(cmd); if (ret == 0U) { target_complete_cmd(cmd, 0); } else { } return (ret); } } void sbc_dif_generate(struct se_cmd *cmd ) { struct se_device *dev ; struct se_dif_v1_tuple *sdt ; struct scatterlist *dsg ; struct scatterlist *psg ; sector_t sector ; void *daddr ; void *paddr ; int i ; int j ; int offset ; unsigned int block_size___0 ; struct page *tmp ; void *tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; __u16 crc ; unsigned int avail ; struct page *tmp___3 ; void *tmp___4 ; unsigned int _min1 ; unsigned int _min2 ; struct page *tmp___5 ; void *tmp___6 ; __u16 tmp___7 ; __u32 tmp___8 ; struct _ddebug descriptor ; __u32 tmp___9 ; long tmp___10 ; { dev = cmd->se_dev; dsg = cmd->t_data_sg; sector = (sector_t )cmd->t_task_lba; offset = 0; block_size___0 = dev->dev_attrib.block_size; i = 0; psg = cmd->t_prot_sg; goto ldv_59656; ldv_59655: tmp = sg_page___0(psg); tmp___0 = kmap_atomic(tmp); paddr = tmp___0 + (unsigned long )psg->offset; tmp___1 = sg_page___0(dsg); tmp___2 = kmap_atomic(tmp___1); daddr = tmp___2 + (unsigned long )dsg->offset; j = 0; goto ldv_59653; ldv_59652: ; if ((unsigned int )offset >= dsg->length) { offset = (int )((unsigned int )offset - dsg->length); __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); dsg = sg_next(dsg); if ((unsigned long )dsg == (unsigned long )((struct scatterlist *)0)) { __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); return; } else { } tmp___3 = sg_page___0(dsg); tmp___4 = kmap_atomic(tmp___3); daddr = tmp___4 + (unsigned long )dsg->offset; } else { } sdt = (struct se_dif_v1_tuple *)paddr + (unsigned long )j; _min1 = block_size___0; _min2 = dsg->length - (unsigned int )offset; avail = _min1 < _min2 ? 
_min1 : _min2; crc = crc_t10dif((unsigned char const *)daddr + (unsigned long )offset, (size_t )avail); if (avail < block_size___0) { __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); dsg = sg_next(dsg); if ((unsigned long )dsg == (unsigned long )((struct scatterlist *)0)) { __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); return; } else { } tmp___5 = sg_page___0(dsg); tmp___6 = kmap_atomic(tmp___5); daddr = tmp___6 + (unsigned long )dsg->offset; offset = (int )(block_size___0 - avail); crc = crc_t10dif_update((int )crc, (unsigned char const *)daddr, (size_t )offset); } else { offset = (int )((unsigned int )offset + block_size___0); } tmp___7 = __fswab16((int )crc); sdt->guard_tag = tmp___7; if ((unsigned int )cmd->prot_type == 1U) { tmp___8 = __fswab32((__u32 )sector); sdt->ref_tag = tmp___8; } else { } sdt->app_tag = 0U; descriptor.modname = "target_core_mod"; descriptor.function = "sbc_dif_generate"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_sbc.c"; descriptor.format = "DIF %s INSERT sector: %llu guard_tag: 0x%04x app_tag: 0x%04x ref_tag: %u\n"; descriptor.lineno = 1248U; descriptor.flags = 0U; tmp___10 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___10 != 0L) { tmp___9 = __fswab32(sdt->ref_tag); __dynamic_pr_debug(& descriptor, "DIF %s INSERT sector: %llu guard_tag: 0x%04x app_tag: 0x%04x ref_tag: %u\n", (unsigned int )cmd->data_direction == 1U ? (char *)"WRITE" : (char *)"READ", (unsigned long long )sector, (int )sdt->guard_tag, (int )sdt->app_tag, tmp___9); } else { } sector = sector + 1UL; j = (int )((unsigned int )j + 8U); ldv_59653: ; if ((unsigned int )j < psg->length) { goto ldv_59652; } else { } __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); i = i + 1; psg = sg_next(psg); ldv_59656: ; if ((unsigned int )i < cmd->t_prot_nents) { goto ldv_59655; } else { } return; } } static sense_reason_t sbc_dif_v1_verify(struct se_cmd *cmd , struct se_dif_v1_tuple *sdt , __u16 crc , sector_t sector , unsigned int ei_lba ) { __be16 csum ; __u16 tmp ; __u16 tmp___0 ; __u16 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; { if (((int )cmd->prot_checks & 1) == 0) { goto check_ref; } else { } tmp = __fswab16((int )crc); csum = tmp; if ((int )sdt->guard_tag != (int )csum) { tmp___0 = __fswab16((int )csum); tmp___1 = __fswab16((int )sdt->guard_tag); printk("\vDIFv1 checksum failed on sector %llu guard tag 0x%04x csum 0x%04x\n", (unsigned long long )sector, (int )tmp___1, (int )tmp___0); return (21U); } else { } check_ref: ; if (((int )cmd->prot_checks & 4) == 0) { return (0U); } else { } if ((unsigned int )cmd->prot_type == 1U) { tmp___3 = __fswab32(sdt->ref_tag); if ((sector_t )tmp___3 != (sector & 4294967295UL)) { tmp___2 = __fswab32(sdt->ref_tag); printk("\vDIFv1 Type 1 reference failed on sector: %llu tag: 0x%08x sector MSB: 0x%08x\n", (unsigned long long )sector, tmp___2, (unsigned int )sector); return (23U); } else { } } else { } if ((unsigned int )cmd->prot_type == 2U) { tmp___5 = __fswab32(sdt->ref_tag); if (tmp___5 != ei_lba) { tmp___4 = __fswab32(sdt->ref_tag); printk("\vDIFv1 Type 2 reference failed on sector: %llu tag: 0x%08x ei_lba: 0x%08x\n", (unsigned long long )sector, tmp___4, ei_lba); return (23U); } else { } } else { } return (0U); } } void sbc_dif_copy_prot(struct se_cmd 
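/*
 * Annotation: sbc_dif_generate() and sbc_dif_v1_verify() above operate on
 * 8-byte T10 DIF tuples stored in the protection scatterlist, which is why j
 * advances by 8 per logical block. The tuple layout, sketched with
 * <stdint.h> types (illustrative; the driver's own type is
 * struct se_dif_v1_tuple):
 *
 *   struct dif_tuple {           // one per logical block, big-endian fields
 *       uint16_t guard_tag;      // CRC16-T10DIF over the block's data
 *       uint16_t app_tag;        // 0xffff acts as an "ignore block" escape
 *       uint32_t ref_tag;        // low 32 bits of the LBA for Type 1
 *   };
 *
 * Generation computes the CRC with crc_t10dif()/crc_t10dif_update() (the
 * update form handles a block that straddles two data scatterlist entries),
 * byte-swaps to big-endian and fills the LBA-derived ref tag only for
 * protection type 1. Verification recomputes the CRC and checks the guard
 * and reference tags according to cmd->prot_checks, returning 21 or 23 on a
 * guard or reference mismatch and recording cmd->bad_sector.
 */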
*cmd , unsigned int sectors , bool read , struct scatterlist *sg , int sg_off ) { struct se_device *dev ; struct scatterlist *psg ; void *paddr ; void *addr ; unsigned int i ; unsigned int len ; unsigned int left ; unsigned int offset ; unsigned int psg_len ; unsigned int copied ; struct page *tmp ; void *tmp___0 ; unsigned int _min1 ; unsigned int _min2 ; unsigned int _min1___0 ; unsigned int _min2___0 ; struct page *tmp___1 ; void *tmp___2 ; { dev = cmd->se_dev; offset = (unsigned int )sg_off; if ((unsigned long )sg == (unsigned long )((struct scatterlist *)0)) { return; } else { } left = (unsigned int )dev->prot_length * sectors; i = 0U; psg = cmd->t_prot_sg; goto ldv_59694; ldv_59693: copied = 0U; tmp = sg_page___0(psg); tmp___0 = kmap_atomic(tmp); paddr = tmp___0 + (unsigned long )psg->offset; _min1 = left; _min2 = psg->length; psg_len = _min1 < _min2 ? _min1 : _min2; goto ldv_59691; ldv_59690: _min1___0 = psg_len; _min2___0 = sg->length - offset; len = _min1___0 < _min2___0 ? _min1___0 : _min2___0; tmp___1 = sg_page___0(sg); tmp___2 = kmap_atomic(tmp___1); addr = tmp___2 + ((unsigned long )sg->offset + (unsigned long )offset); if ((int )read) { memcpy(paddr + (unsigned long )copied, (void const *)addr, (size_t )len); } else { memcpy(addr, (void const *)paddr + (unsigned long )copied, (size_t )len); } left = left - len; offset = offset + len; copied = copied + len; psg_len = psg_len - len; __kunmap_atomic(addr + (- ((unsigned long )sg->offset) - (unsigned long )offset)); if (sg->length <= offset) { sg = sg_next(sg); offset = 0U; } else { } ldv_59691: ; if (psg_len != 0U) { goto ldv_59690; } else { } __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); i = i + 1U; psg = sg_next(psg); ldv_59694: ; if (cmd->t_prot_nents > i) { goto ldv_59693; } else { } return; } } static char const __kstrtab_sbc_dif_copy_prot[18U] = { 's', 'b', 'c', '_', 'd', 'i', 'f', '_', 'c', 'o', 'p', 'y', '_', 'p', 'r', 'o', 't', '\000'}; struct kernel_symbol const __ksymtab_sbc_dif_copy_prot ; struct kernel_symbol const __ksymtab_sbc_dif_copy_prot = {(unsigned long )(& sbc_dif_copy_prot), (char const *)(& __kstrtab_sbc_dif_copy_prot)}; sense_reason_t sbc_dif_verify(struct se_cmd *cmd , sector_t start , unsigned int sectors , unsigned int ei_lba , struct scatterlist *psg , int psg_off ) { struct se_device *dev ; struct se_dif_v1_tuple *sdt ; struct scatterlist *dsg ; sector_t sector ; void *daddr ; void *paddr ; int i ; sense_reason_t rc ; int dsg_off ; unsigned int block_size___0 ; struct page *tmp ; void *tmp___0 ; struct page *tmp___1 ; void *tmp___2 ; __u16 crc ; unsigned int avail ; struct page *tmp___3 ; void *tmp___4 ; struct _ddebug descriptor ; __u32 tmp___5 ; long tmp___6 ; unsigned int _min1 ; unsigned int _min2 ; struct page *tmp___7 ; void *tmp___8 ; { dev = cmd->se_dev; dsg = cmd->t_data_sg; sector = start; dsg_off = 0; block_size___0 = dev->dev_attrib.block_size; goto ldv_59737; ldv_59736: tmp = sg_page___0(psg); tmp___0 = kmap_atomic(tmp); paddr = tmp___0 + (unsigned long )psg->offset; tmp___1 = sg_page___0(dsg); tmp___2 = kmap_atomic(tmp___1); daddr = tmp___2 + (unsigned long )dsg->offset; i = psg_off; goto ldv_59734; ldv_59733: ; if ((unsigned int )dsg_off >= dsg->length) { dsg_off = (int )((unsigned int )dsg_off - dsg->length); __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); dsg = sg_next(dsg); if ((unsigned long )dsg == (unsigned long )((struct scatterlist *)0)) { __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); return (0U); } else { } tmp___3 = sg_page___0(dsg); tmp___4 
= kmap_atomic(tmp___3); daddr = tmp___4 + (unsigned long )dsg->offset; } else { } sdt = (struct se_dif_v1_tuple *)paddr + (unsigned long )i; descriptor.modname = "target_core_mod"; descriptor.function = "sbc_dif_verify"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_sbc.c"; descriptor.format = "DIF READ sector: %llu guard_tag: 0x%04x app_tag: 0x%04x ref_tag: %u\n"; descriptor.lineno = 1384U; descriptor.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___6 != 0L) { tmp___5 = __fswab32(sdt->ref_tag); __dynamic_pr_debug(& descriptor, "DIF READ sector: %llu guard_tag: 0x%04x app_tag: 0x%04x ref_tag: %u\n", (unsigned long long )sector, (int )sdt->guard_tag, (int )sdt->app_tag, tmp___5); } else { } if ((unsigned int )sdt->app_tag == 65535U) { dsg_off = (int )((unsigned int )dsg_off + block_size___0); goto next; } else { } _min1 = block_size___0; _min2 = dsg->length - (unsigned int )dsg_off; avail = _min1 < _min2 ? _min1 : _min2; crc = crc_t10dif((unsigned char const *)daddr + (unsigned long )dsg_off, (size_t )avail); if (avail < block_size___0) { __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); dsg = sg_next(dsg); if ((unsigned long )dsg == (unsigned long )((struct scatterlist *)0)) { __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); return (0U); } else { } tmp___7 = sg_page___0(dsg); tmp___8 = kmap_atomic(tmp___7); daddr = tmp___8 + (unsigned long )dsg->offset; dsg_off = (int )(block_size___0 - avail); crc = crc_t10dif_update((int )crc, (unsigned char const *)daddr, (size_t )dsg_off); } else { dsg_off = (int )((unsigned int )dsg_off + block_size___0); } rc = sbc_dif_v1_verify(cmd, sdt, (int )crc, sector, ei_lba); if (rc != 0U) { __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); cmd->bad_sector = sector; return (rc); } else { } next: sector = sector + 1UL; ei_lba = ei_lba + 1U; i = (int )((unsigned int )i + 8U); ldv_59734: ; if ((unsigned int )i < psg->length && (sector_t )sectors + start > sector) { goto ldv_59733; } else { } psg_off = 0; __kunmap_atomic(daddr + - ((unsigned long )dsg->offset)); __kunmap_atomic(paddr + - ((unsigned long )psg->offset)); psg = sg_next(psg); ldv_59737: ; if ((unsigned long )psg != (unsigned long )((struct scatterlist *)0) && (sector_t )sectors + start > sector) { goto ldv_59736; } else { } return (0U); } } static char const __kstrtab_sbc_dif_verify[15U] = { 's', 'b', 'c', '_', 'd', 'i', 'f', '_', 'v', 'e', 'r', 'i', 'f', 'y', '\000'}; struct kernel_symbol const __ksymtab_sbc_dif_verify ; struct kernel_symbol const __ksymtab_sbc_dif_verify = {(unsigned long )(& sbc_dif_verify), (char const *)(& __kstrtab_sbc_dif_verify)}; bool ldv_queue_work_on_451(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_452(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& 
ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_453(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_454(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_455(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_456(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_457(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_458(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_459(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_460(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_461(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_462(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } extern int hex_to_bin(char ) ; int ldv_mutex_trylock_487(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_485(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_488(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_489(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_484(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_486(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_490(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_479(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_481(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_480(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_483(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_482(struct workqueue_struct *ldv_func_arg1 ) ; extern void int_to_scsilun(u64 , struct scsi_lun * ) ; sense_reason_t spc_emulate_inquiry_std(struct se_cmd *cmd , unsigned char *buf ) ; sense_reason_t spc_emulate_evpd_83(struct se_cmd *cmd , unsigned char *buf ) ; void spc_parse_naa_6h_vendor_specific(struct se_device *dev , unsigned char *buf ) ; int core_scsi3_ua_clear_for_request_sense(struct se_cmd *cmd , u8 *asc , u8 *ascq ) ; sense_reason_t 
target_do_xcopy(struct se_cmd *se_cmd ) ; sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd ) ; static void spc_fill_alua_data(struct se_lun *lun , unsigned char *buf ) { struct t10_alua_tg_pt_gp *tg_pt_gp ; { *(buf + 5UL) = 128U; spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp != (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { *(buf + 5UL) = (unsigned char )((int )((signed char )*(buf + 5UL)) | (int )((signed char )tg_pt_gp->tg_pt_gp_alua_access_type)); } else { } spin_unlock(& lun->lun_tg_pt_gp_lock); return; } } sense_reason_t spc_emulate_inquiry_std(struct se_cmd *cmd , unsigned char *buf ) { struct se_lun *lun ; struct se_device *dev ; struct se_session *sess ; u32 tmp ; size_t __min1 ; size_t tmp___0 ; size_t __min2 ; size_t __min1___0 ; size_t tmp___1 ; size_t __min2___0 ; { lun = cmd->se_lun; dev = cmd->se_dev; sess = cmd->se_sess; tmp = (*((dev->transport)->get_device_type))(dev); if (tmp == 1U) { *(buf + 1UL) = 128U; } else { } *(buf + 2UL) = 5U; *(buf + 3UL) = 2U; spc_fill_alua_data(lun, buf); if (dev->dev_attrib.emulate_3pc != 0) { *(buf + 5UL) = (unsigned int )*(buf + 5UL) | 8U; } else { } if (((unsigned int )sess->sup_prot_ops & 48U) != 0U) { if ((unsigned int )dev->dev_attrib.pi_prot_type != 0U || (unsigned int )(cmd->se_sess)->sess_prot_type != 0U) { *(buf + 5UL) = (unsigned int )*(buf + 5UL) | 1U; } else { } } else { } *(buf + 7UL) = 2U; memcpy((void *)buf + 8U, (void const *)"LIO-ORG ", 8UL); memset((void *)buf + 16U, 32, 16UL); tmp___0 = strlen((char const *)(& dev->t10_wwn.model)); __min1 = tmp___0; __min2 = 16UL; memcpy((void *)buf + 16U, (void const *)(& dev->t10_wwn.model), __min1 < __min2 ? __min1 : __min2); tmp___1 = strlen((char const *)(& dev->t10_wwn.revision)); __min1___0 = tmp___1; __min2___0 = 4UL; memcpy((void *)buf + 32U, (void const *)(& dev->t10_wwn.revision), __min1___0 < __min2___0 ? 
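/*
 * Annotation: the conditional interrupted here is CIL's expansion of
 * min(strlen(revision), 4) inside spc_emulate_inquiry_std(). The standard
 * INQUIRY data assembled above follows the usual layout: byte 1 = 0x80 (RMB)
 * when the backend reports device type 1 (tape), byte 2 = 0x05 (SPC-3
 * compliance), byte 3 = 0x02 (response data format), byte 5 carrying the
 * SCCS/TPGS (ALUA), 3PC and PROTECT capability bits, byte 7 = 0x02 (CmdQue),
 * bytes 8-15 the vendor string "LIO-ORG ", bytes 16-31 the space-padded
 * model, bytes 32-35 the revision, and byte 4 the ADDITIONAL LENGTH of 31
 * (36 bytes of standard data in total).
 */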
__min1___0 : __min2___0); *(buf + 4UL) = 31U; return (0U); } } static char const __kstrtab_spc_emulate_inquiry_std[24U] = { 's', 'p', 'c', '_', 'e', 'm', 'u', 'l', 'a', 't', 'e', '_', 'i', 'n', 'q', 'u', 'i', 'r', 'y', '_', 's', 't', 'd', '\000'}; struct kernel_symbol const __ksymtab_spc_emulate_inquiry_std ; struct kernel_symbol const __ksymtab_spc_emulate_inquiry_std = {(unsigned long )(& spc_emulate_inquiry_std), (char const *)(& __kstrtab_spc_emulate_inquiry_std)}; static sense_reason_t spc_emulate_evpd_80(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; u16 len ; int tmp ; { dev = cmd->se_dev; if ((dev->dev_flags & 4U) != 0U) { tmp = sprintf((char *)buf + 4U, "%s", (char *)(& dev->t10_wwn.unit_serial)); len = (u16 )tmp; len = (u16 )((int )len + 1); *(buf + 3UL) = (unsigned char )len; } else { } return (0U); } } void spc_parse_naa_6h_vendor_specific(struct se_device *dev , unsigned char *buf ) { unsigned char *p ; int cnt ; bool next ; int val ; int tmp ; int tmp___0 ; int tmp___1 ; { p = (unsigned char *)(& dev->t10_wwn.unit_serial); next = 1; cnt = 0; goto ldv_59446; ldv_59445: tmp = hex_to_bin((int )((char )*p)); val = tmp; if (val < 0) { goto ldv_59444; } else { } if ((int )next) { next = 0; tmp___0 = cnt; cnt = cnt + 1; tmp___1 = cnt; cnt = cnt + 1; *(buf + (unsigned long )tmp___0) = (unsigned char )((int )((signed char )*(buf + (unsigned long )tmp___1)) | (int )((signed char )val)); } else { next = 1; *(buf + (unsigned long )cnt) = (int )((unsigned char )val) << 4U; } ldv_59444: p = p + 1; ldv_59446: ; if ((unsigned int )*p != 0U && cnt <= 12) { goto ldv_59445; } else { } return; } } sense_reason_t spc_emulate_evpd_83(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; struct se_lun *lun ; struct se_portal_group *tpg ; struct t10_alua_lu_gp_member *lu_gp_mem ; struct t10_alua_tg_pt_gp *tg_pt_gp ; unsigned char *prod ; u32 prod_len ; u32 unit_serial_len ; u32 off ; u16 len ; u16 id_len ; u32 tmp ; u32 tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u32 tmp___3 ; u32 tmp___4 ; size_t tmp___5 ; size_t tmp___6 ; int tmp___7 ; struct t10_alua_lu_gp *lu_gp ; u32 padding ; u32 scsi_name_len ; u32 scsi_target_len ; u16 lu_gp_id ; u16 tg_pt_gp_id ; u16 tpgt ; u32 tmp___8 ; u32 tmp___9 ; u32 tmp___10 ; u32 tmp___11 ; u32 tmp___12 ; u32 tmp___13 ; u32 tmp___14 ; u32 tmp___15 ; u32 tmp___16 ; u32 tmp___17 ; u32 tmp___18 ; u32 tmp___19 ; u32 tmp___20 ; u32 tmp___21 ; u32 tmp___22 ; u32 tmp___23 ; u32 tmp___24 ; char *tmp___25 ; int tmp___26 ; u32 tmp___27 ; u32 tmp___28 ; char *tmp___29 ; int tmp___30 ; { dev = cmd->se_dev; lun = cmd->se_lun; tpg = (struct se_portal_group *)0; prod = (unsigned char *)(& dev->t10_wwn.model); off = 0U; len = 0U; off = 4U; if ((dev->dev_flags & 4U) == 0U) { goto check_t10_vend_desc; } else { } tmp = off; off = off + 1U; *(buf + (unsigned long )tmp) = 1U; *(buf + (unsigned long )off) = 0U; tmp___0 = off; off = off + 1U; *(buf + (unsigned long )tmp___0) = (unsigned int )*(buf + (unsigned long )tmp___0) | 3U; off = off + 1U; tmp___1 = off; off = off + 1U; *(buf + (unsigned long )tmp___1) = 16U; tmp___2 = off; off = off + 1U; *(buf + (unsigned long )tmp___2) = 96U; tmp___3 = off; off = off + 1U; *(buf + (unsigned long )tmp___3) = 1U; tmp___4 = off; off = off + 1U; *(buf + (unsigned long )tmp___4) = 64U; *(buf + (unsigned long )off) = 80U; spc_parse_naa_6h_vendor_specific(dev, buf + (unsigned long )off); len = 20U; off = (u32 )((int )len + 4); check_t10_vend_desc: id_len = 8U; prod_len = 4U; prod_len = prod_len + 8U; tmp___5 = strlen((char 
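/*
 * Annotation: the first designator emitted in spc_emulate_evpd_83() above
 * (before the check_t10_vend_desc label) is the NAA IEEE Registered Extended
 * identifier: the literal bytes 0x60, 0x01, 0x40 and 0x50 appear to encode
 * NAA = 6h plus the IEEE company ID 0x001405, and
 * spc_parse_naa_6h_vendor_specific() then fills the remaining nibbles by
 * packing the hex digits of the unit serial number two per byte, skipping
 * any character that hex_to_bin() rejects. The descriptor being started at
 * check_t10_vend_desc is the T10 vendor identification designator built from
 * "LIO-ORG" plus the model and unit serial strings.
 */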
const *)prod); prod_len = (u32 )tmp___5 + prod_len; prod_len = prod_len + 1U; if ((dev->dev_flags & 4U) != 0U) { tmp___6 = strlen((char const *)(& dev->t10_wwn.unit_serial)); unit_serial_len = (u32 )tmp___6; unit_serial_len = unit_serial_len + 1U; tmp___7 = sprintf((char *)buf + (unsigned long )(off + 12U), "%s:%s", prod, (char *)(& dev->t10_wwn.unit_serial)); id_len = (int )((u16 )tmp___7) + (int )id_len; } else { } *(buf + (unsigned long )off) = 2U; *(buf + (unsigned long )(off + 1U)) = 1U; *(buf + (unsigned long )(off + 2U)) = 0U; memcpy((void *)buf + (unsigned long )(off + 4U), (void const *)"LIO-ORG", 8UL); id_len = (u16 )((int )id_len + 1); *(buf + (unsigned long )(off + 3U)) = (unsigned char )id_len; len = (unsigned int )((int )id_len + (int )len) + 4U; off = ((u32 )id_len + off) + 4U; lu_gp_id = 0U; tg_pt_gp_id = 0U; tpg = lun->lun_tpg; *(buf + (unsigned long )off) = (int )((unsigned char )tpg->proto_id) << 4U; tmp___8 = off; off = off + 1U; *(buf + (unsigned long )tmp___8) = (unsigned int )*(buf + (unsigned long )tmp___8) | 1U; *(buf + (unsigned long )off) = 128U; *(buf + (unsigned long )off) = (unsigned int )*(buf + (unsigned long )off) | 16U; tmp___9 = off; off = off + 1U; *(buf + (unsigned long )tmp___9) = (unsigned int )*(buf + (unsigned long )tmp___9) | 4U; off = off + 1U; tmp___10 = off; off = off + 1U; *(buf + (unsigned long )tmp___10) = 4U; off = off + 2U; tmp___11 = off; off = off + 1U; *(buf + (unsigned long )tmp___11) = (unsigned char )((int )lun->lun_rtpi >> 8); tmp___12 = off; off = off + 1U; *(buf + (unsigned long )tmp___12) = (unsigned char )lun->lun_rtpi; len = (unsigned int )len + 8U; spin_lock(& lun->lun_tg_pt_gp_lock); tg_pt_gp = lun->lun_tg_pt_gp; if ((unsigned long )tg_pt_gp == (unsigned long )((struct t10_alua_tg_pt_gp *)0)) { spin_unlock(& lun->lun_tg_pt_gp_lock); goto check_lu_gp; } else { } tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; spin_unlock(& lun->lun_tg_pt_gp_lock); *(buf + (unsigned long )off) = (int )((unsigned char )tpg->proto_id) << 4U; tmp___13 = off; off = off + 1U; *(buf + (unsigned long )tmp___13) = (unsigned int )*(buf + (unsigned long )tmp___13) | 1U; *(buf + (unsigned long )off) = 128U; *(buf + (unsigned long )off) = (unsigned int )*(buf + (unsigned long )off) | 16U; tmp___14 = off; off = off + 1U; *(buf + (unsigned long )tmp___14) = (unsigned int )*(buf + (unsigned long )tmp___14) | 5U; off = off + 1U; tmp___15 = off; off = off + 1U; *(buf + (unsigned long )tmp___15) = 4U; off = off + 2U; tmp___16 = off; off = off + 1U; *(buf + (unsigned long )tmp___16) = (unsigned char )((int )tg_pt_gp_id >> 8); tmp___17 = off; off = off + 1U; *(buf + (unsigned long )tmp___17) = (unsigned char )tg_pt_gp_id; len = (unsigned int )len + 8U; check_lu_gp: lu_gp_mem = dev->dev_alua_lu_gp_mem; if ((unsigned long )lu_gp_mem == (unsigned long )((struct t10_alua_lu_gp_member *)0)) { goto check_scsi_name; } else { } spin_lock(& lu_gp_mem->lu_gp_mem_lock); lu_gp = lu_gp_mem->lu_gp; if ((unsigned long )lu_gp == (unsigned long )((struct t10_alua_lu_gp *)0)) { spin_unlock(& lu_gp_mem->lu_gp_mem_lock); goto check_scsi_name; } else { } lu_gp_id = lu_gp->lu_gp_id; spin_unlock(& lu_gp_mem->lu_gp_mem_lock); tmp___18 = off; off = off + 1U; *(buf + (unsigned long )tmp___18) = (unsigned int )*(buf + (unsigned long )tmp___18) | 1U; tmp___19 = off; off = off + 1U; *(buf + (unsigned long )tmp___19) = (unsigned int )*(buf + (unsigned long )tmp___19) | 6U; off = off + 1U; tmp___20 = off; off = off + 1U; *(buf + (unsigned long )tmp___20) = 4U; off = off + 2U; tmp___21 = off; off = off + 
1U; *(buf + (unsigned long )tmp___21) = (unsigned char )((int )lu_gp_id >> 8); tmp___22 = off; off = off + 1U; *(buf + (unsigned long )tmp___22) = (unsigned char )lu_gp_id; len = (unsigned int )len + 8U; check_scsi_name: *(buf + (unsigned long )off) = (int )((unsigned char )tpg->proto_id) << 4U; tmp___23 = off; off = off + 1U; *(buf + (unsigned long )tmp___23) = (unsigned int )*(buf + (unsigned long )tmp___23) | 3U; *(buf + (unsigned long )off) = 128U; *(buf + (unsigned long )off) = (unsigned int )*(buf + (unsigned long )off) | 16U; tmp___24 = off; off = off + 1U; *(buf + (unsigned long )tmp___24) = (unsigned int )*(buf + (unsigned long )tmp___24) | 8U; off = off + 2U; tpgt = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___25 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___26 = sprintf((char *)buf + (unsigned long )off, "%s,t,0x%04x", tmp___25, (int )tpgt); scsi_name_len = (u32 )tmp___26; scsi_name_len = scsi_name_len + 1U; padding = - scsi_name_len & 3U; if (padding != 0U) { scsi_name_len = scsi_name_len + padding; } else { } if (scsi_name_len > 256U) { scsi_name_len = 256U; } else { } *(buf + (unsigned long )(off - 1U)) = (unsigned char )scsi_name_len; off = off + scsi_name_len; len = (unsigned int )((int )((u16 )scsi_name_len) + (int )len) + 4U; *(buf + (unsigned long )off) = (int )((unsigned char )tpg->proto_id) << 4U; tmp___27 = off; off = off + 1U; *(buf + (unsigned long )tmp___27) = (unsigned int )*(buf + (unsigned long )tmp___27) | 3U; *(buf + (unsigned long )off) = 128U; *(buf + (unsigned long )off) = (unsigned int )*(buf + (unsigned long )off) | 32U; tmp___28 = off; off = off + 1U; *(buf + (unsigned long )tmp___28) = (unsigned int )*(buf + (unsigned long )tmp___28) | 8U; off = off + 2U; tmp___29 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___30 = sprintf((char *)buf + (unsigned long )off, "%s", tmp___29); scsi_target_len = (u32 )tmp___30; scsi_target_len = scsi_target_len + 1U; padding = - scsi_target_len & 3U; if (padding != 0U) { scsi_target_len = scsi_target_len + padding; } else { } if (scsi_target_len > 256U) { scsi_target_len = 256U; } else { } *(buf + (unsigned long )(off - 1U)) = (unsigned char )scsi_target_len; off = off + scsi_target_len; len = (unsigned int )((int )((u16 )scsi_target_len) + (int )len) + 4U; *(buf + 2UL) = (unsigned char )((int )len >> 8); *(buf + 3UL) = (unsigned char )len; return (0U); } } static char const __kstrtab_spc_emulate_evpd_83[20U] = { 's', 'p', 'c', '_', 'e', 'm', 'u', 'l', 'a', 't', 'e', '_', 'e', 'v', 'p', 'd', '_', '8', '3', '\000'}; struct kernel_symbol const __ksymtab_spc_emulate_evpd_83 ; struct kernel_symbol const __ksymtab_spc_emulate_evpd_83 = {(unsigned long )(& spc_emulate_evpd_83), (char const *)(& __kstrtab_spc_emulate_evpd_83)}; static sense_reason_t spc_emulate_evpd_86(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; struct se_session *sess ; bool tmp ; int tmp___0 ; { dev = cmd->se_dev; sess = cmd->se_sess; *(buf + 3UL) = 60U; if (((unsigned int )sess->sup_prot_ops & 48U) != 0U) { if ((unsigned int )dev->dev_attrib.pi_prot_type == 1U || (unsigned int )(cmd->se_sess)->sess_prot_type == 1U) { *(buf + 4UL) = 5U; } else if ((unsigned int )dev->dev_attrib.pi_prot_type == 3U || (unsigned int )(cmd->se_sess)->sess_prot_type == 3U) { *(buf + 4UL) = 4U; } else { } } else { } *(buf + 5UL) = 7U; tmp = target_check_wce(dev); if ((int )tmp) { *(buf + 6UL) = 1U; } else { } spin_lock(& (cmd->se_dev)->t10_alua.lba_map_lock); tmp___0 = list_empty((struct list_head const *)(& dev->t10_alua.lba_map_list)); if (tmp___0 
== 0) { *(buf + 8UL) = 16U; } else { } spin_unlock(& (cmd->se_dev)->t10_alua.lba_map_lock); return (0U); } } static sense_reason_t spc_emulate_evpd_b0(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; int have_tp ; int opt ; int min ; u32 tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { dev = cmd->se_dev; have_tp = 0; if (dev->dev_attrib.emulate_tpu != 0 || dev->dev_attrib.emulate_tpws != 0) { have_tp = 1; } else { } tmp = (*((dev->transport)->get_device_type))(dev); *buf = (unsigned char )tmp; *(buf + 3UL) = have_tp != 0 ? 60U : 16U; *(buf + 4UL) = 1U; if (dev->dev_attrib.emulate_caw != 0) { *(buf + 5UL) = 1U; } else { } if ((unsigned long )(dev->transport)->get_io_min != (unsigned long )((unsigned int (*/* const */)(struct se_device * ))0)) { tmp___0 = (*((dev->transport)->get_io_min))(dev); min = (int )tmp___0; if (min != 0) { put_unaligned_be16((int )((u16 )((u32 )min / dev->dev_attrib.block_size)), (void *)buf + 6U); } else { put_unaligned_be16(1, (void *)buf + 6U); } } else { put_unaligned_be16(1, (void *)buf + 6U); } put_unaligned_be32(dev->dev_attrib.hw_max_sectors, (void *)buf + 8U); if ((unsigned long )(dev->transport)->get_io_opt != (unsigned long )((unsigned int (*/* const */)(struct se_device * ))0)) { tmp___1 = (*((dev->transport)->get_io_opt))(dev); opt = (int )tmp___1; if (opt != 0) { put_unaligned_be32((u32 )opt / dev->dev_attrib.block_size, (void *)buf + 12U); } else { put_unaligned_be32(dev->dev_attrib.optimal_sectors, (void *)buf + 12U); } } else { put_unaligned_be32(dev->dev_attrib.optimal_sectors, (void *)buf + 12U); } if (have_tp == 0) { goto max_write_same; } else { } put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, (void *)buf + 20U); put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count, (void *)buf + 24U); put_unaligned_be32(dev->dev_attrib.unmap_granularity, (void *)buf + 28U); put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment, (void *)buf + 32U); if (dev->dev_attrib.unmap_granularity_alignment != 0U) { *(buf + 32UL) = (unsigned int )*(buf + 32UL) | 128U; } else { } max_write_same: put_unaligned_be64((u64 )dev->dev_attrib.max_write_same_len, (void *)buf + 36U); return (0U); } } static sense_reason_t spc_emulate_evpd_b1(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; u32 tmp ; { dev = cmd->se_dev; tmp = (*((dev->transport)->get_device_type))(dev); *buf = (unsigned char )tmp; *(buf + 3UL) = 60U; *(buf + 5UL) = dev->dev_attrib.is_nonrot != 0; return (0U); } } static sense_reason_t spc_emulate_evpd_b2(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; u32 tmp ; { dev = cmd->se_dev; tmp = (*((dev->transport)->get_device_type))(dev); *buf = (unsigned char )tmp; put_unaligned_be16(4, (void *)buf + 2U); *(buf + 4UL) = 0U; if (dev->dev_attrib.emulate_tpu != 0) { *(buf + 5UL) = 128U; } else { } if (dev->dev_attrib.emulate_tpws != 0) { *(buf + 5UL) = (unsigned int )*(buf + 5UL) | 96U; } else { } return (0U); } } static sense_reason_t spc_emulate_evpd_b3(struct se_cmd *cmd , unsigned char *buf ) { struct se_device *dev ; u32 tmp ; { dev = cmd->se_dev; tmp = (*((dev->transport)->get_device_type))(dev); *buf = (unsigned char )tmp; *(buf + 3UL) = 12U; put_unaligned_be32(dev->t10_alua.lba_map_segment_size, (void *)buf + 8U); put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, (void *)buf + 12U); return (0U); } } static sense_reason_t spc_emulate_evpd_00(struct se_cmd *cmd , unsigned char *buf ) ; static struct __anonstruct_evpd_handlers_396 evpd_handlers[8U] = { {0U, & 
spc_emulate_evpd_00}, {128U, & spc_emulate_evpd_80}, {131U, & spc_emulate_evpd_83}, {134U, & spc_emulate_evpd_86}, {176U, & spc_emulate_evpd_b0}, {177U, & spc_emulate_evpd_b1}, {178U, & spc_emulate_evpd_b2}, {179U, & spc_emulate_evpd_b3}}; static sense_reason_t spc_emulate_evpd_00(struct se_cmd *cmd , unsigned char *buf ) { int p ; { if (((cmd->se_dev)->dev_flags & 4U) != 0U) { *(buf + 3UL) = 8U; p = 0; goto ldv_59530; ldv_59529: *(buf + ((unsigned long )p + 4UL)) = evpd_handlers[p].page; p = p + 1; ldv_59530: ; if ((unsigned int )p <= 7U) { goto ldv_59529; } else { } } else { } return (0U); } } static sense_reason_t spc_emulate_inquiry(struct se_cmd *cmd ) { struct se_device *dev ; struct se_portal_group *tpg ; unsigned char *rbuf ; unsigned char *cdb ; unsigned char *buf ; sense_reason_t ret ; int p ; int len ; void *tmp ; u32 tmp___0 ; struct se_device *_________p1 ; union __anonunion___u_401 __u ; u16 tmp___1 ; void *tmp___2 ; u32 __min1 ; u32 __min2 ; { dev = cmd->se_dev; tpg = (cmd->se_lun)->lun_tpg; cdb = cmd->t_task_cdb; len = 0; tmp = kzalloc(1024UL, 208U); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { printk("\vUnable to allocate response buffer for INQUIRY\n"); return (10U); } else { } __read_once_size((void const volatile *)(& (tpg->tpg_virt_lun0)->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; if ((unsigned long )_________p1 == (unsigned long )dev) { *buf = 63U; } else { tmp___0 = (*((dev->transport)->get_device_type))(dev); *buf = (unsigned char )tmp___0; } if (((int )*(cdb + 1UL) & 1) == 0) { if ((unsigned int )*(cdb + 2UL) != 0U) { printk("\vINQUIRY with EVPD==0 but PAGE CODE=%02x\n", (int )*(cdb + 2UL)); ret = 8U; goto out; } else { } ret = spc_emulate_inquiry_std(cmd, buf); len = (int )*(buf + 4UL) + 5; goto out; } else { } p = 0; goto ldv_59554; ldv_59553: ; if ((int )*(cdb + 2UL) == (int )evpd_handlers[p].page) { *(buf + 1UL) = *(cdb + 2UL); ret = (*(evpd_handlers[p].emulate))(cmd, buf); tmp___1 = get_unaligned_be16((void const *)buf + 2U); len = (int )tmp___1 + 4; goto out; } else { } p = p + 1; ldv_59554: ; if ((unsigned int )p <= 7U) { goto ldv_59553; } else { } printk("\vUnknown VPD Code: 0x%02x\n", (int )*(cdb + 2UL)); ret = 8U; out: tmp___2 = transport_kmap_data_sg(cmd); rbuf = (unsigned char *)tmp___2; if ((unsigned long )rbuf != (unsigned long )((unsigned char *)0U)) { __min1 = 1024U; __min2 = cmd->data_length; memcpy((void *)rbuf, (void const *)buf, (size_t )(__min1 < __min2 ? __min1 : __min2)); transport_kunmap_data_sg(cmd); } else { } kfree((void const *)buf); if (ret == 0U) { target_complete_cmd_with_length(cmd, 0, len); } else { } return (ret); } } static int spc_modesense_rwrecovery(struct se_cmd *cmd , u8 pc , u8 *p ) { { *p = 1U; *(p + 1UL) = 10U; if ((unsigned int )pc == 1U) { } else { } return (12); } } static int spc_modesense_control(struct se_cmd *cmd , u8 pc , u8 *p ) { struct se_device *dev ; struct se_session *sess ; { dev = cmd->se_dev; sess = cmd->se_sess; *p = 10U; *(p + 1UL) = 10U; if ((unsigned int )pc == 1U) { goto out; } else { } *(p + 2UL) = 2U; *(p + 3UL) = dev->dev_attrib.emulate_rest_reord == 1 ? 0U : 16U; *(p + 4UL) = dev->dev_attrib.emulate_ua_intlck_ctrl != 2 ? (dev->dev_attrib.emulate_ua_intlck_ctrl == 1 ? 32U : 0U) : 48U; *(p + 5UL) = dev->dev_attrib.emulate_tas != 0 ? 
64U : 0U; if (((unsigned int )sess->sup_prot_ops & 48U) != 0U) { if ((unsigned int )dev->dev_attrib.pi_prot_type != 0U || (unsigned int )sess->sess_prot_type != 0U) { *(p + 5UL) = (u8 )((unsigned int )*(p + 5UL) | 128U); } else { } } else { } *(p + 8UL) = 255U; *(p + 9UL) = 255U; *(p + 11UL) = 30U; out: ; return (12); } } static int spc_modesense_caching(struct se_cmd *cmd , u8 pc , u8 *p ) { struct se_device *dev ; bool tmp ; { dev = cmd->se_dev; *p = 8U; *(p + 1UL) = 18U; if ((unsigned int )pc == 1U) { goto out; } else { } tmp = target_check_wce(dev); if ((int )tmp) { *(p + 2UL) = 4U; } else { } *(p + 12UL) = 32U; out: ; return (20); } } static int spc_modesense_informational_exceptions(struct se_cmd *cmd , u8 pc , unsigned char *p ) { { *p = 28U; *(p + 1UL) = 10U; if ((unsigned int )pc == 1U) { } else { } return (12); } } static struct __anonstruct_modesense_handlers_402 modesense_handlers[4U] = { {1U, 0U, & spc_modesense_rwrecovery}, {8U, 0U, & spc_modesense_caching}, {10U, 0U, & spc_modesense_control}, {28U, 0U, & spc_modesense_informational_exceptions}}; static void spc_modesense_write_protect(unsigned char *buf , int type ) { { switch (type) { case 0: ; case 1: ; default: *buf = (unsigned int )*buf | 128U; goto ldv_59601; } ldv_59601: ; return; } } static void spc_modesense_dpofua(unsigned char *buf , int type ) { { switch (type) { case 0: *buf = (unsigned int )*buf | 16U; goto ldv_59607; default: ; goto ldv_59607; } ldv_59607: ; return; } } static int spc_modesense_blockdesc(unsigned char *buf , u64 blocks , u32 block_size___0 ) { unsigned char *tmp ; u64 _min1 ; unsigned long long _min2 ; { tmp = buf; buf = buf + 1; *tmp = 8U; _min1 = blocks; _min2 = 4294967295ULL; put_unaligned_be32((u32 )(_min1 < _min2 ? _min1 : _min2), (void *)buf); buf = buf + 4UL; put_unaligned_be32(block_size___0, (void *)buf); return (9); } } static int spc_modesense_long_blockdesc(unsigned char *buf , u64 blocks , u32 block_size___0 ) { int tmp ; unsigned char *tmp___0 ; unsigned char *tmp___1 ; { if (blocks <= 4294967295ULL) { tmp = spc_modesense_blockdesc(buf + 3UL, blocks, block_size___0); return (tmp + 3); } else { } tmp___0 = buf; buf = buf + 1; *tmp___0 = 1U; buf = buf + 2UL; tmp___1 = buf; buf = buf + 1; *tmp___1 = 16U; put_unaligned_be64(blocks, (void *)buf); buf = buf + 12UL; put_unaligned_be32(block_size___0, (void *)buf); return (17); } } static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd ) { struct se_device *dev ; char *cdb ; unsigned char buf[512U] ; unsigned char *rbuf ; int type ; u32 tmp ; int ten ; bool dbd ; bool llba ; u8 pc ; u8 page ; u8 subpage ; int length ; int ret ; int i ; bool read_only ; bool tmp___0 ; bool tmp___1 ; u64 blocks ; sector_t tmp___2 ; u32 block_size___0 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; void *tmp___7 ; u32 __min1 ; u32 __min2 ; { dev = cmd->se_dev; cdb = (char *)cmd->t_task_cdb; tmp = (*((dev->transport)->get_device_type))(dev); type = (int )tmp; ten = (unsigned int )*(cmd->t_task_cdb) == 90U; dbd = ((int )*(cdb + 1UL) & 8) != 0; llba = (bool )(ten != 0 && ((int )*(cdb + 1UL) & 16) != 0); pc = (u8 )((int )((signed char )*(cdb + 2UL)) >> 6); page = (unsigned int )((u8 )*(cdb + 2UL)) & 63U; subpage = (u8 )*(cdb + 3UL); length = 0; tmp___0 = target_lun_is_rdonly(cmd); read_only = tmp___0; memset((void *)(& buf), 0, 512UL); length = ten != 0 ? 
3 : 2; if ((int )(cmd->se_lun)->lun_access & 1 || (int )read_only) { spc_modesense_write_protect((unsigned char *)(& buf) + (unsigned long )length, type); } else { } tmp___1 = target_check_fua(dev); if ((int )tmp___1) { spc_modesense_dpofua((unsigned char *)(& buf) + (unsigned long )length, type); } else { } length = length + 1; if (! dbd && type == 0) { tmp___2 = (*((dev->transport)->get_blocks))(dev); blocks = (u64 )tmp___2; block_size___0 = dev->dev_attrib.block_size; if (ten != 0) { if ((int )llba) { tmp___3 = spc_modesense_long_blockdesc((unsigned char *)(& buf) + (unsigned long )length, blocks, block_size___0); length = tmp___3 + length; } else { length = length + 3; tmp___4 = spc_modesense_blockdesc((unsigned char *)(& buf) + (unsigned long )length, blocks, block_size___0); length = tmp___4 + length; } } else { tmp___5 = spc_modesense_blockdesc((unsigned char *)(& buf) + (unsigned long )length, blocks, block_size___0); length = tmp___5 + length; } } else if (ten != 0) { length = length + 4; } else { length = length + 1; } if ((unsigned int )page == 63U) { if ((unsigned int )subpage != 0U && (unsigned int )subpage != 255U) { printk("\fMODE_SENSE: Invalid subpage code: 0x%02x\n", (int )subpage); return (8U); } else { } i = 0; goto ldv_59646; ldv_59645: ; if (((int )modesense_handlers[i].subpage & ~ ((int )subpage)) == 0) { ret = (*(modesense_handlers[i].emulate))(cmd, (int )pc, (unsigned char *)(& buf) + (unsigned long )length); if (ten == 0 && length + ret > 254) { goto ldv_59644; } else { } length = length + ret; } else { } i = i + 1; ldv_59646: ; if ((unsigned int )i <= 3U) { goto ldv_59645; } else { } ldv_59644: ; goto set_length; } else { } i = 0; goto ldv_59651; ldv_59650: ; if ((int )modesense_handlers[i].page == (int )page && (int )modesense_handlers[i].subpage == (int )subpage) { tmp___6 = (*(modesense_handlers[i].emulate))(cmd, (int )pc, (unsigned char *)(& buf) + (unsigned long )length); length = tmp___6 + length; goto set_length; } else { } i = i + 1; ldv_59651: ; if ((unsigned int )i <= 3U) { goto ldv_59650; } else { } if ((unsigned int )page != 3U) { printk("\vMODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", (int )page, (int )subpage); } else { } return (11U); set_length: ; if (ten != 0) { put_unaligned_be16((int )((unsigned int )((u16 )length) + 65534U), (void *)(& buf)); } else { buf[0] = (unsigned int )((unsigned char )length) + 255U; } tmp___7 = transport_kmap_data_sg(cmd); rbuf = (unsigned char *)tmp___7; if ((unsigned long )rbuf != (unsigned long )((unsigned char *)0U)) { __min1 = 512U; __min2 = cmd->data_length; memcpy((void *)rbuf, (void const *)(& buf), (size_t )(__min1 < __min2 ? __min1 : __min2)); transport_kunmap_data_sg(cmd); } else { } target_complete_cmd_with_length(cmd, 0, length); return (0U); } } static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd ) { char *cdb ; bool ten ; int off ; bool pf ; u8 page ; u8 subpage ; unsigned char *buf ; unsigned char tbuf[512U] ; int length ; sense_reason_t ret ; int i ; void *tmp ; int tmp___0 ; { cdb = (char *)cmd->t_task_cdb; ten = (int )((signed char )*cdb) == 85; off = (int )ten ? 8 : 4; pf = ((int )*(cdb + 1UL) & 16) != 0; ret = 0U; if (cmd->data_length == 0U) { target_complete_cmd(cmd, 0); return (0U); } else { } if (cmd->data_length < (u32 )(off + 2)) { return (19U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } if (! 
pf) { ret = 8U; goto out; } else { } page = (unsigned int )*(buf + (unsigned long )off) & 63U; subpage = ((int )*(buf + (unsigned long )off) & 64) != 0 ? *(buf + ((unsigned long )off + 1UL)) : 0U; i = 0; goto ldv_59675; ldv_59674: ; if ((int )modesense_handlers[i].page == (int )page && (int )modesense_handlers[i].subpage == (int )subpage) { memset((void *)(& tbuf), 0, 512UL); length = (*(modesense_handlers[i].emulate))(cmd, 0, (unsigned char *)(& tbuf)); goto check_contents; } else { } i = i + 1; ldv_59675: ; if ((unsigned int )i <= 3U) { goto ldv_59674; } else { } ret = 11U; goto out; check_contents: ; if (cmd->data_length < (u32 )(off + length)) { ret = 19U; goto out; } else { } tmp___0 = memcmp((void const *)buf + (unsigned long )off, (void const *)(& tbuf), (size_t )length); if (tmp___0 != 0) { ret = 9U; } else { } out: transport_kunmap_data_sg(cmd); if (ret == 0U) { target_complete_cmd(cmd, 0); } else { } return (ret); } } static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd ) { unsigned char *cdb ; unsigned char *rbuf ; u8 ua_asc ; u8 ua_ascq ; unsigned char buf[96U] ; void *tmp ; int tmp___0 ; u32 __min1 ; u32 __min2 ; { cdb = cmd->t_task_cdb; ua_asc = 0U; ua_ascq = 0U; memset((void *)(& buf), 0, 96UL); if ((int )*(cdb + 1UL) & 1) { printk("\vREQUEST_SENSE description emulation not supported\n"); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); rbuf = (unsigned char *)tmp; if ((unsigned long )rbuf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } tmp___0 = core_scsi3_ua_clear_for_request_sense(cmd, & ua_asc, & ua_ascq); if (tmp___0 == 0) { buf[0] = 112U; buf[2] = 6U; buf[12] = ua_asc; buf[13] = ua_ascq; buf[7] = 10U; } else { buf[0] = 112U; buf[2] = 0U; buf[12] = 0U; buf[7] = 10U; } __min1 = 96U; __min2 = cmd->data_length; memcpy((void *)rbuf, (void const *)(& buf), (size_t )(__min1 < __min2 ? 
__min1 : __min2)); transport_kunmap_data_sg(cmd); target_complete_cmd(cmd, 0); return (0U); } } sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd ) { struct se_dev_entry *deve ; struct se_session *sess ; struct se_node_acl *nacl ; unsigned char *buf ; u32 lun_count ; u32 offset ; void *tmp ; struct hlist_node *____ptr ; struct hlist_node *________p1 ; struct hlist_node *_________p1 ; union __anonunion___u_407 __u ; int tmp___0 ; struct hlist_node const *__mptr ; struct se_dev_entry *tmp___1 ; struct hlist_node *____ptr___0 ; struct hlist_node *________p1___0 ; struct hlist_node *_________p1___0 ; union __anonunion___u_409 __u___0 ; int tmp___2 ; struct hlist_node const *__mptr___0 ; struct se_dev_entry *tmp___3 ; { sess = cmd->se_sess; lun_count = 0U; offset = 8U; if (cmd->data_length <= 15U) { printk("\fREPORT LUNS allocation length %u too small\n", cmd->data_length); return (8U); } else { } tmp = transport_kmap_data_sg(cmd); buf = (unsigned char *)tmp; if ((unsigned long )buf == (unsigned long )((unsigned char *)0U)) { return (10U); } else { } if ((unsigned long )sess == (unsigned long )((struct se_session *)0)) { int_to_scsilun(0ULL, (struct scsi_lun *)buf + (unsigned long )offset); lun_count = 1U; goto done; } else { } nacl = sess->se_node_acl; rcu_read_lock___0(); __read_once_size((void const volatile *)(& nacl->lun_entry_hlist.first), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp___0 = debug_lockdep_rcu_enabled(); ____ptr = ________p1; if ((unsigned long )____ptr != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)____ptr; tmp___1 = (struct se_dev_entry *)__mptr + 0xfffffffffffffed0UL; } else { tmp___1 = (struct se_dev_entry *)0; } deve = tmp___1; goto ldv_59748; ldv_59747: lun_count = lun_count + 1U; if (offset + 8U > cmd->data_length) { goto ldv_59746; } else { } int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)buf + (unsigned long )offset); offset = offset + 8U; ldv_59746: __read_once_size((void const volatile *)(& deve->link.next), (void *)(& __u___0.__c), 8); _________p1___0 = __u___0.__val; ________p1___0 = _________p1___0; tmp___2 = debug_lockdep_rcu_enabled(); ____ptr___0 = ________p1___0; if ((unsigned long )____ptr___0 != (unsigned long )((struct hlist_node *)0)) { __mptr___0 = (struct hlist_node const *)____ptr___0; tmp___3 = (struct se_dev_entry *)__mptr___0 + 0xfffffffffffffed0UL; } else { tmp___3 = (struct se_dev_entry *)0; } deve = tmp___3; ldv_59748: ; if ((unsigned long )deve != (unsigned long )((struct se_dev_entry *)0)) { goto ldv_59747; } else { } rcu_read_unlock___0(); done: lun_count = lun_count * 8U; *buf = (unsigned char )(lun_count >> 24); *(buf + 1UL) = (unsigned char )(lun_count >> 16); *(buf + 2UL) = (unsigned char )(lun_count >> 8); *(buf + 3UL) = (unsigned char )lun_count; transport_kunmap_data_sg(cmd); target_complete_cmd_with_length(cmd, 0, (int )((lun_count + 1U) * 8U)); return (0U); } } static char const __kstrtab_spc_emulate_report_luns[24U] = { 's', 'p', 'c', '_', 'e', 'm', 'u', 'l', 'a', 't', 'e', '_', 'r', 'e', 'p', 'o', 'r', 't', '_', 'l', 'u', 'n', 's', '\000'}; struct kernel_symbol const __ksymtab_spc_emulate_report_luns ; struct kernel_symbol const __ksymtab_spc_emulate_report_luns = {(unsigned long )(& spc_emulate_report_luns), (char const *)(& __kstrtab_spc_emulate_report_luns)}; static sense_reason_t spc_emulate_testunitready(struct se_cmd *cmd ) { { target_complete_cmd(cmd, 0); return (0U); } } sense_reason_t spc_parse_cdb(struct se_cmd *cmd , unsigned int *size ) 
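/*
 * spc_parse_cdb(): front-end dispatch for the SPC command set.  The switch
 * below keys off the opcode in cdb[0], pulls the allocation/transfer length
 * out of the CDB bytes into *size, and points cmd->execute_cmd at the
 * matching handler defined above (spc_emulate_modeselect, spc_emulate_modesense,
 * spc_emulate_request_sense, spc_emulate_inquiry, spc_emulate_report_luns,
 * spc_emulate_testunitready, plus the persistent-reservation, legacy
 * reserve/release, XCOPY and ALUA port-group helpers).  Opcodes that match
 * no case reach the default branch, which logs the unsupported opcode and
 * returns sense_reason 2U so the caller can send CHECK CONDITION.
 */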
{ struct se_device *dev ; unsigned char *cdb ; u16 tmp ; u32 tmp___0 ; u16 tmp___1 ; u32 tmp___2 ; char *tmp___3 ; { dev = cmd->se_dev; cdb = cmd->t_task_cdb; switch ((int )*cdb) { case 21: *size = (unsigned int )*(cdb + 4UL); cmd->execute_cmd = & spc_emulate_modeselect; goto ldv_59767; case 85: *size = (unsigned int )(((int )*(cdb + 7UL) << 8) + (int )*(cdb + 8UL)); cmd->execute_cmd = & spc_emulate_modeselect; goto ldv_59767; case 26: *size = (unsigned int )*(cdb + 4UL); cmd->execute_cmd = & spc_emulate_modesense; goto ldv_59767; case 90: *size = (unsigned int )(((int )*(cdb + 7UL) << 8) + (int )*(cdb + 8UL)); cmd->execute_cmd = & spc_emulate_modesense; goto ldv_59767; case 76: ; case 77: *size = (unsigned int )(((int )*(cdb + 7UL) << 8) + (int )*(cdb + 8UL)); goto ldv_59767; case 94: *size = (unsigned int )(((int )*(cdb + 7UL) << 8) + (int )*(cdb + 8UL)); cmd->execute_cmd = & target_scsi3_emulate_pr_in; goto ldv_59767; case 95: *size = (unsigned int )(((int )*(cdb + 7UL) << 8) + (int )*(cdb + 8UL)); cmd->execute_cmd = & target_scsi3_emulate_pr_out; goto ldv_59767; case 23: ; case 87: ; if ((unsigned int )*cdb == 87U) { *size = (unsigned int )(((int )*(cdb + 7UL) << 8) | (int )*(cdb + 8UL)); } else { *size = cmd->data_length; } cmd->execute_cmd = & target_scsi2_reservation_release; goto ldv_59767; case 22: ; case 86: ; if ((unsigned int )*cdb == 86U) { *size = (unsigned int )(((int )*(cdb + 7UL) << 8) | (int )*(cdb + 8UL)); } else { *size = cmd->data_length; } cmd->execute_cmd = & target_scsi2_reservation_reserve; goto ldv_59767; case 3: *size = (unsigned int )*(cdb + 4UL); cmd->execute_cmd = & spc_emulate_request_sense; goto ldv_59767; case 18: *size = (unsigned int )(((int )*(cdb + 3UL) << 8) + (int )*(cdb + 4UL)); cmd->sam_task_attr = 33; cmd->execute_cmd = & spc_emulate_inquiry; goto ldv_59767; case 162: ; case 181: *size = (unsigned int )(((((int )*(cdb + 6UL) << 24) | ((int )*(cdb + 7UL) << 16)) | ((int )*(cdb + 8UL) << 8)) | (int )*(cdb + 9UL)); goto ldv_59767; case 131: *size = get_unaligned_be32((void const *)cdb + 10U); cmd->execute_cmd = & target_do_xcopy; goto ldv_59767; case 132: *size = get_unaligned_be32((void const *)cdb + 10U); cmd->execute_cmd = & target_do_receive_copy_results; goto ldv_59767; case 140: ; case 141: *size = (unsigned int )(((((int )*(cdb + 10UL) << 24) | ((int )*(cdb + 11UL) << 16)) | ((int )*(cdb + 12UL) << 8)) | (int )*(cdb + 13UL)); goto ldv_59767; case 28: ; case 29: *size = (unsigned int )(((int )*(cdb + 3UL) << 8) | (int )*(cdb + 4UL)); goto ldv_59767; case 59: *size = (unsigned int )((((int )*(cdb + 6UL) << 16) + ((int )*(cdb + 7UL) << 8)) + (int )*(cdb + 8UL)); goto ldv_59767; case 160: cmd->execute_cmd = & spc_emulate_report_luns; *size = (unsigned int )(((((int )*(cdb + 6UL) << 24) | ((int )*(cdb + 7UL) << 16)) | ((int )*(cdb + 8UL) << 8)) | (int )*(cdb + 9UL)); cmd->sam_task_attr = 33; goto ldv_59767; case 0: cmd->execute_cmd = & spc_emulate_testunitready; *size = 0U; goto ldv_59767; case 163: tmp___0 = (*((dev->transport)->get_device_type))(dev); if (tmp___0 != 5U) { if (((int )*(cdb + 1UL) & 31) == 10) { cmd->execute_cmd = & target_emulate_report_target_port_groups; } else { } *size = get_unaligned_be32((void const *)cdb + 6U); } else { tmp = get_unaligned_be16((void const *)cdb + 8U); *size = (unsigned int )tmp; } goto ldv_59767; case 164: tmp___2 = (*((dev->transport)->get_device_type))(dev); if (tmp___2 != 5U) { if ((unsigned int )*(cdb + 1UL) == 10U) { cmd->execute_cmd = & target_emulate_set_target_port_groups; } else { } *size = 
get_unaligned_be32((void const *)cdb + 6U); } else { tmp___1 = get_unaligned_be16((void const *)cdb + 8U); *size = (unsigned int )tmp___1; } goto ldv_59767; default: tmp___3 = (*((cmd->se_tfo)->get_fabric_name))(); printk("\fTARGET_CORE[%s]: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", tmp___3, (int )*cdb); return (2U); } ldv_59767: ; return (0U); } } static char const __kstrtab_spc_parse_cdb[14U] = { 's', 'p', 'c', '_', 'p', 'a', 'r', 's', 'e', '_', 'c', 'd', 'b', '\000'}; struct kernel_symbol const __ksymtab_spc_parse_cdb ; struct kernel_symbol const __ksymtab_spc_parse_cdb = {(unsigned long )(& spc_parse_cdb), (char const *)(& __kstrtab_spc_parse_cdb)}; bool ldv_queue_work_on_479(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_480(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_481(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_482(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_483(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_484(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_485(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_486(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_487(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_488(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_489(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_490(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_515(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_unlock_513(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_516(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_517(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_512(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_514(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_518(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_507(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_509(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_508(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_511(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_510(struct workqueue_struct *ldv_func_arg1 ) ; sense_reason_t target_scsi3_ua_check(struct se_cmd *cmd ) { struct se_dev_entry *deve ; struct se_session *sess ; struct se_node_acl *nacl ; int tmp ; { sess = cmd->se_sess; if ((unsigned long )sess == (unsigned long )((struct se_session *)0)) { return (0U); } else { } nacl = sess->se_node_acl; if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return (0U); } else { } rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (0U); } else { } tmp = atomic_read((atomic_t const *)(& deve->ua_count)); if (tmp == 0) { rcu_read_unlock___0(); return (0U); } else { } rcu_read_unlock___0(); switch ((int )*(cmd->t_task_cdb)) { case 18: ; case 160: ; case 3: ; return (0U); default: ; return (14U); } } } int core_scsi3_ua_allocate(struct se_dev_entry *deve , u8 asc , u8 ascq ) { struct se_ua *ua ; struct se_ua *ua_p ; struct se_ua *ua_tmp ; void *tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; long tmp___0 ; { tmp = kmem_cache_zalloc(se_ua_cache, 32U); ua = (struct se_ua *)tmp; if ((unsigned long )ua == (unsigned long )((struct se_ua *)0)) { printk("\vUnable to allocate struct se_ua\n"); return (-12); } else { } INIT_LIST_HEAD(& ua->ua_nacl_list); ua->ua_asc = asc; ua->ua_ascq = ascq; spin_lock(& deve->ua_lock); __mptr = (struct list_head const *)deve->ua_list.next; ua_p = (struct se_ua *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)ua_p->ua_nacl_list.next; ua_tmp = (struct se_ua *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_57098; ldv_57097: ; if ((int )ua_p->ua_asc == (int )asc && (int )ua_p->ua_ascq == (int )ascq) { spin_unlock(& deve->ua_lock); kmem_cache_free(se_ua_cache, (void *)ua); return (0); } else { } if ((unsigned int )ua_p->ua_asc == 41U) { if ((unsigned int )asc == 41U && (int )ua_p->ua_ascq < (int )ascq) { list_add(& ua->ua_nacl_list, & deve->ua_list); } else { list_add_tail(& ua->ua_nacl_list, & deve->ua_list); } } else if ((unsigned int )ua_p->ua_asc == 42U) { if ((unsigned int )asc == 41U || (int )ua_p->ua_asc < (int )ascq) { list_add(& ua->ua_nacl_list, & deve->ua_list); } else { list_add_tail(& ua->ua_nacl_list, & deve->ua_list); } } else { list_add_tail(& ua->ua_nacl_list, & deve->ua_list); } spin_unlock(& deve->ua_lock); atomic_inc_mb(& deve->ua_count); return (0); ua_p = ua_tmp; __mptr___1 = (struct list_head const *)ua_tmp->ua_nacl_list.next; ua_tmp 
= (struct se_ua *)__mptr___1 + 0xfffffffffffffff8UL; ldv_57098: ; if ((unsigned long )(& ua_p->ua_nacl_list) != (unsigned long )(& deve->ua_list)) { goto ldv_57097; } else { } list_add_tail(& ua->ua_nacl_list, & deve->ua_list); spin_unlock(& deve->ua_lock); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_ua_allocate"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_ua.c"; descriptor.format = "Allocated UNIT ATTENTION, mapped LUN: %llu, ASC: 0x%02x, ASCQ: 0x%02x\n"; descriptor.lineno = 165U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "Allocated UNIT ATTENTION, mapped LUN: %llu, ASC: 0x%02x, ASCQ: 0x%02x\n", deve->mapped_lun, (int )asc, (int )ascq); } else { } atomic_inc_mb(& deve->ua_count); return (0); } } void target_ua_allocate_lun(struct se_node_acl *nacl , u32 unpacked_lun , u8 asc , u8 ascq ) { struct se_dev_entry *deve ; { if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return; } else { } rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, (u64 )unpacked_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return; } else { } core_scsi3_ua_allocate(deve, (int )asc, (int )ascq); rcu_read_unlock___0(); return; } } void core_scsi3_ua_release_all(struct se_dev_entry *deve ) { struct se_ua *ua ; struct se_ua *ua_p ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { spin_lock(& deve->ua_lock); __mptr = (struct list_head const *)deve->ua_list.next; ua = (struct se_ua *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)ua->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_57121; ldv_57120: list_del(& ua->ua_nacl_list); kmem_cache_free(se_ua_cache, (void *)ua); atomic_dec_mb(& deve->ua_count); ua = ua_p; __mptr___1 = (struct list_head const *)ua_p->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___1 + 0xfffffffffffffff8UL; ldv_57121: ; if ((unsigned long )(& ua->ua_nacl_list) != (unsigned long )(& deve->ua_list)) { goto ldv_57120; } else { } spin_unlock(& deve->ua_lock); return; } } void core_scsi3_ua_for_check_condition(struct se_cmd *cmd , u8 *asc , u8 *ascq ) { struct se_device *dev ; struct se_dev_entry *deve ; struct se_session *sess ; struct se_node_acl *nacl ; struct se_ua *ua ; struct se_ua *ua_p ; int head ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { dev = cmd->se_dev; sess = cmd->se_sess; ua = (struct se_ua *)0; head = 1; if ((unsigned long )sess == (unsigned long )((struct se_session *)0)) { return; } else { } nacl = sess->se_node_acl; if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return; } else { } rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return; } else { } tmp = atomic_read((atomic_t const *)(& deve->ua_count)); if (tmp == 0) { rcu_read_unlock___0(); return; } else { } spin_lock(& deve->ua_lock); __mptr = (struct list_head const *)deve->ua_list.next; ua = (struct se_ua *)__mptr + 0xfffffffffffffff8UL; 
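/*
 * Expanded list_for_each_entry_safe() walk over deve->ua_list, held under
 * deve->ua_lock: the head entry's ASC/ASCQ is copied into *asc/*ascq.  When
 * emulate_ua_intlck_ctrl is non-zero the walk stops there and the unit
 * attentions stay queued; otherwise every entry is unlinked, freed back to
 * se_ua_cache, and deve->ua_count is decremented.
 */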
__mptr___0 = (struct list_head const *)ua->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_57143; ldv_57142: ; if (dev->dev_attrib.emulate_ua_intlck_ctrl != 0) { *asc = ua->ua_asc; *ascq = ua->ua_ascq; goto ldv_57141; } else { } if (head != 0) { *asc = ua->ua_asc; *ascq = ua->ua_ascq; head = 0; } else { } list_del(& ua->ua_nacl_list); kmem_cache_free(se_ua_cache, (void *)ua); atomic_dec_mb(& deve->ua_count); ua = ua_p; __mptr___1 = (struct list_head const *)ua_p->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___1 + 0xfffffffffffffff8UL; ldv_57143: ; if ((unsigned long )(& ua->ua_nacl_list) != (unsigned long )(& deve->ua_list)) { goto ldv_57142; } else { } ldv_57141: spin_unlock(& deve->ua_lock); rcu_read_unlock___0(); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_ua_for_check_condition"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_ua.c"; descriptor.format = "[%s]: %s UNIT ATTENTION condition with INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x reported ASC: 0x%02x, ASCQ: 0x%02x\n"; descriptor.lineno = 275U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*(((nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "[%s]: %s UNIT ATTENTION condition with INTLCK_CTRL: %d, mapped LUN: %llu, got CDB: 0x%02x reported ASC: 0x%02x, ASCQ: 0x%02x\n", tmp___0, dev->dev_attrib.emulate_ua_intlck_ctrl != 0 ? (char *)"Reporting" : (char *)"Releasing", dev->dev_attrib.emulate_ua_intlck_ctrl, cmd->orig_fe_lun, (int )*(cmd->t_task_cdb), (int )*asc, (int )*ascq); } else { } return; } } int core_scsi3_ua_clear_for_request_sense(struct se_cmd *cmd , u8 *asc , u8 *ascq ) { struct se_dev_entry *deve ; struct se_session *sess ; struct se_node_acl *nacl ; struct se_ua *ua ; struct se_ua *ua_p ; int head ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct _ddebug descriptor ; char *tmp___0 ; long tmp___1 ; { sess = cmd->se_sess; ua = (struct se_ua *)0; head = 1; if ((unsigned long )sess == (unsigned long )((struct se_session *)0)) { return (-22); } else { } nacl = sess->se_node_acl; if ((unsigned long )nacl == (unsigned long )((struct se_node_acl *)0)) { return (-22); } else { } rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-22); } else { } tmp = atomic_read((atomic_t const *)(& deve->ua_count)); if (tmp == 0) { rcu_read_unlock___0(); return (-1); } else { } spin_lock(& deve->ua_lock); __mptr = (struct list_head const *)deve->ua_list.next; ua = (struct se_ua *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)ua->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_57164; ldv_57163: ; if (head != 0) { *asc = ua->ua_asc; *ascq = ua->ua_ascq; head = 0; } else { } list_del(& ua->ua_nacl_list); kmem_cache_free(se_ua_cache, (void *)ua); atomic_dec_mb(& deve->ua_count); ua = ua_p; __mptr___1 = (struct list_head const *)ua_p->ua_nacl_list.next; ua_p = (struct se_ua *)__mptr___1 + 0xfffffffffffffff8UL; ldv_57164: ; if ((unsigned long )(& ua->ua_nacl_list) != (unsigned long )(& deve->ua_list)) { goto ldv_57163; } else { } 
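/* The loop above has reported the first queued unit attention into
 * *asc/*ascq and released every entry on deve->ua_list, so REQUEST SENSE
 * consumes all pending UA conditions for this deve; the return at the end
 * of the function yields -1 when nothing was queued to report. */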
spin_unlock(& deve->ua_lock); rcu_read_unlock___0(); descriptor.modname = "target_core_mod"; descriptor.function = "core_scsi3_ua_clear_for_request_sense"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_ua.c"; descriptor.format = "[%s]: Released UNIT ATTENTION condition, mapped LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x, ASCQ: 0x%02x\n"; descriptor.lineno = 334U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { tmp___0 = (*(((nacl->se_tpg)->se_tpg_tfo)->get_fabric_name))(); __dynamic_pr_debug(& descriptor, "[%s]: Released UNIT ATTENTION condition, mapped LUN: %llu, got REQUEST_SENSE reported ASC: 0x%02x, ASCQ: 0x%02x\n", tmp___0, cmd->orig_fe_lun, (int )*asc, (int )*ascq); } else { } return (head != 0 ? -1 : 0); } } bool ldv_queue_work_on_507(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_508(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_509(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_510(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_511(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_512(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_513(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_514(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_515(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_516(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_517(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void 
ldv_mutex_lock_518(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_543(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_541(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_544(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_545(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_540(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_542(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_546(struct mutex *ldv_func_arg1 ) ; extern void call_rcu_sched(struct callback_head * , void (*)(struct callback_head * ) ) ; bool ldv_queue_work_on_535(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_537(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_536(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_539(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_538(struct workqueue_struct *ldv_func_arg1 ) ; __inline static void *kmalloc_array(size_t n , size_t size , gfp_t flags ) { void *tmp ; { if (size != 0UL && 0xffffffffffffffffUL / size < n) { return ((void *)0); } else { } tmp = __kmalloc(n * size, flags); return (tmp); } } __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc_array(n, size, flags | 32768U); return (tmp); } } __inline static void sg_assign_page___1(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (90), "i" (12UL)); ldv_24733: ; goto ldv_24733; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (92), "i" (12UL)); ldv_24734: ; goto ldv_24734; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (93), "i" (12UL)); ldv_24735: ; goto ldv_24735; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static struct page *sg_page___1(struct scatterlist *sg ) { long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (123), "i" (12UL)); ldv_24745: ; goto ldv_24745; } else { } tmp___0 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 
2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (124), "i" (12UL)); ldv_24746: ; goto ldv_24746; } else { } return ((struct page *)(sg->page_link & 0xfffffffffffffffcUL)); } } __inline static void sg_chain(struct scatterlist *prv , unsigned int prv_nents , struct scatterlist *sgl ) { { (prv + (unsigned long )(prv_nents - 1U))->offset = 0U; (prv + (unsigned long )(prv_nents - 1U))->length = 0U; (prv + (unsigned long )(prv_nents - 1U))->page_link = ((unsigned long )sgl & 0xfffffffffffffffcUL) | 1UL; return; } } __inline static void *sg_virt(struct scatterlist *sg ) { struct page *tmp ; void *tmp___0 ; { tmp = sg_page___1(sg); tmp___0 = lowmem_page_address((struct page const *)tmp); return (tmp___0 + (unsigned long )sg->offset); } } __inline static struct rd_dev *RD_DEV(struct se_device *dev ) { struct se_device const *__mptr ; { __mptr = (struct se_device const *)dev; return ((struct rd_dev *)__mptr); } } static int rd_attach_hba(struct se_hba *hba , u32 host_id ) { struct rd_host *rd_host ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; { tmp = kzalloc(64UL, 208U); rd_host = (struct rd_host *)tmp; if ((unsigned long )rd_host == (unsigned long )((struct rd_host *)0)) { printk("\vUnable to allocate memory for struct rd_host\n"); return (-12); } else { } rd_host->rd_host_id = host_id; hba->hba_ptr = (void *)rd_host; descriptor.modname = "target_core_mod"; descriptor.function = "rd_attach_hba"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on Generic Target Core Stack %s\n"; descriptor.lineno = 60U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on Generic Target Core Stack %s\n", hba->hba_id, (char *)"v4.0", (char *)"v5.0"); } else { } return (0); } } static void rd_detach_hba(struct se_hba *hba ) { struct rd_host *rd_host ; struct _ddebug descriptor ; long tmp ; { rd_host = (struct rd_host *)hba->hba_ptr; descriptor.modname = "target_core_mod"; descriptor.function = "rd_detach_hba"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_HBA[%d] - Detached Ramdisk HBA: %u from Generic Target Core\n"; descriptor.lineno = 70U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "CORE_HBA[%d] - Detached Ramdisk HBA: %u from Generic Target Core\n", hba->hba_id, rd_host->rd_host_id); } else { } kfree((void const *)rd_host); hba->hba_ptr = (void *)0; return; } } static u32 rd_release_sgl_table(struct rd_dev *rd_dev , struct rd_dev_sg_table *sg_table , u32 sg_table_count ) { struct page *pg ; struct scatterlist *sg ; u32 i ; u32 j ; u32 page_count___0 ; u32 sg_per_table ; { page_count___0 = 0U; i = 0U; goto ldv_56619; ldv_56618: sg = (sg_table + (unsigned long )i)->sg_table; sg_per_table = (sg_table + (unsigned long )i)->rd_sg_count; j = 0U; goto ldv_56616; ldv_56615: pg = sg_page___1(sg + (unsigned long )j); if ((unsigned long )pg != (unsigned long )((struct page 
*)0)) { __free_pages(pg, 0U); page_count___0 = page_count___0 + 1U; } else { } j = j + 1U; ldv_56616: ; if (j < sg_per_table) { goto ldv_56615; } else { } kfree((void const *)sg); i = i + 1U; ldv_56619: ; if (i < sg_table_count) { goto ldv_56618; } else { } kfree((void const *)sg_table); return (page_count___0); } } static void rd_release_device_space(struct rd_dev *rd_dev ) { u32 page_count___0 ; struct _ddebug descriptor ; long tmp ; { if ((unsigned long )rd_dev->sg_table_array == (unsigned long )((struct rd_dev_sg_table *)0) || rd_dev->sg_table_count == 0U) { return; } else { } page_count___0 = rd_release_sgl_table(rd_dev, rd_dev->sg_table_array, rd_dev->sg_table_count); descriptor.modname = "target_core_mod"; descriptor.function = "rd_release_device_space"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_RD[%u] - Released device space for Ramdisk Device ID: %u, pages %u in %u tables total bytes %lu\n"; descriptor.lineno = 114U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "CORE_RD[%u] - Released device space for Ramdisk Device ID: %u, pages %u in %u tables total bytes %lu\n", (rd_dev->rd_host)->rd_host_id, rd_dev->rd_dev_id, page_count___0, rd_dev->sg_table_count, (unsigned long )page_count___0 * 4096UL); } else { } rd_dev->sg_table_array = (struct rd_dev_sg_table *)0; rd_dev->sg_table_count = 0U; return; } } static int rd_allocate_sgl_table(struct rd_dev *rd_dev , struct rd_dev_sg_table *sg_table , u32 total_sg_needed , unsigned char init_payload ) { u32 i ; u32 j ; u32 page_offset___0 ; u32 sg_per_table ; u32 max_sg_per_table ; struct page *pg ; struct scatterlist *sg ; unsigned char *p ; unsigned int chain_entry ; void *tmp ; u32 tmp___0 ; void *tmp___1 ; { i = 0U; page_offset___0 = 0U; max_sg_per_table = 1638U; goto ldv_56646; ldv_56645: chain_entry = 0U; sg_per_table = max_sg_per_table < total_sg_needed ? 
max_sg_per_table : total_sg_needed; if (sg_per_table < total_sg_needed) { chain_entry = 1U; } else { } tmp = kcalloc((size_t )(sg_per_table + chain_entry), 40UL, 208U); sg = (struct scatterlist *)tmp; if ((unsigned long )sg == (unsigned long )((struct scatterlist *)0)) { printk("\vUnable to allocate scatterlist array for struct rd_dev\n"); return (-12); } else { } sg_init_table(sg, sg_per_table + chain_entry); if (i != 0U) { sg_chain((sg_table + (unsigned long )(i - 1U))->sg_table, max_sg_per_table + 1U, sg); } else { } (sg_table + (unsigned long )i)->sg_table = sg; (sg_table + (unsigned long )i)->rd_sg_count = sg_per_table; (sg_table + (unsigned long )i)->page_start_offset = page_offset___0; tmp___0 = i; i = i + 1U; (sg_table + (unsigned long )tmp___0)->page_end_offset = (page_offset___0 + sg_per_table) - 1U; j = 0U; goto ldv_56643; ldv_56642: pg = alloc_pages(208U, 0U); if ((unsigned long )pg == (unsigned long )((struct page *)0)) { printk("\vUnable to allocate scatterlist pages for struct rd_dev_sg_table\n"); return (-12); } else { } sg_assign_page___1(sg + (unsigned long )j, pg); (sg + (unsigned long )j)->length = 4096U; tmp___1 = kmap(pg); p = (unsigned char *)tmp___1; memset((void *)p, (int )init_payload, 4096UL); kunmap(pg); j = j + 1U; ldv_56643: ; if (j < sg_per_table) { goto ldv_56642; } else { } page_offset___0 = page_offset___0 + sg_per_table; total_sg_needed = total_sg_needed - sg_per_table; ldv_56646: ; if (total_sg_needed != 0U) { goto ldv_56645; } else { } return (0); } } static int rd_build_device_space(struct rd_dev *rd_dev ) { struct rd_dev_sg_table *sg_table ; u32 sg_tables ; u32 total_sg_needed ; u32 max_sg_per_table ; int rc ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; { max_sg_per_table = 1638U; if (rd_dev->rd_page_count == 0U) { printk("\vIllegal page count: %u for Ramdisk device\n", rd_dev->rd_page_count); return (-22); } else { } if ((rd_dev->rd_flags & 2U) != 0U) { return (0); } else { } total_sg_needed = rd_dev->rd_page_count; sg_tables = total_sg_needed / max_sg_per_table + 1U; tmp = kzalloc((unsigned long )sg_tables * 64UL, 208U); sg_table = (struct rd_dev_sg_table *)tmp; if ((unsigned long )sg_table == (unsigned long )((struct rd_dev_sg_table *)0)) { printk("\vUnable to allocate memory for Ramdisk scatterlist tables\n"); return (-12); } else { } rd_dev->sg_table_array = sg_table; rd_dev->sg_table_count = sg_tables; rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 0); if (rc != 0) { return (rc); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "rd_build_device_space"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_RD[%u] - Built Ramdisk Device ID: %u space of %u pages in %u tables\n"; descriptor.lineno = 237U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "CORE_RD[%u] - Built Ramdisk Device ID: %u space of %u pages in %u tables\n", (rd_dev->rd_host)->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count); } else { } return (0); } } static void rd_release_prot_space(struct rd_dev *rd_dev ) { u32 page_count___0 ; struct _ddebug descriptor ; long tmp ; { if ((unsigned long )rd_dev->sg_prot_array == (unsigned long )((struct rd_dev_sg_table *)0) || rd_dev->sg_prot_count == 0U) { return; } 
else { } page_count___0 = rd_release_sgl_table(rd_dev, rd_dev->sg_prot_array, rd_dev->sg_prot_count); descriptor.modname = "target_core_mod"; descriptor.function = "rd_release_prot_space"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_RD[%u] - Released protection space for Ramdisk Device ID: %u, pages %u in %u tables total bytes %lu\n"; descriptor.lineno = 255U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "CORE_RD[%u] - Released protection space for Ramdisk Device ID: %u, pages %u in %u tables total bytes %lu\n", (rd_dev->rd_host)->rd_host_id, rd_dev->rd_dev_id, page_count___0, rd_dev->sg_table_count, (unsigned long )page_count___0 * 4096UL); } else { } rd_dev->sg_prot_array = (struct rd_dev_sg_table *)0; rd_dev->sg_prot_count = 0U; return; } } static int rd_build_prot_space(struct rd_dev *rd_dev , int prot_length , int block_size___0 ) { struct rd_dev_sg_table *sg_table ; u32 total_sg_needed ; u32 sg_tables ; u32 max_sg_per_table ; int rc ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; { max_sg_per_table = 1638U; if ((rd_dev->rd_flags & 2U) != 0U) { return (0); } else { } total_sg_needed = (rd_dev->rd_page_count * (u32 )prot_length) / (u32 )block_size___0 + 1U; sg_tables = total_sg_needed / max_sg_per_table + 1U; tmp = kzalloc((unsigned long )sg_tables * 64UL, 208U); sg_table = (struct rd_dev_sg_table *)tmp; if ((unsigned long )sg_table == (unsigned long )((struct rd_dev_sg_table *)0)) { printk("\vUnable to allocate memory for Ramdisk protection scatterlist tables\n"); return (-12); } else { } rd_dev->sg_prot_array = sg_table; rd_dev->sg_prot_count = sg_tables; rc = rd_allocate_sgl_table(rd_dev, sg_table, total_sg_needed, 255); if (rc != 0) { return (rc); } else { } descriptor.modname = "target_core_mod"; descriptor.function = "rd_build_prot_space"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of %u pages in %u tables\n"; descriptor.lineno = 297U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "CORE_RD[%u] - Built Ramdisk Device ID: %u prot space of %u pages in %u tables\n", (rd_dev->rd_host)->rd_host_id, rd_dev->rd_dev_id, total_sg_needed, rd_dev->sg_prot_count); } else { } return (0); } } static struct se_device *rd_alloc_device(struct se_hba *hba , char const *name ) { struct rd_dev *rd_dev ; struct rd_host *rd_host ; void *tmp ; { rd_host = (struct rd_host *)hba->hba_ptr; tmp = kzalloc(5120UL, 208U); rd_dev = (struct rd_dev *)tmp; if ((unsigned long )rd_dev == (unsigned long )((struct rd_dev *)0)) { printk("\vUnable to allocate memory for struct rd_dev\n"); return ((struct se_device *)0); } else { } rd_dev->rd_host = rd_host; return (& rd_dev->dev); } } static int rd_configure_device(struct se_device *dev ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; struct rd_host *rd_host ; int ret ; struct _ddebug descriptor ; long tmp___0 ; u32 tmp___1 ; struct _ddebug descriptor___0 ; long tmp___2 ; { tmp = RD_DEV(dev); rd_dev = tmp; 
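/*
 * rd_configure_device(): refuses to configure the ramdisk backend with
 * -22 (-EINVAL) unless the rd_pages= parameter was parsed earlier
 * (rd_dev->rd_flags & 1U), then allocates the backing scatterlist tables
 * via rd_build_device_space() and advertises hw_block_size = 512,
 * hw_max_sectors = 0xffffffff and hw_queue_depth = 128 before assigning
 * the per-host device id.
 */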
rd_host = (struct rd_host *)(dev->se_hba)->hba_ptr; if ((rd_dev->rd_flags & 1U) == 0U) { descriptor.modname = "target_core_mod"; descriptor.function = "rd_configure_device"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "Missing rd_pages= parameter\n"; descriptor.lineno = 325U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "Missing rd_pages= parameter\n"); } else { } return (-22); } else { } ret = rd_build_device_space(rd_dev); if (ret < 0) { goto fail; } else { } dev->dev_attrib.hw_block_size = 512U; dev->dev_attrib.hw_max_sectors = 4294967295U; dev->dev_attrib.hw_queue_depth = 128U; tmp___1 = rd_host->rd_host_dev_id_count; rd_host->rd_host_dev_id_count = rd_host->rd_host_dev_id_count + 1U; rd_dev->rd_dev_id = tmp___1; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "rd_configure_device"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor___0.format = "CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of %u pages in %u tables, %lu total bytes\n"; descriptor___0.lineno = 343U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___0, "CORE_RD[%u] - Added TCM MEMCPY Ramdisk Device ID: %u of %u pages in %u tables, %lu total bytes\n", rd_host->rd_host_id, rd_dev->rd_dev_id, rd_dev->rd_page_count, rd_dev->sg_table_count, (unsigned long )rd_dev->rd_page_count * 4096UL); } else { } return (0); fail: rd_release_device_space(rd_dev); return (ret); } } static void rd_dev_call_rcu(struct callback_head *p ) { struct se_device *dev ; struct callback_head const *__mptr ; struct rd_dev *rd_dev ; struct rd_dev *tmp ; { __mptr = (struct callback_head const *)p; dev = (struct se_device *)__mptr + 0xffffffffffffec58UL; tmp = RD_DEV(dev); rd_dev = tmp; kfree((void const *)rd_dev); return; } } static void rd_free_device(struct se_device *dev ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; { tmp = RD_DEV(dev); rd_dev = tmp; rd_release_device_space(rd_dev); call_rcu_sched(& dev->callback_head, & rd_dev_call_rcu); return; } } static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev , u32 page ) { struct rd_dev_sg_table *sg_table ; u32 i ; u32 sg_per_table ; { sg_per_table = 1638U; i = page / sg_per_table; if (rd_dev->sg_table_count > i) { sg_table = rd_dev->sg_table_array + (unsigned long )i; if (sg_table->page_start_offset <= page && sg_table->page_end_offset >= page) { return (sg_table); } else { } } else { } printk("\vUnable to locate struct rd_dev_sg_table for page: %u\n", page); return ((struct rd_dev_sg_table *)0); } } static struct rd_dev_sg_table *rd_get_prot_table(struct rd_dev *rd_dev , u32 page ) { struct rd_dev_sg_table *sg_table ; u32 i ; u32 sg_per_table ; { sg_per_table = 1638U; i = page / sg_per_table; if (rd_dev->sg_prot_count > i) { sg_table = rd_dev->sg_prot_array + (unsigned long )i; if (sg_table->page_start_offset <= page && sg_table->page_end_offset >= page) { return (sg_table); } else { } } else { } printk("\vUnable to locate struct prot rd_dev_sg_table for page: 
%u\n", page); return ((struct rd_dev_sg_table *)0); } } static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd , bool is_read ) { struct se_device *se_dev ; struct rd_dev *dev ; struct rd_dev *tmp ; struct rd_dev_sg_table *prot_table ; bool need_to_release ; struct scatterlist *prot_sg ; u32 sectors ; u32 prot_offset ; u32 prot_page ; u64 tmp___0 ; sense_reason_t rc ; uint32_t __base ; uint32_t __rem ; { se_dev = cmd->se_dev; tmp = RD_DEV(se_dev); dev = tmp; need_to_release = 0; sectors = cmd->data_length / se_dev->dev_attrib.block_size; rc = 10U; tmp___0 = cmd->t_task_lba * (unsigned long long )se_dev->prot_length; __base = 4096U; __rem = (uint32_t )(tmp___0 % (u64 )__base); tmp___0 = tmp___0 / (u64 )__base; prot_offset = __rem; prot_page = (u32 )tmp___0; prot_table = rd_get_prot_table(dev, prot_page); if ((unsigned long )prot_table == (unsigned long )((struct rd_dev_sg_table *)0)) { return (10U); } else { } prot_sg = prot_table->sg_table + (unsigned long )(prot_page - prot_table->page_start_offset); if ((int )is_read) { rc = sbc_dif_verify(cmd, (sector_t )cmd->t_task_lba, sectors, 0U, prot_sg, (int )prot_offset); } else { rc = sbc_dif_verify(cmd, (sector_t )cmd->t_task_lba, sectors, 0U, cmd->t_prot_sg, 0); } if (rc == 0U) { sbc_dif_copy_prot(cmd, sectors, (int )is_read, prot_sg, (int )prot_offset); } else { } if ((int )need_to_release) { kfree((void const *)prot_sg); } else { } return (rc); } } static sense_reason_t rd_execute_rw(struct se_cmd *cmd , struct scatterlist *sgl , u32 sgl_nents , enum dma_data_direction data_direction ) { struct se_device *se_dev ; struct rd_dev *dev ; struct rd_dev *tmp ; struct rd_dev_sg_table *table ; struct scatterlist *rd_sg ; struct sg_mapping_iter m ; u32 rd_offset ; u32 rd_size ; u32 rd_page ; u32 src_len ; u64 tmp___0 ; sense_reason_t rc ; uint32_t __base ; uint32_t __rem ; struct _ddebug descriptor ; long tmp___1 ; u32 len ; void *rd_addr ; struct _ddebug descriptor___0 ; long tmp___2 ; unsigned int _min1 ; u32 _min2 ; struct _ddebug descriptor___1 ; long tmp___3 ; void *tmp___4 ; { se_dev = cmd->se_dev; tmp = RD_DEV(se_dev); dev = tmp; if ((dev->rd_flags & 2U) != 0U) { target_complete_cmd(cmd, 0); return (0U); } else { } tmp___0 = cmd->t_task_lba * (unsigned long long )se_dev->dev_attrib.block_size; __base = 4096U; __rem = (uint32_t )(tmp___0 % (u64 )__base); tmp___0 = tmp___0 / (u64 )__base; rd_offset = __rem; rd_page = (u32 )tmp___0; rd_size = cmd->data_length; table = rd_get_sg_table(dev, rd_page); if ((unsigned long )table == (unsigned long )((struct rd_dev_sg_table *)0)) { return (10U); } else { } rd_sg = table->sg_table + (unsigned long )(rd_page - table->page_start_offset); descriptor.modname = "target_core_mod"; descriptor.function = "rd_execute_rw"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n"; descriptor.lineno = 519U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n", dev->rd_dev_id, (unsigned int )data_direction == 2U ? 
(char *)"Read" : (char *)"Write", cmd->t_task_lba, rd_size, rd_page, rd_offset); } else { } if (((unsigned int )cmd->prot_type != 0U && (unsigned int )se_dev->dev_attrib.pi_prot_type != 0U) && (unsigned int )data_direction == 1U) { rc = rd_do_prot_rw(cmd, 0); if (rc != 0U) { return (rc); } else { } } else { } src_len = 4096U - rd_offset; sg_miter_start(& m, sgl, sgl_nents, (unsigned int )data_direction == 2U ? 2U : 4U); goto ldv_56764; ldv_56765: sg_miter_next(& m); if ((unsigned int )m.length == 0U) { descriptor___0.modname = "target_core_mod"; descriptor___0.function = "rd_execute_rw"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor___0.format = "RD[%u]: invalid sgl %p len %zu\n"; descriptor___0.lineno = 539U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___0, "RD[%u]: invalid sgl %p len %zu\n", dev->rd_dev_id, m.addr, m.length); } else { } sg_miter_stop(& m); return (3U); } else { } _min1 = (unsigned int )m.length; _min2 = src_len; len = _min1 < _min2 ? _min1 : _min2; if (len > rd_size) { descriptor___1.modname = "target_core_mod"; descriptor___1.function = "rd_execute_rw"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor___1.format = "RD[%u]: size underrun page %d offset %d size %d\n"; descriptor___1.lineno = 547U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___1, "RD[%u]: size underrun page %d offset %d size %d\n", dev->rd_dev_id, rd_page, rd_offset, rd_size); } else { } len = rd_size; } else { } m.consumed = (size_t )len; tmp___4 = sg_virt(rd_sg); rd_addr = tmp___4 + (unsigned long )rd_offset; if ((unsigned int )data_direction == 2U) { memcpy(m.addr, (void const *)rd_addr, (size_t )len); } else { memcpy(rd_addr, (void const *)m.addr, (size_t )len); } rd_size = rd_size - len; if (rd_size == 0U) { goto ldv_56764; } else { } src_len = src_len - len; if (src_len != 0U) { rd_offset = rd_offset + len; goto ldv_56764; } else { } rd_page = rd_page + 1U; rd_offset = 0U; src_len = 4096U; if (table->page_end_offset >= rd_page) { rd_sg = rd_sg + 1; goto ldv_56764; } else { } table = rd_get_sg_table(dev, rd_page); if ((unsigned long )table == (unsigned long )((struct rd_dev_sg_table *)0)) { sg_miter_stop(& m); return (10U); } else { } rd_sg = table->sg_table; ldv_56764: ; if (rd_size != 0U) { goto ldv_56765; } else { } sg_miter_stop(& m); if (((unsigned int )cmd->prot_type != 0U && (unsigned int )se_dev->dev_attrib.pi_prot_type != 0U) && (unsigned int )data_direction == 2U) { rc = rd_do_prot_rw(cmd, 1); if (rc != 0U) { return (rc); } else { } } else { } target_complete_cmd(cmd, 0); return (0U); } } static struct match_token tokens___0[3U] = { {0, "rd_pages=%d"}, {1, "rd_nullio=%d"}, {2, (char const *)0}}; static ssize_t rd_set_configfs_dev_params(struct se_device *dev , char const *page , ssize_t count ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; char *orig ; char *ptr ; char *opts ; substring_t args[3U] ; int ret ; int arg ; int token ; struct _ddebug descriptor ; long tmp___0 ; struct _ddebug 
descriptor___0 ; long tmp___1 ; { tmp = RD_DEV(dev); rd_dev = tmp; ret = 0; opts = kstrdup(page, 208U); if ((unsigned long )opts == (unsigned long )((char *)0)) { return (-12L); } else { } orig = opts; goto ldv_56785; ldv_56793: ; if ((int )((signed char )*ptr) == 0) { goto ldv_56785; } else { } token = match_token(ptr, (struct match_token const *)(& tokens___0), (substring_t *)(& args)); switch (token) { case 0: match_int((substring_t *)(& args), & arg); rd_dev->rd_page_count = (u32 )arg; descriptor.modname = "target_core_mod"; descriptor.function = "rd_set_configfs_dev_params"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor.format = "RAMDISK: Referencing Page Count: %u\n"; descriptor.lineno = 634U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "RAMDISK: Referencing Page Count: %u\n", rd_dev->rd_page_count); } else { } rd_dev->rd_flags = rd_dev->rd_flags | 1U; goto ldv_56789; case 1: match_int((substring_t *)(& args), & arg); if (arg != 1) { goto ldv_56789; } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "rd_set_configfs_dev_params"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_rd.c"; descriptor___0.format = "RAMDISK: Setting NULLIO flag: %d\n"; descriptor___0.lineno = 642U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "RAMDISK: Setting NULLIO flag: %d\n", arg); } else { } rd_dev->rd_flags = rd_dev->rd_flags | 2U; goto ldv_56789; default: ; goto ldv_56789; } ldv_56789: ; ldv_56785: ptr = strsep(& opts, ",\n"); if ((unsigned long )ptr != (unsigned long )((char *)0)) { goto ldv_56793; } else { } kfree((void const *)orig); return (ret != 0 ? 
(ssize_t )ret : count); } } static ssize_t rd_show_configfs_dev_params(struct se_device *dev , char *b ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; ssize_t bl ; int tmp___0 ; int tmp___1 ; { tmp = RD_DEV(dev); rd_dev = tmp; tmp___0 = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", rd_dev->rd_dev_id); bl = (ssize_t )tmp___0; tmp___1 = sprintf(b + (unsigned long )bl, " PAGES/PAGE_SIZE: %u*%lu SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, 4096UL, rd_dev->sg_table_count, (rd_dev->rd_flags & 2U) != 0U); bl = (ssize_t )tmp___1 + bl; return (bl); } } static sector_t rd_get_blocks(struct se_device *dev ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; unsigned long long blocks_long ; { tmp = RD_DEV(dev); rd_dev = tmp; blocks_long = (unsigned long long )(((unsigned long )rd_dev->rd_page_count * 4096UL) / (unsigned long )dev->dev_attrib.block_size - 1UL); return ((sector_t )blocks_long); } } static int rd_init_prot(struct se_device *dev ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; int tmp___0 ; { tmp = RD_DEV(dev); rd_dev = tmp; if ((unsigned int )dev->dev_attrib.pi_prot_type == 0U) { return (0); } else { } tmp___0 = rd_build_prot_space(rd_dev, dev->prot_length, (int )dev->dev_attrib.block_size); return (tmp___0); } } static void rd_free_prot(struct se_device *dev ) { struct rd_dev *rd_dev ; struct rd_dev *tmp ; { tmp = RD_DEV(dev); rd_dev = tmp; rd_release_prot_space(rd_dev); return; } } static struct sbc_ops rd_sbc_ops = {& rd_execute_rw, 0, 0, 0}; static sense_reason_t rd_parse_cdb(struct se_cmd *cmd ) { sense_reason_t tmp ; { tmp = sbc_parse_cdb(cmd, & rd_sbc_ops); return (tmp); } } static struct target_backend_ops const rd_mcp_ops = {{'r', 'd', '_', 'm', 'c', 'p', '\000'}, {'R', 'A', 'M', 'D', 'I', 'S', 'K', '-', 'M', 'C', 'P', '\000'}, {'4', '.', '0', '\000'}, 0, (unsigned char)0, & rd_attach_hba, & rd_detach_hba, 0, & rd_alloc_device, & rd_configure_device, & rd_free_device, & rd_set_configfs_dev_params, & rd_show_configfs_dev_params, 0, & rd_parse_cdb, & sbc_get_device_type, & rd_get_blocks, 0, 0, 0, 0, 0, 0, & rd_init_prot, 0, & rd_free_prot, (struct configfs_attribute **)(& sbc_attrib_attrs)}; int rd_module_init(void) { int tmp ; { tmp = transport_backend_register(& rd_mcp_ops); return (tmp); } } void rd_module_exit(void) { { target_backend_unregister(& rd_mcp_ops); return; } } void ldv_initialize_target_backend_ops_83(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(5048UL); rd_mcp_ops_group0 = (struct se_device *)tmp; tmp___0 = ldv_init_zalloc(400UL); rd_mcp_ops_group1 = (struct se_hba *)tmp___0; return; } } void ldv_main_exported_84(void) { struct se_cmd *ldvarg33 ; void *tmp ; u32 ldvarg34 ; enum dma_data_direction ldvarg32 ; struct scatterlist *ldvarg35 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(832UL); ldvarg33 = (struct se_cmd *)tmp; tmp___0 = ldv_init_zalloc(40UL); ldvarg35 = (struct scatterlist *)tmp___0; ldv_memset((void *)(& ldvarg34), 0, 4UL); ldv_memset((void *)(& ldvarg32), 0, 4UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_84 == 1) { rd_execute_rw(ldvarg33, ldvarg35, ldvarg34, ldvarg32); ldv_state_variable_84 = 1; } else { } goto ldv_56836; default: ldv_stop(); } ldv_56836: ; return; } } void ldv_main_exported_83(void) { u32 ldvarg415 ; char *ldvarg411 ; void *tmp ; char *ldvarg414 ; void *tmp___0 ; char *ldvarg410 ; void *tmp___1 ; struct se_cmd *ldvarg412 ; void *tmp___2 ; ssize_t ldvarg413 ; int tmp___3 ; { tmp = ldv_init_zalloc(1UL); ldvarg411 = (char *)tmp; tmp___0 = 
ldv_init_zalloc(1UL); ldvarg414 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg410 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(832UL); ldvarg412 = (struct se_cmd *)tmp___2; ldv_memset((void *)(& ldvarg415), 0, 4UL); ldv_memset((void *)(& ldvarg413), 0, 8UL); tmp___3 = __VERIFIER_nondet_int(); switch (tmp___3) { case 0: ; if (ldv_state_variable_83 == 1) { rd_attach_hba(rd_mcp_ops_group1, ldvarg415); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 1: ; if (ldv_state_variable_83 == 1) { rd_detach_hba(rd_mcp_ops_group1); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 2: ; if (ldv_state_variable_83 == 1) { rd_set_configfs_dev_params(rd_mcp_ops_group0, (char const *)ldvarg414, ldvarg413); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 3: ; if (ldv_state_variable_83 == 1) { rd_parse_cdb(ldvarg412); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 4: ; if (ldv_state_variable_83 == 1) { rd_configure_device(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 5: ; if (ldv_state_variable_83 == 1) { rd_get_blocks(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 6: ; if (ldv_state_variable_83 == 1) { rd_free_prot(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 7: ; if (ldv_state_variable_83 == 1) { rd_alloc_device(rd_mcp_ops_group1, (char const *)ldvarg411); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 8: ; if (ldv_state_variable_83 == 1) { rd_init_prot(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 9: ; if (ldv_state_variable_83 == 1) { rd_free_device(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 10: ; if (ldv_state_variable_83 == 1) { sbc_get_device_type(rd_mcp_ops_group0); ldv_state_variable_83 = 1; } else { } goto ldv_56848; case 11: ; if (ldv_state_variable_83 == 1) { rd_show_configfs_dev_params(rd_mcp_ops_group0, ldvarg410); ldv_state_variable_83 = 1; } else { } goto ldv_56848; default: ldv_stop(); } ldv_56848: ; return; } } bool ldv_queue_work_on_535(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_536(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_537(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_538(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_539(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, 
ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_540(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_541(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_542(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_543(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_544(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_545(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_546(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } __inline static long atomic_long_read(atomic_long_t *l ) { atomic64_t *v ; long tmp ; { v = l; tmp = atomic64_read((atomic64_t const *)v); return (tmp); } } int ldv_mutex_trylock_573(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_569(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_570(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_574(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_568(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_571(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_572(struct mutex *ldv_func_arg1 ) ; bool ldv_queue_work_on_563(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_565(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_564(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_567(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_566(struct workqueue_struct *ldv_func_arg1 ) ; static ssize_t target_stat_scsi_dev_show_attr_inst(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; struct se_hba *hba ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; hba = dev->se_hba; tmp = snprintf(page, 4096UL, "%u\n", hba->hba_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_dev_attribute target_stat_scsi_dev_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_dev_show_attr_inst, 0}; static ssize_t target_stat_scsi_dev_show_attr_indx(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%u\n", dev->dev_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_dev_attribute target_stat_scsi_dev_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_dev_show_attr_indx, 0}; static ssize_t 
target_stat_scsi_dev_show_attr_role(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "Target\n"); return ((ssize_t )tmp); } } static struct target_stat_scsi_dev_attribute target_stat_scsi_dev_role = {{"role", & __this_module, 292U}, & target_stat_scsi_dev_show_attr_role, 0}; static ssize_t target_stat_scsi_dev_show_attr_ports(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%u\n", dev->export_count); return ((ssize_t )tmp); } } static struct target_stat_scsi_dev_attribute target_stat_scsi_dev_ports = {{"ports", & __this_module, 292U}, & target_stat_scsi_dev_show_attr_ports, 0}; static struct se_dev_stat_grps *to_target_stat_scsi_dev(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_dev_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_dev_stat_grps *)__mptr + 0xffffffffffffff90UL; } else { tmp___0 = (struct se_dev_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_dev_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_dev_attribute *target_stat_scsi_dev_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_dev(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_dev_attr = (struct target_stat_scsi_dev_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_dev_attr->show != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_dev_attr->show))(se_dev_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_dev_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_dev_attribute *target_stat_scsi_dev_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_dev(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_dev_attr = (struct target_stat_scsi_dev_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_dev_attr->store != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_dev_attr->store))(se_dev_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_dev_attrs[5U] = { & target_stat_scsi_dev_inst.attr, & target_stat_scsi_dev_indx.attr, & target_stat_scsi_dev_role.attr, & target_stat_scsi_dev_ports.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_dev_attrib_ops = {0, & target_stat_scsi_dev_attr_show, & target_stat_scsi_dev_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_dev_cit = {& __this_module, & target_stat_scsi_dev_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_dev_attrs)}; static ssize_t target_stat_scsi_tgt_dev_show_attr_inst(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct 
se_dev_stat_grps const *__mptr ; struct se_hba *hba ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; hba = dev->se_hba; tmp = snprintf(page, 4096UL, "%u\n", hba->hba_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_inst, 0}; static ssize_t target_stat_scsi_tgt_dev_show_attr_indx(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%u\n", dev->dev_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_indx, 0}; static ssize_t target_stat_scsi_tgt_dev_show_attr_num_lus(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", 1); return ((ssize_t )tmp); } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_num_lus = {{"num_lus", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_num_lus, 0}; static ssize_t target_stat_scsi_tgt_dev_show_attr_status(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; if (dev->export_count != 0U) { tmp = snprintf(page, 4096UL, "activated"); return ((ssize_t )tmp); } else { tmp___0 = snprintf(page, 4096UL, "deactivated"); return ((ssize_t )tmp___0); } } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_status = {{"status", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_status, 0}; static ssize_t target_stat_scsi_tgt_dev_show_attr_non_access_lus(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int non_accessible_lus ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; if (dev->export_count != 0U) { non_accessible_lus = 0; } else { non_accessible_lus = 1; } tmp = snprintf(page, 4096UL, "%u\n", non_accessible_lus); return ((ssize_t )tmp); } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_non_access_lus = {{"non_access_lus", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_non_access_lus, 0}; static ssize_t target_stat_scsi_tgt_dev_show_attr_resets(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; long tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = atomic_long_read(& dev->num_resets); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_tgt_dev_attribute target_stat_scsi_tgt_dev_resets = {{"resets", & __this_module, 292U}, & target_stat_scsi_tgt_dev_show_attr_resets, 0}; static struct se_dev_stat_grps *to_target_stat_scsi_tgt_dev(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_dev_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr 
= (struct config_group const *)tmp; tmp___0 = (struct se_dev_stat_grps *)__mptr + 0xffffffffffffff20UL; } else { tmp___0 = (struct se_dev_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_tgt_dev_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_tgt_dev_attribute *target_stat_scsi_tgt_dev_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_tgt_dev(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_tgt_dev_attr = (struct target_stat_scsi_tgt_dev_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_tgt_dev_attr->show != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_tgt_dev_attr->show))(se_dev_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_tgt_dev_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_tgt_dev_attribute *target_stat_scsi_tgt_dev_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_tgt_dev(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_tgt_dev_attr = (struct target_stat_scsi_tgt_dev_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_tgt_dev_attr->store != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_tgt_dev_attr->store))(se_dev_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_tgt_dev_attrs[7U] = { & target_stat_scsi_tgt_dev_inst.attr, & target_stat_scsi_tgt_dev_indx.attr, & target_stat_scsi_tgt_dev_num_lus.attr, & target_stat_scsi_tgt_dev_status.attr, & target_stat_scsi_tgt_dev_non_access_lus.attr, & target_stat_scsi_tgt_dev_resets.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_tgt_dev_attrib_ops = {0, & target_stat_scsi_tgt_dev_attr_show, & target_stat_scsi_tgt_dev_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_tgt_dev_cit = {& __this_module, & target_stat_scsi_tgt_dev_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_tgt_dev_attrs)}; static ssize_t target_stat_scsi_lu_show_attr_inst(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; struct se_hba *hba ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; hba = dev->se_hba; tmp = snprintf(page, 4096UL, "%u\n", hba->hba_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_inst, 0}; static ssize_t target_stat_scsi_lu_show_attr_dev(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%u\n", dev->dev_index); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_dev = {{"dev", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_dev, 0}; static ssize_t 
target_stat_scsi_lu_show_attr_indx(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", 1); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_indx, 0}; static ssize_t target_stat_scsi_lu_show_attr_lun(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%llu\n", 0ULL); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_lun = {{"lun", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_lun, 0}; static ssize_t target_stat_scsi_lu_show_attr_lu_name(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; size_t tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = strlen((char const *)(& dev->t10_wwn.unit_serial)); tmp___0 = snprintf(page, 4096UL, "%s\n", tmp != 0UL ? (char *)(& dev->t10_wwn.unit_serial) : (char *)"None"); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_lu_name = {{"lu_name", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_lu_name, 0}; static ssize_t target_stat_scsi_lu_show_attr_vend(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int i ; char str[9U] ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; i = 0; goto ldv_58209; ldv_58208: str[i] = (int )((signed char )dev->t10_wwn.vendor[i]) > 31 && (int )((signed char )dev->t10_wwn.vendor[i]) != 127 ? dev->t10_wwn.vendor[i] : 32; i = i + 1; ldv_58209: ; if ((unsigned int )i <= 7U) { goto ldv_58208; } else { } str[i] = 0; tmp = snprintf(page, 4096UL, "%s\n", (char *)(& str)); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_vend = {{"vend", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_vend, 0}; static ssize_t target_stat_scsi_lu_show_attr_prod(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int i ; char str[17U] ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; i = 0; goto ldv_58222; ldv_58221: str[i] = (int )((signed char )dev->t10_wwn.model[i]) > 31 && (int )((signed char )dev->t10_wwn.model[i]) != 127 ? dev->t10_wwn.model[i] : 32; i = i + 1; ldv_58222: ; if ((unsigned int )i <= 7U) { goto ldv_58221; } else { } str[i] = 0; tmp = snprintf(page, 4096UL, "%s\n", (char *)(& str)); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_prod = {{"prod", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_prod, 0}; static ssize_t target_stat_scsi_lu_show_attr_rev(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int i ; char str[5U] ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; i = 0; goto ldv_58235; ldv_58234: str[i] = (int )((signed char )dev->t10_wwn.revision[i]) > 31 && (int )((signed char )dev->t10_wwn.revision[i]) != 127 ? 
dev->t10_wwn.revision[i] : 32; i = i + 1; ldv_58235: ; if ((unsigned int )i <= 3U) { goto ldv_58234; } else { } str[i] = 0; tmp = snprintf(page, 4096UL, "%s\n", (char *)(& str)); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_rev = {{"rev", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_rev, 0}; static ssize_t target_stat_scsi_lu_show_attr_dev_type(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; u32 tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = (*((dev->transport)->get_device_type))(dev); tmp___0 = snprintf(page, 4096UL, "%u\n", tmp); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_dev_type = {{"dev_type", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_dev_type, 0}; static ssize_t target_stat_scsi_lu_show_attr_status(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%s\n", dev->export_count != 0U ? (char *)"available" : (char *)"notavailable"); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_status = {{"status", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_status, 0}; static ssize_t target_stat_scsi_lu_show_attr_state_bit(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "exposed\n"); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_state_bit = {{"state_bit", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_state_bit, 0}; static ssize_t target_stat_scsi_lu_show_attr_num_cmds(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; long tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = atomic_long_read(& dev->num_cmds); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_num_cmds = {{"num_cmds", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_num_cmds, 0}; static ssize_t target_stat_scsi_lu_show_attr_read_mbytes(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; long tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = atomic_long_read(& dev->read_bytes); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp >> 20); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_read_mbytes = {{"read_mbytes", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_read_mbytes, 0}; static ssize_t target_stat_scsi_lu_show_attr_write_mbytes(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; long tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = atomic_long_read(& dev->write_bytes); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp >> 20); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_write_mbytes = 
{{"write_mbytes", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_write_mbytes, 0}; static ssize_t target_stat_scsi_lu_show_attr_resets(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; long tmp ; int tmp___0 ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = atomic_long_read(& dev->num_resets); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp); return ((ssize_t )tmp___0); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_resets = {{"resets", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_resets, 0}; static ssize_t target_stat_scsi_lu_show_attr_full_stat(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", 0); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_full_stat = {{"full_stat", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_full_stat, 0}; static ssize_t target_stat_scsi_lu_show_attr_hs_num_cmds(struct se_dev_stat_grps *sgrps , char *page ) { int tmp ; { tmp = snprintf(page, 4096UL, "%u\n", 0); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_hs_num_cmds = {{"hs_num_cmds", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_hs_num_cmds, 0}; static ssize_t target_stat_scsi_lu_show_attr_creation_time(struct se_dev_stat_grps *sgrps , char *page ) { struct se_device *dev ; struct se_dev_stat_grps const *__mptr ; int tmp ; { __mptr = (struct se_dev_stat_grps const *)sgrps; dev = (struct se_device *)__mptr + 0xfffffffffffff6e0UL; tmp = snprintf(page, 4096UL, "%u\n", (unsigned int )(((unsigned long )((unsigned int )dev->creation_time) * 100UL + 0xffffff9c007270e0UL) / 250UL)); return ((ssize_t )tmp); } } static struct target_stat_scsi_lu_attribute target_stat_scsi_lu_creation_time = {{"creation_time", & __this_module, 292U}, & target_stat_scsi_lu_show_attr_creation_time, 0}; static struct se_dev_stat_grps *to_target_stat_scsi_lu(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_dev_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_dev_stat_grps *)__mptr + 0xfffffffffffffeb0UL; } else { tmp___0 = (struct se_dev_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_lu_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_lu_attribute *target_stat_scsi_lu_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_lu(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_lu_attr = (struct target_stat_scsi_lu_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_lu_attr->show != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_lu_attr->show))(se_dev_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_lu_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_dev_stat_grps *se_dev_stat_grps ; struct se_dev_stat_grps *tmp ; struct target_stat_scsi_lu_attribute *target_stat_scsi_lu_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = 
to_target_stat_scsi_lu(item); se_dev_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_lu_attr = (struct target_stat_scsi_lu_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_lu_attr->store != (unsigned long )((ssize_t (*)(struct se_dev_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_lu_attr->store))(se_dev_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_lu_attrs[19U] = { & target_stat_scsi_lu_inst.attr, & target_stat_scsi_lu_dev.attr, & target_stat_scsi_lu_indx.attr, & target_stat_scsi_lu_lun.attr, & target_stat_scsi_lu_lu_name.attr, & target_stat_scsi_lu_vend.attr, & target_stat_scsi_lu_prod.attr, & target_stat_scsi_lu_rev.attr, & target_stat_scsi_lu_dev_type.attr, & target_stat_scsi_lu_status.attr, & target_stat_scsi_lu_state_bit.attr, & target_stat_scsi_lu_num_cmds.attr, & target_stat_scsi_lu_read_mbytes.attr, & target_stat_scsi_lu_write_mbytes.attr, & target_stat_scsi_lu_resets.attr, & target_stat_scsi_lu_full_stat.attr, & target_stat_scsi_lu_hs_num_cmds.attr, & target_stat_scsi_lu_creation_time.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_lu_attrib_ops = {0, & target_stat_scsi_lu_attr_show, & target_stat_scsi_lu_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_lu_cit = {& __this_module, & target_stat_scsi_lu_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_lu_attrs)}; void target_stat_setup_dev_default_groups(struct se_device *dev ) { struct config_group *dev_stat_grp ; { dev_stat_grp = & dev->dev_stat_grps.stat_group; config_group_init_type_name(& dev->dev_stat_grps.scsi_dev_group, "scsi_dev", & target_stat_scsi_dev_cit); config_group_init_type_name(& dev->dev_stat_grps.scsi_tgt_dev_group, "scsi_tgt_dev", & target_stat_scsi_tgt_dev_cit); config_group_init_type_name(& dev->dev_stat_grps.scsi_lu_group, "scsi_lu", & target_stat_scsi_lu_cit); *(dev_stat_grp->default_groups) = & dev->dev_stat_grps.scsi_dev_group; *(dev_stat_grp->default_groups + 1UL) = & dev->dev_stat_grps.scsi_tgt_dev_group; *(dev_stat_grp->default_groups + 2UL) = & dev->dev_stat_grps.scsi_lu_group; *(dev_stat_grp->default_groups + 3UL) = (struct config_group *)0; return; } } static ssize_t target_stat_scsi_port_show_attr_inst(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_393 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 546, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", dev->hba_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_port_attribute target_stat_scsi_port_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_port_show_attr_inst, 0}; static ssize_t target_stat_scsi_port_show_attr_dev(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_395 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 562, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", dev->dev_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_port_attribute target_stat_scsi_port_dev = {{"dev", & __this_module, 292U}, & target_stat_scsi_port_show_attr_dev, 0}; static ssize_t target_stat_scsi_port_show_attr_indx(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_397 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 578, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", (int )lun->lun_rtpi); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_port_attribute target_stat_scsi_port_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_port_show_attr_indx, 0}; static ssize_t target_stat_scsi_port_show_attr_role(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_399 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 594, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%s%u\n", (char *)"Device", dev->dev_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_port_attribute target_stat_scsi_port_role = {{"role", & __this_module, 292U}, & target_stat_scsi_port_show_attr_role, 0}; static ssize_t target_stat_scsi_port_show_attr_busy_count(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_401___0 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 610, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", 0); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_port_attribute target_stat_scsi_port_busy_count = {{"busy_count", & __this_module, 292U}, & target_stat_scsi_port_show_attr_busy_count, 0}; static struct se_port_stat_grps *to_target_stat_scsi_port(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_port_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_port_stat_grps *)__mptr + 0xffffffffffffff90UL; } else { tmp___0 = (struct se_port_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_port_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_port_attribute *target_stat_scsi_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_port(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_port_attr = (struct target_stat_scsi_port_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_port_attr->show != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_port_attr->show))(se_port_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_port_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_port_attribute *target_stat_scsi_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_port(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_port_attr = (struct target_stat_scsi_port_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_port_attr->store != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_port_attr->store))(se_port_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_port_attrs[6U] = { & target_stat_scsi_port_inst.attr, & target_stat_scsi_port_dev.attr, & target_stat_scsi_port_indx.attr, & target_stat_scsi_port_role.attr, & target_stat_scsi_port_busy_count.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_port_attrib_ops = {0, & target_stat_scsi_port_attr_show, & target_stat_scsi_port_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_port_cit = {& __this_module, & target_stat_scsi_port_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_port_attrs)}; static ssize_t target_stat_scsi_tgt_port_show_attr_inst(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; 
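/*
 * Editor's note (added comment, not generated by CIL): the scsi_port and
 * scsi_tgt_port "show" handlers in this region all follow one expanded
 * pattern: rcu_read_lock(), a READ_ONCE-style load of lun->lun_se_dev (the
 * __read_once_size() plus lockdep_rcu_suspicious() sequence corresponds to an
 * rcu_dereference_check() in the original target_core_stat.c), one snprintf()
 * into the 4096-byte configfs page if the device is still attached, then
 * rcu_read_unlock().  A minimal sketch of the uninstrumented form, assuming
 * the usual kernel RCU helpers:
 *
 *     rcu_read_lock();
 *     dev = rcu_dereference(lun->lun_se_dev);
 *     if (dev)
 *         ret = snprintf(page, PAGE_SIZE, "%u\n", dev->hba_index);
 *     rcu_read_unlock();
 *     return ret;
 */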
struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_403 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 667, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", dev->hba_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_inst, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_dev(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_405 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 683, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", dev->dev_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_dev = {{"dev", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_dev, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_indx(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_407___0 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 699, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", (int )lun->lun_rtpi); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_indx, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_name(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_portal_group *tpg ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_409___0 __u ; bool __warned ; int tmp ; int tmp___0 ; char *tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; tpg = lun->lun_tpg; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 716, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___2 = snprintf(page, 4096UL, "%sPort#%u\n", tmp___1, (int )lun->lun_rtpi); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_name = {{"name", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_name, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_port_index(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_portal_group *tpg ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_411 __u ; bool __warned ; int tmp ; int tmp___0 ; u16 tmp___1 ; char *tmp___2 ; int tmp___3 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; tpg = lun->lun_tpg; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 735, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___2 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___3 = snprintf(page, 4096UL, "%s%s%d\n", tmp___2, (char *)"+t+", (int )tmp___1); ret = (ssize_t )tmp___3; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_port_index = {{"port_index", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_port_index, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_in_cmds(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_413 __u ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 753, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = atomic_long_read(& lun->lun_stats.cmd_pdus); tmp___2 = snprintf(page, 4096UL, "%lu\n", tmp___1); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_in_cmds = {{"in_cmds", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_in_cmds, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_write_mbytes(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_415 __u ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 770, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = atomic_long_read(& lun->lun_stats.rx_data_octets); tmp___2 = snprintf(page, 4096UL, "%u\n", (unsigned int )(tmp___1 >> 20)); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_write_mbytes = {{"write_mbytes", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_write_mbytes, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_read_mbytes(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_417 __u ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 787, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = atomic_long_read(& lun->lun_stats.tx_data_octets); tmp___2 = snprintf(page, 4096UL, "%u\n", (unsigned int )(tmp___1 >> 20)); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_read_mbytes = {{"read_mbytes", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_read_mbytes, 0}; static ssize_t target_stat_scsi_tgt_port_show_attr_hs_in_cmds(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_419 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 804, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", 0); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_tgt_port_attribute target_stat_scsi_tgt_port_hs_in_cmds = {{"hs_in_cmds", & __this_module, 292U}, & target_stat_scsi_tgt_port_show_attr_hs_in_cmds, 0}; static struct se_port_stat_grps *to_target_stat_scsi_tgt_port(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_port_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_port_stat_grps *)__mptr + 0xffffffffffffff20UL; } else { tmp___0 = (struct se_port_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_tgt_port_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_tgt_port_attribute *target_stat_scsi_tgt_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_tgt_port(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_tgt_port_attr = (struct target_stat_scsi_tgt_port_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_tgt_port_attr->show != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_tgt_port_attr->show))(se_port_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_tgt_port_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_tgt_port_attribute *target_stat_scsi_tgt_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_tgt_port(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_tgt_port_attr = (struct target_stat_scsi_tgt_port_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_tgt_port_attr->store != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_tgt_port_attr->store))(se_port_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_tgt_port_attrs[10U] = { & target_stat_scsi_tgt_port_inst.attr, & target_stat_scsi_tgt_port_dev.attr, & target_stat_scsi_tgt_port_indx.attr, & target_stat_scsi_tgt_port_name.attr, & target_stat_scsi_tgt_port_port_index.attr, & target_stat_scsi_tgt_port_in_cmds.attr, & target_stat_scsi_tgt_port_write_mbytes.attr, & target_stat_scsi_tgt_port_read_mbytes.attr, & target_stat_scsi_tgt_port_hs_in_cmds.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_tgt_port_attrib_ops = {0, & target_stat_scsi_tgt_port_attr_show, & target_stat_scsi_tgt_port_attr_store, 0, 0}; static struct 
config_item_type target_stat_scsi_tgt_port_cit = {& __this_module, & target_stat_scsi_tgt_port_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_tgt_port_attrs)}; static ssize_t target_stat_scsi_transport_show_attr_inst(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_421 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 867, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = snprintf(page, 4096UL, "%u\n", dev->hba_index); ret = (ssize_t )tmp___1; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_transport_attribute target_stat_scsi_transport_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_transport_show_attr_inst, 0}; static ssize_t target_stat_scsi_transport_show_attr_device(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; struct se_portal_group *tpg ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_423 __u ; bool __warned ; int tmp ; int tmp___0 ; char *tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; tpg = lun->lun_tpg; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 884, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = (*((tpg->se_tpg_tfo)->get_fabric_name))(); tmp___2 = snprintf(page, 4096UL, "scsiTransport%s\n", tmp___1); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_transport_attribute target_stat_scsi_transport_device = {{"device", & __this_module, 292U}, & target_stat_scsi_transport_show_attr_device, 0}; static ssize_t target_stat_scsi_transport_show_attr_indx(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; struct se_portal_group *tpg ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_425 __u ; bool __warned ; int tmp ; int tmp___0 ; u32 tmp___1 ; int tmp___2 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; tpg = lun->lun_tpg; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 904, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { tmp___1 = (*((tpg->se_tpg_tfo)->tpg_get_inst_index))(tpg); tmp___2 = snprintf(page, 4096UL, "%u\n", tmp___1); ret = (ssize_t )tmp___2; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_transport_attribute target_stat_scsi_transport_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_transport_show_attr_indx, 0}; static ssize_t target_stat_scsi_transport_show_attr_dev_name(struct se_port_stat_grps *pgrps , char *page ) { struct se_lun *lun ; struct se_port_stat_grps const *__mptr ; struct se_device *dev ; struct se_portal_group *tpg ; struct t10_wwn *wwn ; ssize_t ret ; struct se_device *________p1 ; struct se_device *_________p1 ; union __anonunion___u_427 __u ; bool __warned ; int tmp ; int tmp___0 ; size_t tmp___1 ; char *tmp___2 ; int tmp___3 ; { __mptr = (struct se_port_stat_grps const *)pgrps; lun = (struct se_lun *)__mptr + 0xfffffffffffffde0UL; tpg = lun->lun_tpg; ret = -19L; rcu_read_lock___0(); __read_once_size((void const volatile *)(& lun->lun_se_dev), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 923, "suspicious rcu_dereference_check() usage"); } else { } } else { } dev = ________p1; if ((unsigned long )dev != (unsigned long )((struct se_device *)0)) { wwn = & dev->t10_wwn; tmp___1 = strlen((char const *)(& wwn->unit_serial)); tmp___2 = (*((tpg->se_tpg_tfo)->tpg_get_wwn))(tpg); tmp___3 = snprintf(page, 4096UL, "%s+%s\n", tmp___2, tmp___1 != 0UL ? (char *)(& wwn->unit_serial) : (char *)(& wwn->vendor)); ret = (ssize_t )tmp___3; } else { } rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_transport_attribute target_stat_scsi_transport_dev_name = {{"dev_name", & __this_module, 292U}, & target_stat_scsi_transport_show_attr_dev_name, 0}; static struct se_port_stat_grps *to_target_stat_scsi_transport(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_port_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_port_stat_grps *)__mptr + 0xfffffffffffffeb0UL; } else { tmp___0 = (struct se_port_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_transport_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_transport_attribute *target_stat_scsi_transport_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_transport(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_transport_attr = (struct target_stat_scsi_transport_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_transport_attr->show != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_transport_attr->show))(se_port_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_transport_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_port_stat_grps *se_port_stat_grps ; struct se_port_stat_grps *tmp ; struct target_stat_scsi_transport_attribute *target_stat_scsi_transport_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_transport(item); se_port_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_transport_attr = (struct target_stat_scsi_transport_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_transport_attr->store != (unsigned long )((ssize_t (*)(struct se_port_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_transport_attr->store))(se_port_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_transport_attrs[5U] = { & target_stat_scsi_transport_inst.attr, & target_stat_scsi_transport_device.attr, & target_stat_scsi_transport_indx.attr, & target_stat_scsi_transport_dev_name.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_transport_attrib_ops = {0, & target_stat_scsi_transport_attr_show, & target_stat_scsi_transport_attr_store, 0, 0}; 
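/*
 * Editorial note (added comment, not part of the generated code): the
 * accessors above and below all follow one repeated pattern from
 * drivers/target/target_core_stat.c in its macro-expanded form for the
 * LDV/CPAchecker harness.  The "__mptr + 0xff...UL" arithmetic appears to be
 * the inlined expansion of container_of() with a negative member offset, and
 * the __read_once_size()/________p1/__warned/lockdep_rcu_suspicious()
 * sequence looks like the expansion of rcu_dereference_check() with lockdep
 * checking enabled.  Each *_attr_show()/*_attr_store() wrapper recovers the
 * stat-group struct from the config_item and dispatches to the attribute's
 * ->show/->store callback, returning 0 (show) or -22, i.e. -EINVAL (store),
 * when the callback is absent; the show helpers themselves return -19, i.e.
 * -ENODEV, when the RCU-protected device or LUN pointer is NULL.
 *
 * A minimal hand-written sketch of the same dispatch, kept in comment form so
 * the verification input itself is unchanged; it assumes the usual kernel
 * helpers (container_of, to_config_group), and the function and member names
 * used here are illustrative only:
 *
 *   static ssize_t example_transport_attr_show(struct config_item *item,
 *                                              struct configfs_attribute *attr,
 *                                              char *page)
 *   {
 *           struct se_port_stat_grps *pgrps =
 *                   item ? container_of(to_config_group(item),
 *                                       struct se_port_stat_grps,
 *                                       scsi_transport_group) : NULL;
 *           struct target_stat_scsi_transport_attribute *a =
 *                   container_of(attr,
 *                                struct target_stat_scsi_transport_attribute,
 *                                attr);
 *
 *           return a->show ? a->show(pgrps, page) : 0;
 *   }
 *
 * The generated code keeps the same behaviour with every macro and inline
 * helper expanded, which is why each accessor repeats the full lockdep
 * file-path string literal seen in the calls above.
 */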
static struct config_item_type target_stat_scsi_transport_cit = {& __this_module, & target_stat_scsi_transport_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_transport_attrs)}; void target_stat_setup_port_default_groups(struct se_lun *lun ) { struct config_group *port_stat_grp ; { port_stat_grp = & lun->port_stat_grps.stat_group; config_group_init_type_name(& lun->port_stat_grps.scsi_port_group, "scsi_port", & target_stat_scsi_port_cit); config_group_init_type_name(& lun->port_stat_grps.scsi_tgt_port_group, "scsi_tgt_port", & target_stat_scsi_tgt_port_cit); config_group_init_type_name(& lun->port_stat_grps.scsi_transport_group, "scsi_transport", & target_stat_scsi_transport_cit); *(port_stat_grp->default_groups) = & lun->port_stat_grps.scsi_port_group; *(port_stat_grp->default_groups + 1UL) = & lun->port_stat_grps.scsi_tgt_port_group; *(port_stat_grp->default_groups + 2UL) = & lun->port_stat_grps.scsi_transport_group; *(port_stat_grp->default_groups + 3UL) = (struct config_group *)0; return; } } static ssize_t target_stat_scsi_auth_intr_show_attr_inst(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_portal_group *tpg ; ssize_t ret ; u32 tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tpg = nacl->se_tpg; tmp = (*((tpg->se_tpg_tfo)->tpg_get_inst_index))(tpg); tmp___0 = snprintf(page, 4096UL, "%u\n", tmp); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_inst, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_dev(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_lun *lun ; ssize_t ret ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_429 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 1039, "suspicious rcu_dereference_check() usage"); } else { } } else { } lun = ________p1; tmp___1 = snprintf(page, 4096UL, "%u\n", lun->lun_index); ret = (ssize_t )tmp___1; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_dev = {{"dev", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_dev, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_port(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_portal_group *tpg ; ssize_t ret ; u16 tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tpg = nacl->se_tpg; tmp = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___0 = snprintf(page, 4096UL, "%u\n", (int )tmp); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_port = {{"port", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_port, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_indx(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", nacl->acl_index); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_indx, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_dev_or_port(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", 1); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_dev_or_port = {{"dev_or_port", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_dev_or_port, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_intr_name(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl 
; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%s\n", (char *)(& nacl->initiatorname)); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_intr_name = {{"intr_name", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_intr_name, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_map_indx(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", 0); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_map_indx = {{"map_indx", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_map_indx, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_att_count(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", deve->attach_count); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_att_count = {{"att_count", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_att_count, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_num_cmds(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; long tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = atomic_long_read(& deve->total_cmds); tmp___0 = snprintf(page, 4096UL, "%lu\n", tmp); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_num_cmds = {{"num_cmds", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_num_cmds, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_read_mbytes(struct se_ml_stat_grps 
*lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; long tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = atomic_long_read(& deve->read_bytes); tmp___0 = snprintf(page, 4096UL, "%u\n", (unsigned int )(tmp >> 20)); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_read_mbytes = {{"read_mbytes", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_read_mbytes, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_write_mbytes(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; long tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = atomic_long_read(& deve->write_bytes); tmp___0 = snprintf(page, 4096UL, "%u\n", (unsigned int )(tmp >> 20)); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_write_mbytes = {{"write_mbytes", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_write_mbytes, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_hs_num_cmds(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", 0); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_hs_num_cmds = {{"hs_num_cmds", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_hs_num_cmds, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_creation_time(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", (unsigned int )(((unsigned long )((unsigned int )deve->creation_time) * 100UL + 0xffffff9c007270e0UL) / 250UL)); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static 
struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_creation_time = {{"creation_time", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_creation_time, 0}; static ssize_t target_stat_scsi_auth_intr_show_attr_row_status(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "Ready\n"); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_auth_intr_attribute target_stat_scsi_auth_intr_row_status = {{"row_status", & __this_module, 292U}, & target_stat_scsi_auth_intr_show_attr_row_status, 0}; static struct se_ml_stat_grps *to_target_stat_scsi_auth_intr(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_ml_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_ml_stat_grps *)__mptr + 0xffffffffffffff90UL; } else { tmp___0 = (struct se_ml_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_auth_intr_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_ml_stat_grps *se_ml_stat_grps ; struct se_ml_stat_grps *tmp ; struct target_stat_scsi_auth_intr_attribute *target_stat_scsi_auth_intr_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_auth_intr(item); se_ml_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_auth_intr_attr = (struct target_stat_scsi_auth_intr_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_auth_intr_attr->show != (unsigned long )((ssize_t (*)(struct se_ml_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_auth_intr_attr->show))(se_ml_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_auth_intr_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_ml_stat_grps *se_ml_stat_grps ; struct se_ml_stat_grps *tmp ; struct target_stat_scsi_auth_intr_attribute *target_stat_scsi_auth_intr_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_auth_intr(item); se_ml_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_auth_intr_attr = (struct target_stat_scsi_auth_intr_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_auth_intr_attr->store != (unsigned long )((ssize_t (*)(struct se_ml_stat_grps * , char const * , size_t ))0)) { ret = (*(target_stat_scsi_auth_intr_attr->store))(se_ml_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_auth_intr_attrs[15U] = { & target_stat_scsi_auth_intr_inst.attr, & target_stat_scsi_auth_intr_dev.attr, & target_stat_scsi_auth_intr_port.attr, & target_stat_scsi_auth_intr_indx.attr, & target_stat_scsi_auth_intr_dev_or_port.attr, & target_stat_scsi_auth_intr_intr_name.attr, & target_stat_scsi_auth_intr_map_indx.attr, & 
target_stat_scsi_auth_intr_att_count.attr, & target_stat_scsi_auth_intr_num_cmds.attr, & target_stat_scsi_auth_intr_read_mbytes.attr, & target_stat_scsi_auth_intr_write_mbytes.attr, & target_stat_scsi_auth_intr_hs_num_cmds.attr, & target_stat_scsi_auth_intr_creation_time.attr, & target_stat_scsi_auth_intr_row_status.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_auth_intr_attrib_ops = {0, & target_stat_scsi_auth_intr_attr_show, & target_stat_scsi_auth_intr_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_auth_intr_cit = {& __this_module, & target_stat_scsi_auth_intr_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_auth_intr_attrs)}; static ssize_t target_stat_scsi_att_intr_port_show_attr_inst(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_portal_group *tpg ; ssize_t ret ; u32 tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tpg = nacl->se_tpg; tmp = (*((tpg->se_tpg_tfo)->tpg_get_inst_index))(tpg); tmp___0 = snprintf(page, 4096UL, "%u\n", tmp); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_inst = {{"inst", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_inst, 0}; static ssize_t target_stat_scsi_att_intr_port_show_attr_dev(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_lun *lun ; ssize_t ret ; struct se_lun *________p1 ; struct se_lun *_________p1 ; union __anonunion___u_431 __u ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } __read_once_size((void const volatile *)(& deve->se_lun), (void *)(& __u.__c), 8); _________p1 = __u.__val; ________p1 = _________p1; tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_stat.c", 1408, "suspicious rcu_dereference_check() usage"); } else { } } else { } lun = ________p1; tmp___1 = snprintf(page, 4096UL, "%u\n", lun->lun_index); ret = (ssize_t )tmp___1; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_dev = {{"dev", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_dev, 0}; static ssize_t target_stat_scsi_att_intr_port_show_attr_port(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; struct se_portal_group *tpg ; ssize_t ret ; u16 tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tpg = nacl->se_tpg; tmp = (*((tpg->se_tpg_tfo)->tpg_get_tag))(tpg); tmp___0 = snprintf(page, 4096UL, "%u\n", (int )tmp); ret = (ssize_t )tmp___0; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_port = {{"port", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_port, 0}; static ssize_t target_stat_scsi_att_intr_port_show_attr_indx(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_session *se_sess ; struct se_portal_group *tpg ; ssize_t ret ; u32 tmp ; int tmp___0 ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; spin_lock_irq(& nacl->nacl_sess_lock); se_sess = nacl->nacl_sess; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0)) { spin_unlock_irq(& nacl->nacl_sess_lock); return (-19L); } else { } tpg = nacl->se_tpg; tmp = (*((tpg->se_tpg_tfo)->sess_get_index))(se_sess); tmp___0 = snprintf(page, 4096UL, "%u\n", tmp); ret = (ssize_t )tmp___0; spin_unlock_irq(& nacl->nacl_sess_lock); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_indx = {{"indx", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_indx, 0}; static ssize_t target_stat_scsi_att_intr_port_show_attr_port_auth_indx(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_dev_entry *deve ; ssize_t ret ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; rcu_read_lock___0(); deve = target_nacl_find_deve(nacl, lacl->mapped_lun); if ((unsigned long )deve == (unsigned long )((struct se_dev_entry *)0)) { rcu_read_unlock___0(); return (-19L); } else { } tmp = snprintf(page, 4096UL, "%u\n", nacl->acl_index); ret = (ssize_t )tmp; rcu_read_unlock___0(); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_port_auth_indx = 
{{"port_auth_indx", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_port_auth_indx, 0}; static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(struct se_ml_stat_grps *lgrps , char *page ) { struct se_lun_acl *lacl ; struct se_ml_stat_grps const *__mptr ; struct se_node_acl *nacl ; struct se_session *se_sess ; struct se_portal_group *tpg ; ssize_t ret ; unsigned char buf[64U] ; int tmp ; { __mptr = (struct se_ml_stat_grps const *)lgrps; lacl = (struct se_lun_acl *)__mptr + 0xfffffffffffffe98UL; nacl = lacl->se_lun_nacl; spin_lock_irq(& nacl->nacl_sess_lock); se_sess = nacl->nacl_sess; if ((unsigned long )se_sess == (unsigned long )((struct se_session *)0)) { spin_unlock_irq(& nacl->nacl_sess_lock); return (-19L); } else { } tpg = nacl->se_tpg; memset((void *)(& buf), 0, 64UL); if ((unsigned long )(tpg->se_tpg_tfo)->sess_get_initiator_sid != (unsigned long )((u32 (*/* const */)(struct se_session * , unsigned char * , u32 ))0)) { (*((tpg->se_tpg_tfo)->sess_get_initiator_sid))(se_sess, (unsigned char *)(& buf), 64U); } else { } tmp = snprintf(page, 4096UL, "%s+i+%s\n", (char *)(& nacl->initiatorname), (unsigned char *)(& buf)); ret = (ssize_t )tmp; spin_unlock_irq(& nacl->nacl_sess_lock); return (ret); } } static struct target_stat_scsi_att_intr_port_attribute target_stat_scsi_att_intr_port_port_ident = {{"port_ident", & __this_module, 292U}, & target_stat_scsi_att_intr_port_show_attr_port_ident, 0}; static struct se_ml_stat_grps *to_target_stat_scsi_att_intr_port(struct config_item *ci ) { struct config_group const *__mptr ; struct config_group *tmp ; struct se_ml_stat_grps *tmp___0 ; { if ((unsigned long )ci != (unsigned long )((struct config_item *)0)) { tmp = to_config_group(ci); __mptr = (struct config_group const *)tmp; tmp___0 = (struct se_ml_stat_grps *)__mptr + 0xffffffffffffff20UL; } else { tmp___0 = (struct se_ml_stat_grps *)0; } return (tmp___0); } } static ssize_t target_stat_scsi_att_intr_port_attr_show(struct config_item *item , struct configfs_attribute *attr , char *page ) { struct se_ml_stat_grps *se_ml_stat_grps ; struct se_ml_stat_grps *tmp ; struct target_stat_scsi_att_intr_port_attribute *target_stat_scsi_att_intr_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_att_intr_port(item); se_ml_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_att_intr_port_attr = (struct target_stat_scsi_att_intr_port_attribute *)__mptr; ret = 0L; if ((unsigned long )target_stat_scsi_att_intr_port_attr->show != (unsigned long )((ssize_t (*)(struct se_ml_stat_grps * , char * ))0)) { ret = (*(target_stat_scsi_att_intr_port_attr->show))(se_ml_stat_grps, page); } else { } return (ret); } } static ssize_t target_stat_scsi_att_intr_port_attr_store(struct config_item *item , struct configfs_attribute *attr , char const *page , size_t count ) { struct se_ml_stat_grps *se_ml_stat_grps ; struct se_ml_stat_grps *tmp ; struct target_stat_scsi_att_intr_port_attribute *target_stat_scsi_att_intr_port_attr ; struct configfs_attribute const *__mptr ; ssize_t ret ; { tmp = to_target_stat_scsi_att_intr_port(item); se_ml_stat_grps = tmp; __mptr = (struct configfs_attribute const *)attr; target_stat_scsi_att_intr_port_attr = (struct target_stat_scsi_att_intr_port_attribute *)__mptr; ret = -22L; if ((unsigned long )target_stat_scsi_att_intr_port_attr->store != (unsigned long )((ssize_t (*)(struct se_ml_stat_grps * , char const * , size_t ))0)) { ret = 
(*(target_stat_scsi_att_intr_port_attr->store))(se_ml_stat_grps, page, count); } else { } return (ret); } } static struct configfs_attribute *target_stat_scsi_ath_intr_port_attrs[7U] = { & target_stat_scsi_att_intr_port_inst.attr, & target_stat_scsi_att_intr_port_dev.attr, & target_stat_scsi_att_intr_port_port.attr, & target_stat_scsi_att_intr_port_indx.attr, & target_stat_scsi_att_intr_port_port_auth_indx.attr, & target_stat_scsi_att_intr_port_port_ident.attr, (struct configfs_attribute *)0}; static struct configfs_item_operations target_stat_scsi_att_intr_port_attrib_ops = {0, & target_stat_scsi_att_intr_port_attr_show, & target_stat_scsi_att_intr_port_attr_store, 0, 0}; static struct config_item_type target_stat_scsi_att_intr_port_cit = {& __this_module, & target_stat_scsi_att_intr_port_attrib_ops, 0, (struct configfs_attribute **)(& target_stat_scsi_ath_intr_port_attrs)}; void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *lacl ) { struct config_group *ml_stat_grp ; { ml_stat_grp = & lacl->ml_stat_grps.stat_group; config_group_init_type_name(& lacl->ml_stat_grps.scsi_auth_intr_group, "scsi_auth_intr", & target_stat_scsi_auth_intr_cit); config_group_init_type_name(& lacl->ml_stat_grps.scsi_att_intr_port_group, "scsi_att_intr_port", & target_stat_scsi_att_intr_port_cit); *(ml_stat_grp->default_groups) = & lacl->ml_stat_grps.scsi_auth_intr_group; *(ml_stat_grp->default_groups + 1UL) = & lacl->ml_stat_grps.scsi_att_intr_port_group; *(ml_stat_grp->default_groups + 2UL) = (struct config_group *)0; return; } } void ldv_initialize_configfs_item_operations_9(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_att_intr_port_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_att_intr_port_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_52(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_lu_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_lu_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_36(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_tgt_port_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_tgt_port_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_71(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_tgt_dev_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_tgt_dev_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_16(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_auth_intr_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_auth_intr_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_31(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_transport_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_transport_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_78(void) { void *tmp ; void *tmp___0 ; { tmp = 
ldv_init_zalloc(24UL); target_stat_scsi_dev_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_dev_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_initialize_configfs_item_operations_46(void) { void *tmp ; void *tmp___0 ; { tmp = ldv_init_zalloc(24UL); target_stat_scsi_port_attrib_ops_group0 = (struct configfs_attribute *)tmp; tmp___0 = ldv_init_zalloc(80UL); target_stat_scsi_port_attrib_ops_group1 = (struct config_item *)tmp___0; return; } } void ldv_main_exported_33(void) { struct se_port_stat_grps *ldvarg251 ; void *tmp ; char *ldvarg252 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg251 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg252 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_33 == 1) { target_stat_scsi_transport_show_attr_indx(ldvarg251, ldvarg252); ldv_state_variable_33 = 1; } else { } goto ldv_59185; default: ldv_stop(); } ldv_59185: ; return; } } void ldv_main_exported_32(void) { struct se_port_stat_grps *ldvarg3 ; void *tmp ; char *ldvarg4 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg3 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg4 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_32 == 1) { target_stat_scsi_transport_show_attr_dev_name(ldvarg3, ldvarg4); ldv_state_variable_32 = 1; } else { } goto ldv_59193; default: ldv_stop(); } ldv_59193: ; return; } } void ldv_main_exported_63(void) { struct se_dev_stat_grps *ldvarg255 ; void *tmp ; char *ldvarg256 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg255 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg256 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_63 == 1) { target_stat_scsi_lu_show_attr_rev(ldvarg255, ldvarg256); ldv_state_variable_63 = 1; } else { } goto ldv_59201; default: ldv_stop(); } ldv_59201: ; return; } } void ldv_main_exported_21(void) { struct se_ml_stat_grps *ldvarg253 ; void *tmp ; char *ldvarg254 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg253 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg254 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_21 == 1) { target_stat_scsi_auth_intr_show_attr_read_mbytes(ldvarg253, ldvarg254); ldv_state_variable_21 = 1; } else { } goto ldv_59209; default: ldv_stop(); } ldv_59209: ; return; } } void ldv_main_exported_71(void) { char *ldvarg12 ; void *tmp ; char *ldvarg13 ; void *tmp___0 ; size_t ldvarg11 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg12 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg13 = (char *)tmp___0; ldv_memset((void *)(& ldvarg11), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_71 == 1) { target_stat_scsi_tgt_dev_attr_show(target_stat_scsi_tgt_dev_attrib_ops_group1, target_stat_scsi_tgt_dev_attrib_ops_group0, ldvarg13); ldv_state_variable_71 = 1; } else { } goto ldv_59218; case 1: ; if (ldv_state_variable_71 == 1) { target_stat_scsi_tgt_dev_attr_store(target_stat_scsi_tgt_dev_attrib_ops_group1, target_stat_scsi_tgt_dev_attrib_ops_group0, (char const *)ldvarg12, ldvarg11); ldv_state_variable_71 = 1; } else { } goto ldv_59218; default: ldv_stop(); } ldv_59218: ; return; } } void 
ldv_main_exported_80(void) { char *ldvarg260 ; void *tmp ; struct se_dev_stat_grps *ldvarg259 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg260 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg259 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_80 == 1) { target_stat_scsi_dev_show_attr_role(ldvarg259, ldvarg260); ldv_state_variable_80 = 1; } else { } goto ldv_59227; default: ldv_stop(); } ldv_59227: ; return; } } void ldv_main_exported_26(void) { struct se_ml_stat_grps *ldvarg257 ; void *tmp ; char *ldvarg258 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg257 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg258 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_26 == 1) { target_stat_scsi_auth_intr_show_attr_dev_or_port(ldvarg257, ldvarg258); ldv_state_variable_26 = 1; } else { } goto ldv_59235; default: ldv_stop(); } ldv_59235: ; return; } } void ldv_main_exported_18(void) { struct se_ml_stat_grps *ldvarg20 ; void *tmp ; char *ldvarg21 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg20 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg21 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_18 == 1) { target_stat_scsi_auth_intr_show_attr_creation_time(ldvarg20, ldvarg21); ldv_state_variable_18 = 1; } else { } goto ldv_59243; default: ldv_stop(); } ldv_59243: ; return; } } void ldv_main_exported_72(void) { char *ldvarg276 ; void *tmp ; struct se_dev_stat_grps *ldvarg275 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg276 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg275 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_72 == 1) { target_stat_scsi_tgt_dev_show_attr_resets(ldvarg275, ldvarg276); ldv_state_variable_72 = 1; } else { } goto ldv_59251; default: ldv_stop(); } ldv_59251: ; return; } } void ldv_main_exported_16(void) { size_t ldvarg25 ; char *ldvarg26 ; void *tmp ; char *ldvarg27 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg26 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg27 = (char *)tmp___0; ldv_memset((void *)(& ldvarg25), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_16 == 1) { target_stat_scsi_auth_intr_attr_show(target_stat_scsi_auth_intr_attrib_ops_group1, target_stat_scsi_auth_intr_attrib_ops_group0, ldvarg27); ldv_state_variable_16 = 1; } else { } goto ldv_59260; case 1: ; if (ldv_state_variable_16 == 1) { target_stat_scsi_auth_intr_attr_store(target_stat_scsi_auth_intr_attrib_ops_group1, target_stat_scsi_auth_intr_attrib_ops_group0, (char const *)ldvarg26, ldvarg25); ldv_state_variable_16 = 1; } else { } goto ldv_59260; default: ldv_stop(); } ldv_59260: ; return; } } void ldv_main_exported_44(void) { struct se_port_stat_grps *ldvarg28 ; void *tmp ; char *ldvarg29 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg28 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg29 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_44 == 1) { target_stat_scsi_tgt_port_show_attr_dev(ldvarg28, ldvarg29); ldv_state_variable_44 = 1; } else { } goto ldv_59269; default: ldv_stop(); } ldv_59269: ; return; } } void 
ldv_main_exported_55(void) { struct se_dev_stat_grps *ldvarg30 ; void *tmp ; char *ldvarg31 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg30 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg31 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_55 == 1) { target_stat_scsi_lu_show_attr_full_stat(ldvarg30, ldvarg31); ldv_state_variable_55 = 1; } else { } goto ldv_59277; default: ldv_stop(); } ldv_59277: ; return; } } void ldv_main_exported_74(void) { struct se_dev_stat_grps *ldvarg277 ; void *tmp ; char *ldvarg278 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg277 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg278 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_74 == 1) { target_stat_scsi_tgt_dev_show_attr_status(ldvarg277, ldvarg278); ldv_state_variable_74 = 1; } else { } goto ldv_59285; default: ldv_stop(); } ldv_59285: ; return; } } void ldv_main_exported_27(void) { struct se_ml_stat_grps *ldvarg36 ; void *tmp ; char *ldvarg37 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg36 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg37 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_27 == 1) { target_stat_scsi_auth_intr_show_attr_indx(ldvarg36, ldvarg37); ldv_state_variable_27 = 1; } else { } goto ldv_59293; default: ldv_stop(); } ldv_59293: ; return; } } void ldv_main_exported_57(void) { char *ldvarg49 ; void *tmp ; struct se_dev_stat_grps *ldvarg48 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg49 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg48 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_57 == 1) { target_stat_scsi_lu_show_attr_write_mbytes(ldvarg48, ldvarg49); ldv_state_variable_57 = 1; } else { } goto ldv_59301; default: ldv_stop(); } ldv_59301: ; return; } } void ldv_main_exported_61(void) { struct se_dev_stat_grps *ldvarg281 ; void *tmp ; char *ldvarg282 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg281 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg282 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_61 == 1) { target_stat_scsi_lu_show_attr_status(ldvarg281, ldvarg282); ldv_state_variable_61 = 1; } else { } goto ldv_59309; default: ldv_stop(); } ldv_59309: ; return; } } void ldv_main_exported_20(void) { struct se_ml_stat_grps *ldvarg50 ; void *tmp ; char *ldvarg51 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg50 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg51 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_20 == 1) { target_stat_scsi_auth_intr_show_attr_write_mbytes(ldvarg50, ldvarg51); ldv_state_variable_20 = 1; } else { } goto ldv_59317; default: ldv_stop(); } ldv_59317: ; return; } } void ldv_main_exported_10(void) { struct se_ml_stat_grps *ldvarg296 ; void *tmp ; char *ldvarg297 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg296 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg297 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_10 == 1) { 
target_stat_scsi_att_intr_port_show_attr_port_ident(ldvarg296, ldvarg297); ldv_state_variable_10 = 1; } else { } goto ldv_59325; default: ldv_stop(); } ldv_59325: ; return; } } void ldv_main_exported_31(void) { size_t ldvarg72 ; char *ldvarg74 ; void *tmp ; char *ldvarg73 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg74 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg73 = (char *)tmp___0; ldv_memset((void *)(& ldvarg72), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_31 == 1) { target_stat_scsi_transport_attr_show(target_stat_scsi_transport_attrib_ops_group1, target_stat_scsi_transport_attrib_ops_group0, ldvarg74); ldv_state_variable_31 = 1; } else { } goto ldv_59334; case 1: ; if (ldv_state_variable_31 == 1) { target_stat_scsi_transport_attr_store(target_stat_scsi_transport_attrib_ops_group1, target_stat_scsi_transport_attrib_ops_group0, (char const *)ldvarg73, ldvarg72); ldv_state_variable_31 = 1; } else { } goto ldv_59334; default: ldv_stop(); } ldv_59334: ; return; } } void ldv_main_exported_35(void) { struct se_port_stat_grps *ldvarg75 ; void *tmp ; char *ldvarg76 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg75 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg76 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_35 == 1) { target_stat_scsi_transport_show_attr_inst(ldvarg75, ldvarg76); ldv_state_variable_35 = 1; } else { } goto ldv_59343; default: ldv_stop(); } ldv_59343: ; return; } } void ldv_main_exported_11(void) { char *ldvarg78 ; void *tmp ; struct se_ml_stat_grps *ldvarg77 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg78 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg77 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_11 == 1) { target_stat_scsi_att_intr_port_show_attr_port_auth_indx(ldvarg77, ldvarg78); ldv_state_variable_11 = 1; } else { } goto ldv_59351; default: ldv_stop(); } ldv_59351: ; return; } } void ldv_main_exported_78(void) { char *ldvarg80 ; void *tmp ; size_t ldvarg79 ; char *ldvarg81 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg80 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg81 = (char *)tmp___0; ldv_memset((void *)(& ldvarg79), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_78 == 1) { target_stat_scsi_dev_attr_show(target_stat_scsi_dev_attrib_ops_group1, target_stat_scsi_dev_attrib_ops_group0, ldvarg81); ldv_state_variable_78 = 1; } else { } goto ldv_59360; case 1: ; if (ldv_state_variable_78 == 1) { target_stat_scsi_dev_attr_store(target_stat_scsi_dev_attrib_ops_group1, target_stat_scsi_dev_attrib_ops_group0, (char const *)ldvarg80, ldvarg79); ldv_state_variable_78 = 1; } else { } goto ldv_59360; default: ldv_stop(); } ldv_59360: ; return; } } void ldv_main_exported_48(void) { struct se_port_stat_grps *ldvarg313 ; void *tmp ; char *ldvarg314 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg313 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg314 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_48 == 1) { target_stat_scsi_port_show_attr_role(ldvarg313, ldvarg314); ldv_state_variable_48 = 1; } else { } goto ldv_59369; default: ldv_stop(); } ldv_59369: ; return; } } void ldv_main_exported_77(void) { char 
*ldvarg324 ; void *tmp ; struct se_dev_stat_grps *ldvarg323 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg324 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg323 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_77 == 1) { target_stat_scsi_tgt_dev_show_attr_inst(ldvarg323, ldvarg324); ldv_state_variable_77 = 1; } else { } goto ldv_59377; default: ldv_stop(); } ldv_59377: ; return; } } void ldv_main_exported_65(void) { char *ldvarg89 ; void *tmp ; struct se_dev_stat_grps *ldvarg88 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg89 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg88 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_65 == 1) { target_stat_scsi_lu_show_attr_vend(ldvarg88, ldvarg89); ldv_state_variable_65 = 1; } else { } goto ldv_59385; default: ldv_stop(); } ldv_59385: ; return; } } void ldv_main_exported_29(void) { struct se_ml_stat_grps *ldvarg90 ; void *tmp ; char *ldvarg91 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg90 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg91 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_29 == 1) { target_stat_scsi_auth_intr_show_attr_dev(ldvarg90, ldvarg91); ldv_state_variable_29 = 1; } else { } goto ldv_59393; default: ldv_stop(); } ldv_59393: ; return; } } void ldv_main_exported_50(void) { struct se_port_stat_grps *ldvarg336 ; void *tmp ; char *ldvarg337 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg336 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg337 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_50 == 1) { target_stat_scsi_port_show_attr_dev(ldvarg336, ldvarg337); ldv_state_variable_50 = 1; } else { } goto ldv_59401; default: ldv_stop(); } ldv_59401: ; return; } } void ldv_main_exported_39(void) { char *ldvarg339 ; void *tmp ; struct se_port_stat_grps *ldvarg338 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg339 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg338 = (struct se_port_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_39 == 1) { target_stat_scsi_tgt_port_show_attr_write_mbytes(ldvarg338, ldvarg339); ldv_state_variable_39 = 1; } else { } goto ldv_59409; default: ldv_stop(); } ldv_59409: ; return; } } void ldv_main_exported_64(void) { struct se_dev_stat_grps *ldvarg340 ; void *tmp ; char *ldvarg341 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg340 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg341 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_64 == 1) { target_stat_scsi_lu_show_attr_prod(ldvarg340, ldvarg341); ldv_state_variable_64 = 1; } else { } goto ldv_59417; default: ldv_stop(); } ldv_59417: ; return; } } void ldv_main_exported_58(void) { char *ldvarg107 ; void *tmp ; struct se_dev_stat_grps *ldvarg106 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg107 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg106 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_58 == 1) { target_stat_scsi_lu_show_attr_read_mbytes(ldvarg106, 
ldvarg107); ldv_state_variable_58 = 1; } else { } goto ldv_59425; default: ldv_stop(); } ldv_59425: ; return; } } void ldv_main_exported_41(void) { struct se_port_stat_grps *ldvarg347 ; void *tmp ; char *ldvarg348 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg347 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg348 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_41 == 1) { target_stat_scsi_tgt_port_show_attr_port_index(ldvarg347, ldvarg348); ldv_state_variable_41 = 1; } else { } goto ldv_59433; default: ldv_stop(); } ldv_59433: ; return; } } void ldv_main_exported_12(void) { struct se_ml_stat_grps *ldvarg345 ; void *tmp ; char *ldvarg346 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg345 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg346 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_12 == 1) { target_stat_scsi_att_intr_port_show_attr_indx(ldvarg345, ldvarg346); ldv_state_variable_12 = 1; } else { } goto ldv_59441; default: ldv_stop(); } ldv_59441: ; return; } } void ldv_main_exported_15(void) { char *ldvarg112 ; void *tmp ; struct se_ml_stat_grps *ldvarg111 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg112 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg111 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_15 == 1) { target_stat_scsi_att_intr_port_show_attr_inst(ldvarg111, ldvarg112); ldv_state_variable_15 = 1; } else { } goto ldv_59449; default: ldv_stop(); } ldv_59449: ; return; } } void ldv_main_exported_81(void) { char *ldvarg117 ; void *tmp ; struct se_dev_stat_grps *ldvarg116 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg117 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg116 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_81 == 1) { target_stat_scsi_dev_show_attr_indx(ldvarg116, ldvarg117); ldv_state_variable_81 = 1; } else { } goto ldv_59457; default: ldv_stop(); } ldv_59457: ; return; } } void ldv_main_exported_52(void) { char *ldvarg350 ; void *tmp ; char *ldvarg351 ; void *tmp___0 ; size_t ldvarg349 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg350 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg351 = (char *)tmp___0; ldv_memset((void *)(& ldvarg349), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_52 == 1) { target_stat_scsi_lu_attr_show(target_stat_scsi_lu_attrib_ops_group1, target_stat_scsi_lu_attrib_ops_group0, ldvarg351); ldv_state_variable_52 = 1; } else { } goto ldv_59466; case 1: ; if (ldv_state_variable_52 == 1) { target_stat_scsi_lu_attr_store(target_stat_scsi_lu_attrib_ops_group1, target_stat_scsi_lu_attrib_ops_group0, (char const *)ldvarg350, ldvarg349); ldv_state_variable_52 = 1; } else { } goto ldv_59466; default: ldv_stop(); } ldv_59466: ; return; } } void ldv_main_exported_60(void) { struct se_dev_stat_grps *ldvarg118 ; void *tmp ; char *ldvarg119 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg118 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg119 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_60 == 1) { target_stat_scsi_lu_show_attr_state_bit(ldvarg118, ldvarg119); 
ldv_state_variable_60 = 1; } else { } goto ldv_59475; default: ldv_stop(); } ldv_59475: ; return; } } void ldv_main_exported_56(void) { char *ldvarg355 ; void *tmp ; struct se_dev_stat_grps *ldvarg354 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg355 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg354 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_56 == 1) { target_stat_scsi_lu_show_attr_resets(ldvarg354, ldvarg355); ldv_state_variable_56 = 1; } else { } goto ldv_59483; default: ldv_stop(); } ldv_59483: ; return; } } void ldv_main_exported_73(void) { char *ldvarg124 ; void *tmp ; struct se_dev_stat_grps *ldvarg123 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg124 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg123 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_73 == 1) { target_stat_scsi_tgt_dev_show_attr_non_access_lus(ldvarg123, ldvarg124); ldv_state_variable_73 = 1; } else { } goto ldv_59491; default: ldv_stop(); } ldv_59491: ; return; } } void ldv_main_exported_66(void) { char *ldvarg359 ; void *tmp ; struct se_dev_stat_grps *ldvarg358 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg359 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg358 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_66 == 1) { target_stat_scsi_lu_show_attr_lu_name(ldvarg358, ldvarg359); ldv_state_variable_66 = 1; } else { } goto ldv_59499; default: ldv_stop(); } ldv_59499: ; return; } } void ldv_main_exported_45(void) { struct se_port_stat_grps *ldvarg356 ; void *tmp ; char *ldvarg357 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg356 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg357 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_45 == 1) { target_stat_scsi_tgt_port_show_attr_inst(ldvarg356, ldvarg357); ldv_state_variable_45 = 1; } else { } goto ldv_59507; default: ldv_stop(); } ldv_59507: ; return; } } void ldv_main_exported_76(void) { char *ldvarg128 ; void *tmp ; struct se_dev_stat_grps *ldvarg127 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg128 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg127 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_76 == 1) { target_stat_scsi_tgt_dev_show_attr_indx(ldvarg127, ldvarg128); ldv_state_variable_76 = 1; } else { } goto ldv_59515; default: ldv_stop(); } ldv_59515: ; return; } } void ldv_main_exported_19(void) { char *ldvarg361 ; void *tmp ; struct se_ml_stat_grps *ldvarg360 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg361 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg360 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_19 == 1) { target_stat_scsi_auth_intr_show_attr_hs_num_cmds(ldvarg360, ldvarg361); ldv_state_variable_19 = 1; } else { } goto ldv_59523; default: ldv_stop(); } ldv_59523: ; return; } } void ldv_main_exported_62(void) { char *ldvarg130 ; void *tmp ; struct se_dev_stat_grps *ldvarg129 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg130 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg129 = (struct 
se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_62 == 1) { target_stat_scsi_lu_show_attr_dev_type(ldvarg129, ldvarg130); ldv_state_variable_62 = 1; } else { } goto ldv_59531; default: ldv_stop(); } ldv_59531: ; return; } } void ldv_main_exported_54(void) { struct se_dev_stat_grps *ldvarg362 ; void *tmp ; char *ldvarg363 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg362 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg363 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_54 == 1) { target_stat_scsi_lu_show_attr_hs_num_cmds(ldvarg362, ldvarg363); ldv_state_variable_54 = 1; } else { } goto ldv_59539; default: ldv_stop(); } ldv_59539: ; return; } } void ldv_main_exported_67(void) { char *ldvarg132 ; void *tmp ; struct se_dev_stat_grps *ldvarg131 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg132 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg131 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_67 == 1) { target_stat_scsi_lu_show_attr_lun(ldvarg131, ldvarg132); ldv_state_variable_67 = 1; } else { } goto ldv_59547; default: ldv_stop(); } ldv_59547: ; return; } } void ldv_main_exported_70(void) { char *ldvarg365 ; void *tmp ; struct se_dev_stat_grps *ldvarg364 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg365 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg364 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_70 == 1) { target_stat_scsi_lu_show_attr_inst(ldvarg364, ldvarg365); ldv_state_variable_70 = 1; } else { } goto ldv_59555; default: ldv_stop(); } ldv_59555: ; return; } } void ldv_main_exported_68(void) { char *ldvarg370 ; void *tmp ; struct se_dev_stat_grps *ldvarg369 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg370 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg369 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_68 == 1) { target_stat_scsi_lu_show_attr_indx(ldvarg369, ldvarg370); ldv_state_variable_68 = 1; } else { } goto ldv_59563; default: ldv_stop(); } ldv_59563: ; return; } } void ldv_main_exported_17(void) { struct se_ml_stat_grps *ldvarg148 ; void *tmp ; char *ldvarg149 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg148 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg149 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_17 == 1) { target_stat_scsi_auth_intr_show_attr_row_status(ldvarg148, ldvarg149); ldv_state_variable_17 = 1; } else { } goto ldv_59571; default: ldv_stop(); } ldv_59571: ; return; } } void ldv_main_exported_30(void) { struct se_ml_stat_grps *ldvarg387 ; void *tmp ; char *ldvarg388 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg387 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg388 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_30 == 1) { target_stat_scsi_auth_intr_show_attr_inst(ldvarg387, ldvarg388); ldv_state_variable_30 = 1; } else { } goto ldv_59579; default: ldv_stop(); } ldv_59579: ; return; } } void ldv_main_exported_82(void) { char *ldvarg157 ; void *tmp ; struct 
se_dev_stat_grps *ldvarg156 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg157 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg156 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_82 == 1) { target_stat_scsi_dev_show_attr_inst(ldvarg156, ldvarg157); ldv_state_variable_82 = 1; } else { } goto ldv_59587; default: ldv_stop(); } ldv_59587: ; return; } } void ldv_main_exported_25(void) { struct se_ml_stat_grps *ldvarg392 ; void *tmp ; char *ldvarg393 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg392 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg393 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_25 == 1) { target_stat_scsi_auth_intr_show_attr_intr_name(ldvarg392, ldvarg393); ldv_state_variable_25 = 1; } else { } goto ldv_59595; default: ldv_stop(); } ldv_59595: ; return; } } void ldv_main_exported_28(void) { char *ldvarg398 ; void *tmp ; struct se_ml_stat_grps *ldvarg397 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg398 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg397 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_28 == 1) { target_stat_scsi_auth_intr_show_attr_port(ldvarg397, ldvarg398); ldv_state_variable_28 = 1; } else { } goto ldv_59603; default: ldv_stop(); } ldv_59603: ; return; } } void ldv_main_exported_75(void) { char *ldvarg409 ; void *tmp ; struct se_dev_stat_grps *ldvarg408 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg409 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg408 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_75 == 1) { target_stat_scsi_tgt_dev_show_attr_num_lus(ldvarg408, ldvarg409); ldv_state_variable_75 = 1; } else { } goto ldv_59611; default: ldv_stop(); } ldv_59611: ; return; } } void ldv_main_exported_40(void) { char *ldvarg407 ; void *tmp ; struct se_port_stat_grps *ldvarg406 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg407 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg406 = (struct se_port_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_40 == 1) { target_stat_scsi_tgt_port_show_attr_in_cmds(ldvarg406, ldvarg407); ldv_state_variable_40 = 1; } else { } goto ldv_59619; default: ldv_stop(); } ldv_59619: ; return; } } void ldv_main_exported_14(void) { struct se_ml_stat_grps *ldvarg174 ; void *tmp ; char *ldvarg175 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg174 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg175 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_14 == 1) { target_stat_scsi_att_intr_port_show_attr_dev(ldvarg174, ldvarg175); ldv_state_variable_14 = 1; } else { } goto ldv_59627; default: ldv_stop(); } ldv_59627: ; return; } } void ldv_main_exported_59(void) { char *ldvarg420 ; void *tmp ; struct se_dev_stat_grps *ldvarg419 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg420 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg419 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_59 == 1) { target_stat_scsi_lu_show_attr_num_cmds(ldvarg419, 
ldvarg420); ldv_state_variable_59 = 1; } else { } goto ldv_59635; default: ldv_stop(); } ldv_59635: ; return; } } void ldv_main_exported_69(void) { struct se_dev_stat_grps *ldvarg179 ; void *tmp ; char *ldvarg180 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg179 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg180 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_69 == 1) { target_stat_scsi_lu_show_attr_dev(ldvarg179, ldvarg180); ldv_state_variable_69 = 1; } else { } goto ldv_59643; default: ldv_stop(); } ldv_59643: ; return; } } void ldv_main_exported_49(void) { struct se_port_stat_grps *ldvarg189 ; void *tmp ; char *ldvarg190 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg189 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg190 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_49 == 1) { target_stat_scsi_port_show_attr_indx(ldvarg189, ldvarg190); ldv_state_variable_49 = 1; } else { } goto ldv_59651; default: ldv_stop(); } ldv_59651: ; return; } } void ldv_main_exported_24(void) { struct se_ml_stat_grps *ldvarg194 ; void *tmp ; char *ldvarg195 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(336UL); ldvarg194 = (struct se_ml_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg195 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_24 == 1) { target_stat_scsi_auth_intr_show_attr_map_indx(ldvarg194, ldvarg195); ldv_state_variable_24 = 1; } else { } goto ldv_59659; default: ldv_stop(); } ldv_59659: ; return; } } void ldv_main_exported_53(void) { struct se_dev_stat_grps *ldvarg434 ; void *tmp ; char *ldvarg435 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg434 = (struct se_dev_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg435 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_53 == 1) { target_stat_scsi_lu_show_attr_creation_time(ldvarg434, ldvarg435); ldv_state_variable_53 = 1; } else { } goto ldv_59667; default: ldv_stop(); } ldv_59667: ; return; } } void ldv_main_exported_79(void) { char *ldvarg216 ; void *tmp ; struct se_dev_stat_grps *ldvarg215 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg216 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg215 = (struct se_dev_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_79 == 1) { target_stat_scsi_dev_show_attr_ports(ldvarg215, ldvarg216); ldv_state_variable_79 = 1; } else { } goto ldv_59675; default: ldv_stop(); } ldv_59675: ; return; } } void ldv_main_exported_22(void) { char *ldvarg445 ; void *tmp ; struct se_ml_stat_grps *ldvarg444 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg445 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg444 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_22 == 1) { target_stat_scsi_auth_intr_show_attr_num_cmds(ldvarg444, ldvarg445); ldv_state_variable_22 = 1; } else { } goto ldv_59683; default: ldv_stop(); } ldv_59683: ; return; } } void ldv_main_exported_42(void) { struct se_port_stat_grps *ldvarg442 ; void *tmp ; char *ldvarg443 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg442 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg443 = 
(char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_42 == 1) { target_stat_scsi_tgt_port_show_attr_name(ldvarg442, ldvarg443); ldv_state_variable_42 = 1; } else { } goto ldv_59691; default: ldv_stop(); } ldv_59691: ; return; } } void ldv_main_exported_46(void) { size_t ldvarg446 ; char *ldvarg447 ; void *tmp ; char *ldvarg448 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg447 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg448 = (char *)tmp___0; ldv_memset((void *)(& ldvarg446), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_46 == 1) { target_stat_scsi_port_attr_show(target_stat_scsi_port_attrib_ops_group1, target_stat_scsi_port_attrib_ops_group0, ldvarg448); ldv_state_variable_46 = 1; } else { } goto ldv_59700; case 1: ; if (ldv_state_variable_46 == 1) { target_stat_scsi_port_attr_store(target_stat_scsi_port_attrib_ops_group1, target_stat_scsi_port_attrib_ops_group0, (char const *)ldvarg447, ldvarg446); ldv_state_variable_46 = 1; } else { } goto ldv_59700; default: ldv_stop(); } ldv_59700: ; return; } } void ldv_main_exported_23(void) { char *ldvarg221 ; void *tmp ; struct se_ml_stat_grps *ldvarg220 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg221 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg220 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_23 == 1) { target_stat_scsi_auth_intr_show_attr_att_count(ldvarg220, ldvarg221); ldv_state_variable_23 = 1; } else { } goto ldv_59709; default: ldv_stop(); } ldv_59709: ; return; } } void ldv_main_exported_13(void) { char *ldvarg450 ; void *tmp ; struct se_ml_stat_grps *ldvarg449 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg450 = (char *)tmp; tmp___0 = ldv_init_zalloc(336UL); ldvarg449 = (struct se_ml_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_13 == 1) { target_stat_scsi_att_intr_port_show_attr_port(ldvarg449, ldvarg450); ldv_state_variable_13 = 1; } else { } goto ldv_59717; default: ldv_stop(); } ldv_59717: ; return; } } void ldv_main_exported_36(void) { char *ldvarg459 ; void *tmp ; size_t ldvarg458 ; char *ldvarg460 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg459 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg460 = (char *)tmp___0; ldv_memset((void *)(& ldvarg458), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_36 == 1) { target_stat_scsi_tgt_port_attr_show(target_stat_scsi_tgt_port_attrib_ops_group1, target_stat_scsi_tgt_port_attrib_ops_group0, ldvarg460); ldv_state_variable_36 = 1; } else { } goto ldv_59726; case 1: ; if (ldv_state_variable_36 == 1) { target_stat_scsi_tgt_port_attr_store(target_stat_scsi_tgt_port_attrib_ops_group1, target_stat_scsi_tgt_port_attrib_ops_group0, (char const *)ldvarg459, ldvarg458); ldv_state_variable_36 = 1; } else { } goto ldv_59726; default: ldv_stop(); } ldv_59726: ; return; } } void ldv_main_exported_9(void) { char *ldvarg476 ; void *tmp ; char *ldvarg475 ; void *tmp___0 ; size_t ldvarg474 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg476 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg475 = (char *)tmp___0; ldv_memset((void *)(& ldvarg474), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_9 == 1) { 
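/*
 * target_stat_scsi_att_intr_port_attrib_ops_group0/_group1 (and the other
 * *_attrib_ops_group0/_group1 globals used by the ldv_main_exported_*() callers)
 * are pre-allocated configfs_attribute / config_item objects; the
 * ldv_initialize_configfs_item_operations_*() helpers fill them in with
 * ldv_init_zalloc() (see ldv_initialize_configfs_item_operations_46() above for
 * the pattern), so the show/store calls below operate on zero-initialised,
 * non-NULL arguments.
 */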
target_stat_scsi_att_intr_port_attr_show(target_stat_scsi_att_intr_port_attrib_ops_group1, target_stat_scsi_att_intr_port_attrib_ops_group0, ldvarg476); ldv_state_variable_9 = 1; } else { } goto ldv_59736; case 1: ; if (ldv_state_variable_9 == 1) { target_stat_scsi_att_intr_port_attr_store(target_stat_scsi_att_intr_port_attrib_ops_group1, target_stat_scsi_att_intr_port_attrib_ops_group0, (char const *)ldvarg475, ldvarg474); ldv_state_variable_9 = 1; } else { } goto ldv_59736; default: ldv_stop(); } ldv_59736: ; return; } } void ldv_main_exported_51(void) { char *ldvarg473 ; void *tmp ; struct se_port_stat_grps *ldvarg472 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg473 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg472 = (struct se_port_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_51 == 1) { target_stat_scsi_port_show_attr_inst(ldvarg472, ldvarg473); ldv_state_variable_51 = 1; } else { } goto ldv_59745; default: ldv_stop(); } ldv_59745: ; return; } } void ldv_main_exported_47(void) { struct se_port_stat_grps *ldvarg235 ; void *tmp ; char *ldvarg236 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg235 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg236 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_47 == 1) { target_stat_scsi_port_show_attr_busy_count(ldvarg235, ldvarg236); ldv_state_variable_47 = 1; } else { } goto ldv_59753; default: ldv_stop(); } ldv_59753: ; return; } } void ldv_main_exported_38(void) { struct se_port_stat_grps *ldvarg480 ; void *tmp ; char *ldvarg481 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg480 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg481 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_38 == 1) { target_stat_scsi_tgt_port_show_attr_read_mbytes(ldvarg480, ldvarg481); ldv_state_variable_38 = 1; } else { } goto ldv_59761; default: ldv_stop(); } ldv_59761: ; return; } } void ldv_main_exported_34(void) { char *ldvarg483 ; void *tmp ; struct se_port_stat_grps *ldvarg482 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg483 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg482 = (struct se_port_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_34 == 1) { target_stat_scsi_transport_show_attr_device(ldvarg482, ldvarg483); ldv_state_variable_34 = 1; } else { } goto ldv_59769; default: ldv_stop(); } ldv_59769: ; return; } } void ldv_main_exported_37(void) { struct se_port_stat_grps *ldvarg239 ; void *tmp ; char *ldvarg240 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(448UL); ldvarg239 = (struct se_port_stat_grps *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg240 = (char *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_37 == 1) { target_stat_scsi_tgt_port_show_attr_hs_in_cmds(ldvarg239, ldvarg240); ldv_state_variable_37 = 1; } else { } goto ldv_59777; default: ldv_stop(); } ldv_59777: ; return; } } void ldv_main_exported_43(void) { char *ldvarg245 ; void *tmp ; struct se_port_stat_grps *ldvarg244 ; void *tmp___0 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg245 = (char *)tmp; tmp___0 = ldv_init_zalloc(448UL); ldvarg244 = (struct se_port_stat_grps *)tmp___0; tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if 
(ldv_state_variable_43 == 1) { target_stat_scsi_tgt_port_show_attr_indx(ldvarg244, ldvarg245); ldv_state_variable_43 = 1; } else { } goto ldv_59785; default: ldv_stop(); } ldv_59785: ; return; } } bool ldv_queue_work_on_563(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_564(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_565(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_566(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_567(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_568(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_569(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_570(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_571(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_572(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_573(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_574(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; int ldv_mutex_trylock_599(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_597(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_600(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_601(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_604(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_605(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_606(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_596(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_598(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_602(struct mutex *ldv_func_arg1 ) ; 
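/*
 * Pattern notes for the LDV environment-model glue surrounding this driver:
 *
 * - The ldv_main_exported_NN() entry points above model the kernel invoking the
 *   registered configfs show/store callbacks in an arbitrary order: each one
 *   zero-allocates the callback arguments with ldv_init_zalloc(), picks a branch
 *   with __VERIFIER_nondet_int(), calls the handler only while the matching
 *   ldv_state_variable_NN equals 1, and ldv_stop() cuts off infeasible paths.
 *
 * - The ldv_queue_work_on_NNN() / ldv_queue_delayed_work_on_NNN() /
 *   ldv_flush_workqueue_NNN() and ldv_mutex_lock/unlock/trylock_NNN() wrappers
 *   forward to the real kernel primitives and additionally notify the model
 *   (activate_work_2(), call_and_disable_all_2(), ldv_mutex_lock_lock(),
 *   ldv_mutex_lock_i_mutex_of_inode(), ldv_mutex_lock_mutex_of_device(), ...)
 *   so the verifier can track pending work items and per-lock-class state.
 *
 * A minimal sketch of the wrapper shape, using the hypothetical helper
 * ldv_model_note_work() in place of whichever bookkeeping call the generator
 * emits for a particular wrapper:
 *
 *     bool ldv_queue_work_on_example(int cpu, struct workqueue_struct *wq,
 *                                    struct work_struct *work)
 *     {
 *         bool ret = queue_work_on(cpu, wq, work);  // real kernel call
 *         ldv_model_note_work(work);                // model bookkeeping (hypothetical name)
 *         return ret;
 *     }
 */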
void ldv_mutex_lock_603(struct mutex *ldv_func_arg1 ) ; extern int wait_for_completion_interruptible(struct completion * ) ; void ldv_destroy_workqueue_607(struct workqueue_struct *ldv_func_arg1 ) ; bool ldv_queue_work_on_591(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_593(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_592(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_595(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_594(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_591(8192, wq, work); return (tmp); } } void activate_work_7(struct work_struct *work , int state ) ; void disable_work_7(struct work_struct *work ) ; void call_and_disable_work_7(struct work_struct *work ) ; void call_and_disable_all_7(int state ) ; void invoke_work_7(void) ; struct se_portal_group xcopy_pt_tpg ; static struct workqueue_struct *xcopy_wq = (struct workqueue_struct *)0; static int target_xcopy_gen_naa_ieee(struct se_device *dev , unsigned char *buf ) { int off ; int tmp ; int tmp___0 ; int tmp___1 ; { off = 0; tmp = off; off = off + 1; *(buf + (unsigned long )tmp) = 96U; tmp___0 = off; off = off + 1; *(buf + (unsigned long )tmp___0) = 1U; tmp___1 = off; off = off + 1; *(buf + (unsigned long )tmp___1) = 64U; *(buf + (unsigned long )off) = 80U; spc_parse_naa_6h_vendor_specific(dev, buf + (unsigned long )off); return (0); } } static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd , struct xcopy_op *xop , bool src ) { struct se_device *se_dev ; unsigned char tmp_dev_wwn[16U] ; unsigned char *dev_wwn ; int rc ; struct list_head const *__mptr ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct list_head const *__mptr___0 ; { if ((int )src) { dev_wwn = (unsigned char *)(& xop->dst_tid_wwn); } else { dev_wwn = (unsigned char *)(& xop->src_tid_wwn); } ldv_mutex_lock_603(& g_device_mutex); __mptr = (struct list_head const *)g_device_list.next; se_dev = (struct se_device *)__mptr + 0xfffffffffffffcc0UL; goto ldv_57204; ldv_57203: ; if (se_dev->dev_attrib.emulate_3pc == 0) { goto ldv_57198; } else { } memset((void *)(& tmp_dev_wwn), 0, 16UL); target_xcopy_gen_naa_ieee(se_dev, (unsigned char *)(& tmp_dev_wwn)); rc = memcmp((void const *)(& tmp_dev_wwn), (void const *)dev_wwn, 16UL); if (rc != 0) { goto ldv_57198; } else { } if ((int )src) { xop->dst_dev = se_dev; descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_locate_se_dev_e4"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY 0xe4: Setting xop->dst_dev: %p from located se_dev\n"; descriptor.lineno = 83U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY 0xe4: Setting xop->dst_dev: %p from located se_dev\n", xop->dst_dev); } else { } } else { 
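/*
 * The descriptor.* assignments here are the CIL expansion of the kernel's
 * pr_debug()/dynamic_pr_debug() macro: a struct _ddebug is filled with the
 * module, function, file, format string and line number, and
 * __dynamic_pr_debug() is reached only when bit 0 of descriptor.flags is set
 * (tested via ldv__builtin_expect()). Because descriptor.flags is initialised
 * to 0U in this harness, the debug-print branch is not taken on these paths.
 */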
xop->src_dev = se_dev; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_locate_se_dev_e4"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY 0xe4: Setting xop->src_dev: %p from located se_dev\n"; descriptor___0.lineno = 87U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY 0xe4: Setting xop->src_dev: %p from located se_dev\n", xop->src_dev); } else { } } rc = target_depend_item(& se_dev->dev_group.cg_item); if (rc != 0) { printk("\vconfigfs_depend_item attempt failed: %d for se_dev: %p\n", rc, se_dev); ldv_mutex_unlock_604(& g_device_mutex); return (rc); } else { } descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_xcopy_locate_se_dev_e4"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n"; descriptor___1.lineno = 100U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n", se_dev, & se_dev->dev_group); } else { } ldv_mutex_unlock_605(& g_device_mutex); return (0); ldv_57198: __mptr___0 = (struct list_head const *)se_dev->g_dev_node.next; se_dev = (struct se_device *)__mptr___0 + 0xfffffffffffffcc0UL; ldv_57204: ; if ((unsigned long )(& se_dev->g_dev_node) != (unsigned long )(& g_device_list)) { goto ldv_57203; } else { } ldv_mutex_unlock_606(& g_device_mutex); printk("\vUnable to locate 0xe4 descriptor for EXTENDED_COPY\n"); return (-22); } } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd , struct xcopy_op *xop , unsigned char *p , bool src ) { unsigned char *desc ; unsigned short ript ; u8 desig_len ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; int tmp___2 ; struct _ddebug descriptor___2 ; long tmp___3 ; int tmp___4 ; { desc = p; ript = get_unaligned_be16((void const *)desc + 2U); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_parse_tiddesc_e4"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n"; descriptor.lineno = 121U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY 0xe4: RELATIVE INITIATOR PORT IDENTIFIER: %hu\n", (int )ript); } else { } if (((int )*(desc + 4UL) & 15) != 1) { printk("\vXCOPY 0xe4: code set of non binary type not supported\n"); return (-22); } else { } if (((int )*(desc + 5UL) & 48) != 0) { printk("\vXCOPY 0xe4: association other than LUN not supported\n"); return (-22); } else { } if (((int )*(desc + 5UL) & 15) != 3) { printk("\vXCOPY 0xe4: 
designator type unsupported: 0x%02x\n", (int )*(desc + 5UL) & 15); return (-22); } else { } desig_len = *(desc + 7UL); if ((unsigned int )desig_len != 16U) { printk("\vXCOPY 0xe4: invalid desig_len: %d\n", (int )desig_len); return (-22); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_parse_tiddesc_e4"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY 0xe4: desig_len: %d\n"; descriptor___0.lineno = 147U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY 0xe4: desig_len: %d\n", (int )desig_len); } else { } if (((int )*(desc + 8UL) & 240) != 96) { printk("\vXCOPY 0xe4: Unsupported DESIGNATOR TYPE: 0x%02x\n", (int )*(desc + 8UL) & 240); return (-22); } else { } if ((int )src) { memcpy((void *)(& xop->src_tid_wwn), (void const *)desc + 8U, 16UL); tmp___2 = memcmp((void const *)(& xop->local_dev_wwn), (void const *)(& xop->src_tid_wwn), 16UL); if (tmp___2 == 0) { xop->op_origin = 1; xop->src_dev = se_cmd->se_dev; descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_xcopy_parse_tiddesc_e4"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "XCOPY 0xe4: Set xop->src_dev %p from source received xop\n"; descriptor___1.lineno = 167U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "XCOPY 0xe4: Set xop->src_dev %p from source received xop\n", xop->src_dev); } else { } } else { } } else { memcpy((void *)(& xop->dst_tid_wwn), (void const *)desc + 8U, 16UL); tmp___4 = memcmp((void const *)(& xop->local_dev_wwn), (void const *)(& xop->dst_tid_wwn), 16UL); if (tmp___4 == 0) { xop->op_origin = 2; xop->dst_dev = se_cmd->se_dev; descriptor___2.modname = "target_core_mod"; descriptor___2.function = "target_xcopy_parse_tiddesc_e4"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___2.format = "XCOPY 0xe4: Set xop->dst_dev: %p from destination received xop\n"; descriptor___2.lineno = 179U; descriptor___2.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___2, "XCOPY 0xe4: Set xop->dst_dev: %p from destination received xop\n", xop->dst_dev); } else { } } else { } } return (0); } } static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd , struct xcopy_op *xop , unsigned char *p , unsigned short tdll ) { struct se_device *local_dev ; unsigned char *desc ; int offset ; int rc ; int ret ; unsigned short start ; bool src ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; { local_dev = se_cmd->se_dev; desc = p; offset = (int )tdll & 31; ret = 0; start = 0U; src = 1; if (offset != 0) { printk("\vXCOPY target descriptor list length is not multiple of %d\n", 32); return 
(-22); } else { } if ((unsigned int )tdll > 64U) { printk("\vXCOPY target descriptor supports a maximum two src/dest descriptors, tdll: %hu too large..\n", (int )tdll); return (-22); } else { } memset((void *)(& xop->local_dev_wwn), 0, 16UL); target_xcopy_gen_naa_ieee(local_dev, (unsigned char *)(& xop->local_dev_wwn)); goto ldv_57238; ldv_57237: ; switch ((int )*desc) { case 228: rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop, desc, (int )src); if (rc != 0) { goto out; } else { } if ((int )src) { src = 0; } else { src = 1; } start = (unsigned int )start + 32U; desc = desc + 32UL; ret = ret + 1; goto ldv_57235; default: printk("\vXCOPY unsupported descriptor type code: 0x%02x\n", (int )*desc); goto out; } ldv_57235: ; ldv_57238: ; if ((int )start < (int )tdll) { goto ldv_57237; } else { } if (xop->op_origin == 1) { rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, 1); } else { rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, 0); } if (rc < 0) { goto out; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_parse_target_descriptors"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n"; descriptor.lineno = 251U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY TGT desc: Source dev: %p NAA IEEE WWN: 0x%16phN\n", xop->src_dev, (unsigned char *)(& xop->src_tid_wwn)); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_parse_target_descriptors"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n"; descriptor___0.lineno = 253U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n", xop->dst_dev, (unsigned char *)(& xop->dst_tid_wwn)); } else { } return (ret); out: ; return (-22); } } static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd , struct xcopy_op *xop , unsigned char *p ) { unsigned char *desc ; int dc ; unsigned short desc_len ; struct _ddebug descriptor ; long tmp ; u64 tmp___0 ; u64 tmp___1 ; struct _ddebug descriptor___0 ; long tmp___2 ; struct _ddebug descriptor___1 ; long tmp___3 ; { desc = p; dc = (int )*(desc + 1UL) & 2; desc_len = get_unaligned_be16((void const *)desc + 2U); if ((unsigned int )desc_len != 24U) { printk("\vXCOPY segment desc 0x02: Illegal desc_len: %hu\n", (int )desc_len); return (-22); } else { } xop->stdi = get_unaligned_be16((void const *)desc + 4U); xop->dtdi = get_unaligned_be16((void const *)desc + 6U); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_parse_segdesc_02"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: 
%d\n"; descriptor.lineno = 278U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n", (int )desc_len, (int )xop->stdi, (int )xop->dtdi, dc); } else { } xop->nolb = get_unaligned_be16((void const *)desc + 10U); tmp___0 = get_unaligned_be64((void const *)desc + 12U); xop->src_lba = (sector_t )tmp___0; tmp___1 = get_unaligned_be64((void const *)desc + 20U); xop->dst_lba = (sector_t )tmp___1; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_parse_segdesc_02"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n"; descriptor___0.lineno = 285U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY seg desc 0x02: nolb: %hu src_lba: %llu dst_lba: %llu\n", (int )xop->nolb, (unsigned long long )xop->src_lba, (unsigned long long )xop->dst_lba); } else { } if (dc != 0) { xop->dbl = (unsigned int )((int )*(desc + 29UL) << 16); xop->dbl = xop->dbl | (unsigned int )((int )*(desc + 30UL) << 8); xop->dbl = xop->dbl | (unsigned int )*(desc + 31UL); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_xcopy_parse_segdesc_02"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n"; descriptor___1.lineno = 292U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___1, "XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); } else { } } else { } return (0); } } static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd , struct xcopy_op *xop , unsigned char *p , unsigned int sdll ) { unsigned char *desc ; unsigned int start ; int offset ; int rc ; int ret ; { desc = p; start = 0U; offset = (int )(sdll % 28U); ret = 0; if (offset != 0) { printk("\vXCOPY segment descriptor list length is not multiple of %d\n", 28); return (-22); } else { } goto ldv_57271; ldv_57270: ; switch ((int )*desc) { case 2: rc = target_xcopy_parse_segdesc_02(se_cmd, xop, desc); if (rc < 0) { goto out; } else { } ret = ret + 1; start = start + 28U; desc = desc + 28UL; goto ldv_57268; default: printk("\vXCOPY unsupported segment descriptortype: 0x%02x\n", (int )*desc); goto out; } ldv_57268: ; ldv_57271: ; if (start < sdll) { goto ldv_57270; } else { } return (ret); out: ; return (-22); } } static struct se_session xcopy_pt_sess ; static struct se_node_acl xcopy_pt_nacl ; static char *xcopy_pt_get_fabric_name(void) { { return ((char *)"xcopy-pt"); } } static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd ) { { return (0); } } static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop ) { struct se_device *remote_dev ; struct _ddebug descriptor ; long tmp ; { if (xop->op_origin == 1) { remote_dev = xop->dst_dev; } else { remote_dev = xop->src_dev; } descriptor.modname = "target_core_mod"; 
descriptor.function = "xcopy_pt_undepend_remotedev"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Calling configfs_undepend_item for remote_dev: %p remote_dev->dev_group: %p\n"; descriptor.lineno = 375U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Calling configfs_undepend_item for remote_dev: %p remote_dev->dev_group: %p\n", remote_dev, & remote_dev->dev_group.cg_item); } else { } target_undepend_item(& remote_dev->dev_group.cg_item); return; } } static void xcopy_pt_release_cmd(struct se_cmd *se_cmd ) { struct xcopy_pt_cmd *xpt_cmd ; struct se_cmd const *__mptr ; { __mptr = (struct se_cmd const *)se_cmd; xpt_cmd = (struct xcopy_pt_cmd *)__mptr + 0xfffffffffffffff8UL; kfree((void const *)xpt_cmd); return; } } static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd ) { struct xcopy_pt_cmd *xpt_cmd ; struct se_cmd const *__mptr ; { __mptr = (struct se_cmd const *)se_cmd; xpt_cmd = (struct xcopy_pt_cmd *)__mptr + 0xfffffffffffffff8UL; complete(& xpt_cmd->xpt_passthrough_sem); return (0); } } static int xcopy_pt_write_pending(struct se_cmd *se_cmd ) { { return (0); } } static int xcopy_pt_write_pending_status(struct se_cmd *se_cmd ) { { return (0); } } static int xcopy_pt_queue_data_in(struct se_cmd *se_cmd ) { { return (0); } } static int xcopy_pt_queue_status(struct se_cmd *se_cmd ) { { return (0); } } static struct target_core_fabric_ops const xcopy_pt_tfo = {0, 0, 0UL, & xcopy_pt_get_fabric_name, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & xcopy_pt_check_stop_free, & xcopy_pt_release_cmd, 0, 0, 0, 0, & xcopy_pt_write_pending, & xcopy_pt_write_pending_status, 0, & xcopy_pt_get_cmd_state, & xcopy_pt_queue_data_in, & xcopy_pt_queue_status, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; int target_xcopy_setup_pt(void) { struct lock_class_key __key ; char const *__lock_name ; struct workqueue_struct *tmp ; { __lock_name = "\"xcopy_wq\""; tmp = __alloc_workqueue_key("xcopy_wq", 8U, 0, & __key, __lock_name); xcopy_wq = tmp; if ((unsigned long )xcopy_wq == (unsigned long )((struct workqueue_struct *)0)) { printk("\vUnable to allocate xcopy_wq\n"); return (-12); } else { } memset((void *)(& xcopy_pt_tpg), 0, 1328UL); INIT_LIST_HEAD(& xcopy_pt_tpg.se_tpg_node); INIT_LIST_HEAD(& xcopy_pt_tpg.acl_node_list); INIT_LIST_HEAD(& xcopy_pt_tpg.tpg_sess_list); xcopy_pt_tpg.se_tpg_tfo = & xcopy_pt_tfo; memset((void *)(& xcopy_pt_nacl), 0, 1304UL); INIT_LIST_HEAD(& xcopy_pt_nacl.acl_list); INIT_LIST_HEAD(& xcopy_pt_nacl.acl_sess_list); memset((void *)(& xcopy_pt_sess), 0, 1536UL); INIT_LIST_HEAD(& xcopy_pt_sess.sess_list); INIT_LIST_HEAD(& xcopy_pt_sess.sess_acl_list); xcopy_pt_nacl.se_tpg = & xcopy_pt_tpg; xcopy_pt_nacl.nacl_sess = & xcopy_pt_sess; xcopy_pt_sess.se_tpg = & xcopy_pt_tpg; xcopy_pt_sess.se_node_acl = & xcopy_pt_nacl; return (0); } } void target_xcopy_release_pt(void) { { if ((unsigned long )xcopy_wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_destroy_workqueue_607(xcopy_wq); } else { } return; } } static void target_xcopy_setup_pt_port(struct xcopy_pt_cmd *xpt_cmd , struct xcopy_op *xop , bool remote_port ) { struct se_cmd *ec_cmd ; struct se_cmd *pt_cmd ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; struct _ddebug descriptor___1 
; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; { ec_cmd = xop->xop_se_cmd; pt_cmd = & xpt_cmd->se_cmd; if (xop->op_origin == 1) { if ((int )remote_port) { xpt_cmd->remote_port = remote_port; } else { pt_cmd->se_lun = ec_cmd->se_lun; pt_cmd->se_dev = ec_cmd->se_dev; descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_setup_pt_port"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Honoring local SRC port from ec_cmd->se_dev: %p\n"; descriptor.lineno = 490U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Honoring local SRC port from ec_cmd->se_dev: %p\n", pt_cmd->se_dev); } else { } pt_cmd->se_lun = ec_cmd->se_lun; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_setup_pt_port"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "Honoring local SRC port from ec_cmd->se_lun: %p\n"; descriptor___0.lineno = 493U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "Honoring local SRC port from ec_cmd->se_lun: %p\n", pt_cmd->se_lun); } else { } } } else if ((int )remote_port) { xpt_cmd->remote_port = remote_port; } else { pt_cmd->se_lun = ec_cmd->se_lun; pt_cmd->se_dev = ec_cmd->se_dev; descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_xcopy_setup_pt_port"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "Honoring local DST port from ec_cmd->se_dev: %p\n"; descriptor___1.lineno = 509U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "Honoring local DST port from ec_cmd->se_dev: %p\n", pt_cmd->se_dev); } else { } pt_cmd->se_lun = ec_cmd->se_lun; descriptor___2.modname = "target_core_mod"; descriptor___2.function = "target_xcopy_setup_pt_port"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___2.format = "Honoring local DST port from ec_cmd->se_lun: %p\n"; descriptor___2.lineno = 512U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___2, "Honoring local DST port from ec_cmd->se_lun: %p\n", pt_cmd->se_lun); } else { } } return; } } static void target_xcopy_init_pt_lun(struct se_device *se_dev , struct se_cmd *pt_cmd , bool remote_port ) { struct _ddebug descriptor ; long tmp ; { if ((int )remote_port) { descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_init_pt_lun"; descriptor.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Setup emulated se_dev: %p from se_dev\n"; descriptor.lineno = 527U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup emulated se_dev: %p from se_dev\n", pt_cmd->se_dev); } else { } pt_cmd->se_lun = & se_dev->xcopy_lun; pt_cmd->se_dev = se_dev; } else { } pt_cmd->se_cmd_flags = pt_cmd->se_cmd_flags | 256U; return; } } static int target_xcopy_setup_pt_cmd(struct xcopy_pt_cmd *xpt_cmd , struct xcopy_op *xop , struct se_device *se_dev , unsigned char *cdb , bool remote_port , bool alloc_mem ) { struct se_cmd *cmd ; sense_reason_t sense_rc ; int ret ; int rc ; struct _ddebug descriptor ; long tmp ; { cmd = & xpt_cmd->se_cmd; ret = 0; target_xcopy_init_pt_lun(se_dev, cmd, (int )remote_port); xpt_cmd->xcopy_op = xop; target_xcopy_setup_pt_port(xpt_cmd, xop, (int )remote_port); cmd->tag = 0ULL; sense_rc = target_setup_cmd_from_cdb(cmd, cdb); if (sense_rc != 0U) { ret = -22; goto out; } else { } if ((int )alloc_mem) { rc = target_alloc_sgl(& cmd->t_data_sg, & cmd->t_data_nents, cmd->data_length, 0); if (rc < 0) { ret = rc; goto out; } else { } cmd->se_cmd_flags = cmd->se_cmd_flags | 131072U; } else { sense_rc = transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg, xop->xop_data_nents, (struct scatterlist *)0, 0U); if (sense_rc != 0U) { ret = -22; goto out; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_setup_pt_cmd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents: %u\n"; descriptor.lineno = 589U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents: %u\n", cmd->t_data_sg, cmd->t_data_nents); } else { } } return (0); out: ; return (ret); } } static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd ) { struct se_cmd *se_cmd ; sense_reason_t sense_rc ; struct _ddebug descriptor ; long tmp ; { se_cmd = & xpt_cmd->se_cmd; sense_rc = transport_generic_new_cmd(se_cmd); if (sense_rc != 0U) { return (-22); } else { } if ((unsigned int )se_cmd->data_direction == 1U) { target_execute_cmd(se_cmd); } else { } wait_for_completion_interruptible(& xpt_cmd->xpt_passthrough_sem); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_issue_pt_cmd"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n"; descriptor.lineno = 613U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "target_xcopy_issue_pt_cmd(): SCSI status: 0x%02x\n", (int )se_cmd->scsi_status); } else { } return ((unsigned int )se_cmd->scsi_status != 0U ? 
-22 : 0); } } static int target_xcopy_read_source(struct se_cmd *ec_cmd , struct xcopy_op *xop , struct se_device *src_dev , sector_t src_lba , u32 src_sectors ) { struct xcopy_pt_cmd *xpt_cmd ; struct se_cmd *se_cmd ; u32 length ; int rc ; unsigned char cdb[16U] ; bool remote_port ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; struct _ddebug descriptor___0 ; long tmp___1 ; { length = src_dev->dev_attrib.block_size * src_sectors; remote_port = xop->op_origin == 2; tmp = kzalloc(1040UL, 208U); xpt_cmd = (struct xcopy_pt_cmd *)tmp; if ((unsigned long )xpt_cmd == (unsigned long )((struct xcopy_pt_cmd *)0)) { printk("\vUnable to allocate xcopy_pt_cmd\n"); return (-12); } else { } init_completion(& xpt_cmd->xpt_passthrough_sem); se_cmd = & xpt_cmd->se_cmd; memset((void *)(& cdb), 0, 16UL); cdb[0] = 136U; put_unaligned_be64((u64 )src_lba, (void *)(& cdb) + 2U); put_unaligned_be32(src_sectors, (void *)(& cdb) + 10U); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_read_source"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n"; descriptor.lineno = 645U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY: Built READ_16: LBA: %llu Sectors: %u Length: %u\n", (unsigned long long )src_lba, src_sectors, length); } else { } transport_init_se_cmd(se_cmd, & xcopy_pt_tfo, (struct se_session *)0, length, 2, 0, (unsigned char *)(& xpt_cmd->sense_buffer)); xop->src_pt_cmd = xpt_cmd; rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, (unsigned char *)(& cdb), (int )remote_port, 1); if (rc < 0) { transport_generic_free_cmd(se_cmd, 0); return (rc); } else { } xop->xop_data_sg = se_cmd->t_data_sg; xop->xop_data_nents = se_cmd->t_data_nents; descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_read_source"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ memory\n"; descriptor___0.lineno = 661U; descriptor___0.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ memory\n", xop->xop_data_sg, xop->xop_data_nents); } else { } rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { transport_generic_free_cmd(se_cmd, 0); return (rc); } else { } se_cmd->t_data_sg = (struct scatterlist *)0; se_cmd->t_data_nents = 0U; return (0); } } static int target_xcopy_write_destination(struct se_cmd *ec_cmd , struct xcopy_op *xop , struct se_device *dst_dev , sector_t dst_lba , u32 dst_sectors ) { struct xcopy_pt_cmd *xpt_cmd ; struct se_cmd *se_cmd ; u32 length ; int rc ; unsigned char cdb[16U] ; bool remote_port ; void *tmp ; struct _ddebug descriptor ; long tmp___0 ; struct se_cmd *src_cmd ; { length = dst_dev->dev_attrib.block_size * dst_sectors; remote_port = xop->op_origin == 1; tmp = kzalloc(1040UL, 208U); xpt_cmd = (struct xcopy_pt_cmd *)tmp; if ((unsigned long )xpt_cmd == (unsigned 
long )((struct xcopy_pt_cmd *)0)) { printk("\vUnable to allocate xcopy_pt_cmd\n"); return (-12); } else { } init_completion(& xpt_cmd->xpt_passthrough_sem); se_cmd = & xpt_cmd->se_cmd; memset((void *)(& cdb), 0, 16UL); cdb[0] = 138U; put_unaligned_be64((u64 )dst_lba, (void *)(& cdb) + 2U); put_unaligned_be32(dst_sectors, (void *)(& cdb) + 10U); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_write_destination"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n"; descriptor.lineno = 705U; descriptor.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor, "XCOPY: Built WRITE_16: LBA: %llu Sectors: %u Length: %u\n", (unsigned long long )dst_lba, dst_sectors, length); } else { } transport_init_se_cmd(se_cmd, & xcopy_pt_tfo, (struct se_session *)0, length, 1, 0, (unsigned char *)(& xpt_cmd->sense_buffer)); xop->dst_pt_cmd = xpt_cmd; rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, (unsigned char *)(& cdb), (int )remote_port, 0); if (rc < 0) { src_cmd = & (xop->src_pt_cmd)->se_cmd; src_cmd->se_cmd_flags = src_cmd->se_cmd_flags & 4294836223U; src_cmd->t_data_sg = xop->xop_data_sg; src_cmd->t_data_nents = xop->xop_data_nents; transport_generic_free_cmd(se_cmd, 0); return (rc); } else { } rc = target_xcopy_issue_pt_cmd(xpt_cmd); if (rc < 0) { se_cmd->se_cmd_flags = se_cmd->se_cmd_flags & 4294836223U; transport_generic_free_cmd(se_cmd, 0); return (rc); } else { } return (0); } } static void target_xcopy_do_work(struct work_struct *work ) { struct xcopy_op *xop ; struct work_struct const *__mptr ; struct se_device *src_dev ; struct se_device *dst_dev ; struct se_cmd *ec_cmd ; sector_t src_lba ; sector_t dst_lba ; sector_t end_lba ; unsigned int max_sectors ; int rc ; unsigned short nolb ; unsigned short cur_nolb ; unsigned short max_nolb ; unsigned short copied_nolb ; u32 _min1 ; u32 _min2 ; u32 __min1 ; u32 __min2 ; u16 __min1___0 ; u16 __min2___0 ; struct _ddebug descriptor ; long tmp ; struct _ddebug descriptor___0 ; long tmp___0 ; unsigned short _min1___0 ; unsigned short _min2___0 ; struct _ddebug descriptor___1 ; long tmp___1 ; struct _ddebug descriptor___2 ; long tmp___2 ; struct _ddebug descriptor___3 ; long tmp___3 ; struct _ddebug descriptor___4 ; long tmp___4 ; struct _ddebug descriptor___5 ; long tmp___5 ; struct _ddebug descriptor___6 ; long tmp___6 ; struct _ddebug descriptor___7 ; long tmp___7 ; { __mptr = (struct work_struct const *)work; xop = (struct xcopy_op *)__mptr + 0xffffffffffffff70UL; src_dev = xop->src_dev; dst_dev = xop->dst_dev; ec_cmd = xop->xop_se_cmd; src_lba = xop->src_lba; dst_lba = xop->dst_lba; nolb = xop->nolb; copied_nolb = 0U; end_lba = (sector_t )nolb + src_lba; _min1 = src_dev->dev_attrib.hw_max_sectors; _min2 = dst_dev->dev_attrib.hw_max_sectors; max_sectors = _min1 < _min2 ? _min1 : _min2; __min1 = max_sectors; __min2 = 1024U; max_sectors = __min1 < __min2 ? __min1 : __min2; __min1___0 = (u16 )max_sectors; __min2___0 = 65535U; max_nolb = (unsigned short )((int )__min1___0 < (int )__min2___0 ? 
__min1___0 : __min2___0); descriptor.modname = "target_core_mod"; descriptor.function = "target_xcopy_do_work"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n"; descriptor.lineno = 760U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "target_xcopy_do_work: nolb: %hu, max_nolb: %hu end_lba: %llu\n", (int )nolb, (int )max_nolb, (unsigned long long )end_lba); } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_xcopy_do_work"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n"; descriptor___0.lineno = 762U; descriptor___0.flags = 0U; tmp___0 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___0 != 0L) { __dynamic_pr_debug(& descriptor___0, "target_xcopy_do_work: Starting src_lba: %llu, dst_lba: %llu\n", (unsigned long long )src_lba, (unsigned long long )dst_lba); } else { } goto ldv_57439; ldv_57438: _min1___0 = nolb; _min2___0 = max_nolb; cur_nolb = (unsigned short )((int )_min1___0 < (int )_min2___0 ? (int )_min1___0 : (int )_min2___0); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_xcopy_do_work"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n"; descriptor___1.lineno = 768U; descriptor___1.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor___1, "target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu, cur_nolb: %hu\n", src_dev, (unsigned long long )src_lba, (int )cur_nolb); } else { } rc = target_xcopy_read_source(ec_cmd, xop, src_dev, src_lba, (u32 )cur_nolb); if (rc < 0) { goto out; } else { } src_lba = (sector_t )cur_nolb + src_lba; descriptor___2.modname = "target_core_mod"; descriptor___2.function = "target_xcopy_do_work"; descriptor___2.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___2.format = "target_xcopy_do_work: Incremented READ src_lba to %llu\n"; descriptor___2.lineno = 776U; descriptor___2.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___2.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___2, "target_xcopy_do_work: Incremented READ src_lba to %llu\n", (unsigned long long )src_lba); } else { } descriptor___3.modname = "target_core_mod"; descriptor___3.function = "target_xcopy_do_work"; descriptor___3.filename = 
"/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___3.format = "target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %hu\n"; descriptor___3.lineno = 779U; descriptor___3.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___3.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___3, "target_xcopy_do_work: Calling write dst_dev: %p dst_lba: %llu, cur_nolb: %hu\n", dst_dev, (unsigned long long )dst_lba, (int )cur_nolb); } else { } rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev, dst_lba, (u32 )cur_nolb); if (rc < 0) { transport_generic_free_cmd(& (xop->src_pt_cmd)->se_cmd, 0); goto out; } else { } dst_lba = (sector_t )cur_nolb + dst_lba; descriptor___4.modname = "target_core_mod"; descriptor___4.function = "target_xcopy_do_work"; descriptor___4.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___4.format = "target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n"; descriptor___4.lineno = 790U; descriptor___4.flags = 0U; tmp___4 = ldv__builtin_expect((long )descriptor___4.flags & 1L, 0L); if (tmp___4 != 0L) { __dynamic_pr_debug(& descriptor___4, "target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n", (unsigned long long )dst_lba); } else { } copied_nolb = (int )copied_nolb + (int )cur_nolb; nolb = (int )nolb - (int )cur_nolb; transport_generic_free_cmd(& (xop->src_pt_cmd)->se_cmd, 0); (xop->dst_pt_cmd)->se_cmd.se_cmd_flags = (xop->dst_pt_cmd)->se_cmd.se_cmd_flags & 4294836223U; transport_generic_free_cmd(& (xop->dst_pt_cmd)->se_cmd, 0); ldv_57439: ; if (src_lba < end_lba) { goto ldv_57438; } else { } xcopy_pt_undepend_remotedev(xop); kfree((void const *)xop); descriptor___5.modname = "target_core_mod"; descriptor___5.function = "target_xcopy_do_work"; descriptor___5.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___5.format = "target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n"; descriptor___5.lineno = 805U; descriptor___5.flags = 0U; tmp___5 = ldv__builtin_expect((long )descriptor___5.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_pr_debug(& descriptor___5, "target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n", (unsigned long long )src_lba, (unsigned long long )dst_lba); } else { } descriptor___6.modname = "target_core_mod"; descriptor___6.function = "target_xcopy_do_work"; descriptor___6.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___6.format = "target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n"; descriptor___6.lineno = 807U; descriptor___6.flags = 0U; tmp___6 = ldv__builtin_expect((long )descriptor___6.flags & 1L, 0L); if (tmp___6 != 0L) { __dynamic_pr_debug(& descriptor___6, "target_xcopy_do_work: Blocks copied: %hu, Bytes Copied: %u\n", (int )copied_nolb, (u32 )copied_nolb * dst_dev->dev_attrib.block_size); } else { } descriptor___7.modname = "target_core_mod"; 
descriptor___7.function = "target_xcopy_do_work"; descriptor___7.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___7.format = "target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n"; descriptor___7.lineno = 809U; descriptor___7.flags = 0U; tmp___7 = ldv__builtin_expect((long )descriptor___7.flags & 1L, 0L); if (tmp___7 != 0L) { __dynamic_pr_debug(& descriptor___7, "target_xcopy_do_work: Setting X-COPY GOOD status -> sending response\n"); } else { } target_complete_cmd(ec_cmd, 0); return; out: xcopy_pt_undepend_remotedev(xop); kfree((void const *)xop); printk("\ftarget_xcopy_do_work: Setting X-COPY CHECK_CONDITION -> sending response\n"); ec_cmd->scsi_status = 2U; target_complete_cmd(ec_cmd, 2); return; } } sense_reason_t target_do_xcopy(struct se_cmd *se_cmd ) { struct se_device *dev ; struct xcopy_op *xop ; unsigned char *p ; unsigned char *seg_desc ; unsigned int list_id ; unsigned int list_id_usage ; unsigned int sdll ; unsigned int inline_dl ; unsigned int sa ; sense_reason_t ret ; int rc ; unsigned short tdll ; void *tmp ; void *tmp___0 ; struct _ddebug descriptor ; long tmp___1 ; struct _ddebug descriptor___0 ; long tmp___2 ; struct _ddebug descriptor___1 ; long tmp___3 ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { dev = se_cmd->se_dev; xop = (struct xcopy_op *)0; p = (unsigned char *)0U; ret = 9U; if (dev->dev_attrib.emulate_3pc == 0) { printk("\vEXTENDED_COPY operation explicitly disabled\n"); return (2U); } else { } sa = (unsigned int )*(se_cmd->t_task_cdb + 1UL) & 31U; if (sa != 0U) { printk("\vEXTENDED_COPY(LID4) not supported\n"); return (2U); } else { } tmp = kzalloc(224UL, 208U); xop = (struct xcopy_op *)tmp; if ((unsigned long )xop == (unsigned long )((struct xcopy_op *)0)) { printk("\vUnable to allocate xcopy_op\n"); return (18U); } else { } xop->xop_se_cmd = se_cmd; tmp___0 = transport_kmap_data_sg(se_cmd); p = (unsigned char *)tmp___0; if ((unsigned long )p == (unsigned long )((unsigned char *)0U)) { printk("\vtransport_kmap_data_sg() failed in target_do_xcopy\n"); kfree((void const *)xop); return (18U); } else { } list_id = (unsigned int )*p; list_id_usage = (unsigned int )(((int )*(p + 1UL) & 24) >> 3); tdll = get_unaligned_be16((void const *)p + 2U); sdll = get_unaligned_be32((void const *)p + 8U); inline_dl = get_unaligned_be32((void const *)p + 12U); if (inline_dl != 0U) { printk("\vXCOPY with non zero inline data length\n"); goto out; } else { } descriptor.modname = "target_core_mod"; descriptor.function = "target_do_xcopy"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x tdll: %hu sdll: %u inline_dl: %u\n"; descriptor.lineno = 874U; descriptor.flags = 0U; tmp___1 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___1 != 0L) { __dynamic_pr_debug(& descriptor, "Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage, (int )tdll, sdll, inline_dl); } else { } rc = target_xcopy_parse_target_descriptors(se_cmd, xop, p + 16UL, (int )tdll); if (rc <= 0) { goto out; } else { } if 
((xop->src_dev)->dev_attrib.block_size != (xop->dst_dev)->dev_attrib.block_size) { printk("\vXCOPY: Non matching src_dev block_size: %u + dst_dev block_size: %u currently unsupported\n", (xop->src_dev)->dev_attrib.block_size, (xop->dst_dev)->dev_attrib.block_size); xcopy_pt_undepend_remotedev(xop); ret = 10U; goto out; } else { } descriptor___0.modname = "target_core_mod"; descriptor___0.function = "target_do_xcopy"; descriptor___0.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___0.format = "XCOPY: Processed %d target descriptors, length: %u\n"; descriptor___0.lineno = 892U; descriptor___0.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_pr_debug(& descriptor___0, "XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * 32); } else { } seg_desc = p + 16UL; seg_desc = seg_desc + (unsigned long )(rc * 32); rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll); if (rc <= 0) { xcopy_pt_undepend_remotedev(xop); goto out; } else { } transport_kunmap_data_sg(se_cmd); descriptor___1.modname = "target_core_mod"; descriptor___1.function = "target_do_xcopy"; descriptor___1.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor___1.format = "XCOPY: Processed %d segment descriptors, length: %u\n"; descriptor___1.lineno = 904U; descriptor___1.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor___1.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_pr_debug(& descriptor___1, "XCOPY: Processed %d segment descriptors, length: %u\n", rc, rc * 28); } else { } __init_work(& xop->xop_work, 0); __constr_expr_0.counter = 137438953408L; xop->xop_work.data = __constr_expr_0; lockdep_init_map(& xop->xop_work.lockdep_map, "(&xop->xop_work)", & __key, 0); INIT_LIST_HEAD(& xop->xop_work.entry); xop->xop_work.func = & target_xcopy_do_work; queue_work___0(xcopy_wq, & xop->xop_work); return (0U); out: ; if ((unsigned long )p != (unsigned long )((unsigned char *)0U)) { transport_kunmap_data_sg(se_cmd); } else { } kfree((void const *)xop); return (ret); } } static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd ) { unsigned char *p ; void *tmp ; { tmp = transport_kmap_data_sg(se_cmd); p = (unsigned char *)tmp; if ((unsigned long )p == (unsigned long )((unsigned char *)0U)) { printk("\vtransport_kmap_data_sg failed in target_rcr_operating_parameters\n"); return (18U); } else { } if (se_cmd->data_length <= 53U) { printk("\vReceive Copy Results Op Parameters length too small: %u\n", se_cmd->data_length); transport_kunmap_data_sg(se_cmd); return (8U); } else { } *(p + 4UL) = 1U; put_unaligned_be16(2, (void *)p + 8U); put_unaligned_be16(1, (void *)p + 10U); put_unaligned_be32(1024U, (void *)p + 12U); put_unaligned_be32(268435456U, (void *)p + 16U); put_unaligned_be32(0U, (void *)p + 20U); put_unaligned_be32(0U, (void *)p + 24U); put_unaligned_be32(0U, (void *)p + 28U); put_unaligned_be16(1, (void *)p + 34U); *(p + 36UL) = 1U; *(p + 37UL) = 9U; *(p + 38UL) = 9U; *(p + 39UL) = 9U; *(p + 43UL) = 2U; *(p + 44UL) = 2U; *(p + 45UL) = 228U; put_unaligned_be32(42U, (void *)p); transport_kunmap_data_sg(se_cmd); target_complete_cmd(se_cmd, 0); return (0U); 
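/* target_do_receive_copy_results() below decodes the RECEIVE COPY RESULTS CDB: only the OPERATING PARAMETERS service action (sa == 3) is handled, and a non-zero list identifier or any other service action is rejected. */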
} } sense_reason_t target_do_receive_copy_results(struct se_cmd *se_cmd ) { unsigned char *cdb ; int sa ; int list_id ; sense_reason_t rc ; struct _ddebug descriptor ; long tmp ; { cdb = se_cmd->t_task_cdb; sa = (int )*(cdb + 1UL) & 31; list_id = (int )*(cdb + 2UL); rc = 0U; descriptor.modname = "target_core_mod"; descriptor.function = "target_do_receive_copy_results"; descriptor.filename = "/work/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--32_7a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/5874/dscv_tempdir/dscv/ri/32_7a/drivers/target/target_core_xcopy.c"; descriptor.format = "Entering target_do_receive_copy_results: SA: 0x%02x, List ID: 0x%02x, AL: %u\n"; descriptor.lineno = 1013U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_pr_debug(& descriptor, "Entering target_do_receive_copy_results: SA: 0x%02x, List ID: 0x%02x, AL: %u\n", sa, list_id, se_cmd->data_length); } else { } if (list_id != 0) { printk("\vReceive Copy Results with non zero list identifier not supported\n"); return (8U); } else { } switch (sa) { case 3: rc = target_rcr_operating_parameters(se_cmd); goto ldv_57480; case 0: ; case 1: ; case 4: ; default: printk("\vUnsupported SA for receive copy results: 0x%02x\n", sa); return (8U); } ldv_57480: ; return (rc); } } extern int ldv_release_8(void) ; extern int ldv_probe_8(void) ; void activate_work_7(struct work_struct *work , int state ) { { if (ldv_work_7_0 == 0) { ldv_work_struct_7_0 = work; ldv_work_7_0 = state; return; } else { } if (ldv_work_7_1 == 0) { ldv_work_struct_7_1 = work; ldv_work_7_1 = state; return; } else { } if (ldv_work_7_2 == 0) { ldv_work_struct_7_2 = work; ldv_work_7_2 = state; return; } else { } if (ldv_work_7_3 == 0) { ldv_work_struct_7_3 = work; ldv_work_7_3 = state; return; } else { } return; } } void disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 3 || ldv_work_7_0 == 2) && (unsigned long )ldv_work_struct_7_0 == (unsigned long )work) { ldv_work_7_0 = 1; } else { } if ((ldv_work_7_1 == 3 || ldv_work_7_1 == 2) && (unsigned long )ldv_work_struct_7_1 == (unsigned long )work) { ldv_work_7_1 = 1; } else { } if ((ldv_work_7_2 == 3 || ldv_work_7_2 == 2) && (unsigned long )ldv_work_struct_7_2 == (unsigned long )work) { ldv_work_7_2 = 1; } else { } if ((ldv_work_7_3 == 3 || ldv_work_7_3 == 2) && (unsigned long )ldv_work_struct_7_3 == (unsigned long )work) { ldv_work_7_3 = 1; } else { } return; } } void call_and_disable_work_7(struct work_struct *work ) { { if ((ldv_work_7_0 == 2 || ldv_work_7_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_0) { target_xcopy_do_work(work); ldv_work_7_0 = 1; return; } else { } if ((ldv_work_7_1 == 2 || ldv_work_7_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_1) { target_xcopy_do_work(work); ldv_work_7_1 = 1; return; } else { } if ((ldv_work_7_2 == 2 || ldv_work_7_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_2) { target_xcopy_do_work(work); ldv_work_7_2 = 1; return; } else { } if ((ldv_work_7_3 == 2 || ldv_work_7_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_7_3) { target_xcopy_do_work(work); ldv_work_7_3 = 1; return; } else { } return; } } void call_and_disable_all_7(int state ) { { if (ldv_work_7_0 == state) { call_and_disable_work_7(ldv_work_struct_7_0); } else { } if (ldv_work_7_1 == state) { call_and_disable_work_7(ldv_work_struct_7_1); } else { } if (ldv_work_7_2 == state) { 
call_and_disable_work_7(ldv_work_struct_7_2); } else { } if (ldv_work_7_3 == state) { call_and_disable_work_7(ldv_work_struct_7_3); } else { } return; } } void work_init_7(void) { { ldv_work_7_0 = 0; ldv_work_7_1 = 0; ldv_work_7_2 = 0; ldv_work_7_3 = 0; return; } } /* Nondeterministically pick one activated work slot (state 2 or 3), run its stored work item, then mark the slot processed. */ void invoke_work_7(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_7_0 == 2 || ldv_work_7_0 == 3) { ldv_work_7_0 = 4; target_xcopy_do_work(ldv_work_struct_7_0); ldv_work_7_0 = 1; } else { } goto ldv_57513; case 1: ; if (ldv_work_7_1 == 2 || ldv_work_7_1 == 3) { ldv_work_7_1 = 4; target_xcopy_do_work(ldv_work_struct_7_1); ldv_work_7_1 = 1; } else { } goto ldv_57513; case 2: ; if (ldv_work_7_2 == 2 || ldv_work_7_2 == 3) { ldv_work_7_2 = 4; target_xcopy_do_work(ldv_work_struct_7_2); ldv_work_7_2 = 1; } else { } goto ldv_57513; case 3: ; if (ldv_work_7_3 == 2 || ldv_work_7_3 == 3) { ldv_work_7_3 = 4; target_xcopy_do_work(ldv_work_struct_7_3); ldv_work_7_3 = 1; } else { } goto ldv_57513; default: ldv_stop(); } ldv_57513: ; return; } } void ldv_initialize_target_core_fabric_ops_8(void) { void *tmp ; { tmp = ldv_init_zalloc(832UL); xcopy_pt_tfo_group0 = (struct se_cmd *)tmp; return; } } void ldv_main_exported_8(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_state_variable_8 == 1) { xcopy_pt_queue_status(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_queue_status(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 1: ; if (ldv_state_variable_8 == 1) { xcopy_pt_release_cmd(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_release_cmd(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 2: ; if (ldv_state_variable_8 == 2) { xcopy_pt_write_pending(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 3: ; if (ldv_state_variable_8 == 1) { xcopy_pt_check_stop_free(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_check_stop_free(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 4: ; if (ldv_state_variable_8 == 1) { xcopy_pt_get_cmd_state(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_get_cmd_state(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 5: ; if (ldv_state_variable_8 == 1) { xcopy_pt_queue_data_in(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_queue_data_in(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 6: ; if (ldv_state_variable_8 == 1) { xcopy_pt_get_fabric_name(); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_get_fabric_name(); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 7: ; if (ldv_state_variable_8 == 1) { xcopy_pt_write_pending_status(xcopy_pt_tfo_group0); ldv_state_variable_8 = 1; } else { } if (ldv_state_variable_8 == 2) { xcopy_pt_write_pending_status(xcopy_pt_tfo_group0); ldv_state_variable_8 = 2; } else { } goto ldv_57525; case 8: ; if (ldv_state_variable_8 == 2) { ldv_release_8(); ldv_state_variable_8 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_57525; case 9: ; if (ldv_state_variable_8 == 1) { ldv_probe_8(); ldv_state_variable_8 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_57525; default: ldv_stop(); } ldv_57525: ; return; } } bool ldv_queue_work_on_591(int 
ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_592(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___0 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_593(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___1 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_594(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_595(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void ldv_mutex_lock_596(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_597(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_598(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_599(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___3 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_600(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_601(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex_of_inode(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_602(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex_of_inode(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_603(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_g_device_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_604(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_device_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_605(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_device_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_606(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_g_device_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_destroy_workqueue_607(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } __inline static void ldv_error(void) { { ERROR: ; __VERIFIER_error(); } } __inline static int ldv_undef_int_negative(void) { 
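/* LDV helper: draw a nondeterministic int and prune (ldv_stop) every path where it is non-negative, so callers only ever observe a strictly negative value. */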
int ret ; int tmp ; { tmp = ldv_undef_int(); ret = tmp; if (ret >= 0) { ldv_stop(); } else { } return (ret); } } bool ldv_is_err(void const *ptr ) { { return ((unsigned long )ptr > 2012UL); } } void *ldv_err_ptr(long error ) { { return ((void *)(2012L - error)); } } long ldv_ptr_err(void const *ptr ) { { return ((long )(2012UL - (unsigned long )ptr)); } } bool ldv_is_err_or_null(void const *ptr ) { bool tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = ldv_is_err(ptr); if ((int )tmp) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((bool )tmp___0); } } static int ldv_mutex_acl_node_mutex_of_se_portal_group = 1; int ldv_mutex_lock_interruptible_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_acl_node_mutex_of_se_portal_group = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_acl_node_mutex_of_se_portal_group = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } ldv_mutex_acl_node_mutex_of_se_portal_group = 2; return; } } int ldv_mutex_trylock_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_acl_node_mutex_of_se_portal_group = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_acl_node_mutex_of_se_portal_group(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_acl_node_mutex_of_se_portal_group = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_acl_node_mutex_of_se_portal_group == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_acl_node_mutex_of_se_portal_group(struct mutex *lock ) { { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 2) { ldv_error(); } else { } ldv_mutex_acl_node_mutex_of_se_portal_group = 1; return; } } void ldv_usb_lock_device_acl_node_mutex_of_se_portal_group(void) { { ldv_mutex_lock_acl_node_mutex_of_se_portal_group((struct mutex *)0); return; } } int ldv_usb_trylock_device_acl_node_mutex_of_se_portal_group(void) { int tmp ; { tmp = ldv_mutex_trylock_acl_node_mutex_of_se_portal_group((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_acl_node_mutex_of_se_portal_group(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_acl_node_mutex_of_se_portal_group((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_acl_node_mutex_of_se_portal_group(void) { { 
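/* The same two-state lock model (1 = unlocked, 2 = locked, ldv_error() on double lock/unlock) is repeated below for backend_mutex, g_device_mutex, g_tf_lock and the other tracked mutexes. */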
ldv_mutex_unlock_acl_node_mutex_of_se_portal_group((struct mutex *)0); return; } } static int ldv_mutex_backend_mutex = 1; int ldv_mutex_lock_interruptible_backend_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_backend_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_backend_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_backend_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_backend_mutex(struct mutex *lock ) { { if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } ldv_mutex_backend_mutex = 2; return; } } int ldv_mutex_trylock_backend_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_backend_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_backend_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_backend_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_backend_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_backend_mutex == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_backend_mutex(struct mutex *lock ) { { if (ldv_mutex_backend_mutex != 2) { ldv_error(); } else { } ldv_mutex_backend_mutex = 1; return; } } void ldv_usb_lock_device_backend_mutex(void) { { ldv_mutex_lock_backend_mutex((struct mutex *)0); return; } } int ldv_usb_trylock_device_backend_mutex(void) { int tmp ; { tmp = ldv_mutex_trylock_backend_mutex((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_backend_mutex(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_backend_mutex((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_backend_mutex(void) { { ldv_mutex_unlock_backend_mutex((struct mutex *)0); return; } } static int ldv_mutex_g_device_mutex = 1; int ldv_mutex_lock_interruptible_g_device_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_g_device_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_g_device_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_g_device_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_g_device_mutex(struct mutex *lock ) { { if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } ldv_mutex_g_device_mutex = 2; return; } } int ldv_mutex_trylock_g_device_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { 
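/* Trylock succeeded on this nondeterministic branch: record g_device_mutex as held before returning 1. */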
ldv_mutex_g_device_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_g_device_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_g_device_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_g_device_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_device_mutex == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_g_device_mutex(struct mutex *lock ) { { if (ldv_mutex_g_device_mutex != 2) { ldv_error(); } else { } ldv_mutex_g_device_mutex = 1; return; } } void ldv_usb_lock_device_g_device_mutex(void) { { ldv_mutex_lock_g_device_mutex((struct mutex *)0); return; } } int ldv_usb_trylock_device_g_device_mutex(void) { int tmp ; { tmp = ldv_mutex_trylock_g_device_mutex((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_g_device_mutex(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_g_device_mutex((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_g_device_mutex(void) { { ldv_mutex_unlock_g_device_mutex((struct mutex *)0); return; } } static int ldv_mutex_g_tf_lock = 1; int ldv_mutex_lock_interruptible_g_tf_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_g_tf_lock = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_g_tf_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_g_tf_lock = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_g_tf_lock(struct mutex *lock ) { { if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } ldv_mutex_g_tf_lock = 2; return; } } int ldv_mutex_trylock_g_tf_lock(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_g_tf_lock = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_g_tf_lock(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_g_tf_lock = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_g_tf_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_g_tf_lock == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_g_tf_lock(struct mutex *lock ) { { if (ldv_mutex_g_tf_lock != 2) { ldv_error(); } else { } ldv_mutex_g_tf_lock = 1; return; } } void ldv_usb_lock_device_g_tf_lock(void) { { ldv_mutex_lock_g_tf_lock((struct mutex *)0); return; } } int ldv_usb_trylock_device_g_tf_lock(void) { int tmp ; { tmp = ldv_mutex_trylock_g_tf_lock((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_g_tf_lock(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_g_tf_lock((struct mutex *)0); return (0); } else { tmp = 
ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_g_tf_lock(void) { { ldv_mutex_unlock_g_tf_lock((struct mutex *)0); return; } } static int ldv_mutex_hba_access_mutex_of_se_hba = 1; int ldv_mutex_lock_interruptible_hba_access_mutex_of_se_hba(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_hba_access_mutex_of_se_hba = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_hba_access_mutex_of_se_hba(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_hba_access_mutex_of_se_hba = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_hba_access_mutex_of_se_hba(struct mutex *lock ) { { if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } ldv_mutex_hba_access_mutex_of_se_hba = 2; return; } } int ldv_mutex_trylock_hba_access_mutex_of_se_hba(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_hba_access_mutex_of_se_hba = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_hba_access_mutex_of_se_hba(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_hba_access_mutex_of_se_hba = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_hba_access_mutex_of_se_hba(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_hba_access_mutex_of_se_hba == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_hba_access_mutex_of_se_hba(struct mutex *lock ) { { if (ldv_mutex_hba_access_mutex_of_se_hba != 2) { ldv_error(); } else { } ldv_mutex_hba_access_mutex_of_se_hba = 1; return; } } void ldv_usb_lock_device_hba_access_mutex_of_se_hba(void) { { ldv_mutex_lock_hba_access_mutex_of_se_hba((struct mutex *)0); return; } } int ldv_usb_trylock_device_hba_access_mutex_of_se_hba(void) { int tmp ; { tmp = ldv_mutex_trylock_hba_access_mutex_of_se_hba((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_hba_access_mutex_of_se_hba(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_hba_access_mutex_of_se_hba((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_hba_access_mutex_of_se_hba(void) { { ldv_mutex_unlock_hba_access_mutex_of_se_hba((struct mutex *)0); return; } } static int ldv_mutex_i_mutex_of_inode = 1; int ldv_mutex_lock_interruptible_i_mutex_of_inode(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_i_mutex_of_inode = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_i_mutex_of_inode(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_i_mutex_of_inode = 2; return (0); } else { return (-4); } 
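/* The -4 return value above corresponds to -EINTR, modelling an interruptible/killable lock attempt that was interrupted rather than acquired. */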
} } void ldv_mutex_lock_i_mutex_of_inode(struct mutex *lock ) { { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } ldv_mutex_i_mutex_of_inode = 2; return; } } int ldv_mutex_trylock_i_mutex_of_inode(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_i_mutex_of_inode = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_i_mutex_of_inode(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_i_mutex_of_inode = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_i_mutex_of_inode(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex_of_inode == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_i_mutex_of_inode(struct mutex *lock ) { { if (ldv_mutex_i_mutex_of_inode != 2) { ldv_error(); } else { } ldv_mutex_i_mutex_of_inode = 1; return; } } void ldv_usb_lock_device_i_mutex_of_inode(void) { { ldv_mutex_lock_i_mutex_of_inode((struct mutex *)0); return; } } int ldv_usb_trylock_device_i_mutex_of_inode(void) { int tmp ; { tmp = ldv_mutex_trylock_i_mutex_of_inode((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_i_mutex_of_inode(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_i_mutex_of_inode((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_i_mutex_of_inode(void) { { ldv_mutex_unlock_i_mutex_of_inode((struct mutex *)0); return; } } static int ldv_mutex_lock = 1; int ldv_mutex_lock_interruptible_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_lock(struct mutex *lock ) { { if (ldv_mutex_lock != 1) { ldv_error(); } else { } ldv_mutex_lock = 2; return; } } int ldv_mutex_trylock_lock(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_lock != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_lock = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_lock != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_lock = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_lock(struct mutex *lock ) { { if (ldv_mutex_lock != 2) { ldv_error(); } else { } ldv_mutex_lock = 1; return; } } void ldv_usb_lock_device_lock(void) { { 
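/* The ldv_usb_* wrappers call the generic model with a NULL mutex pointer; only the static per-lock state variable is significant to the checker. */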
ldv_mutex_lock_lock((struct mutex *)0); return; } } int ldv_usb_trylock_device_lock(void) { int tmp ; { tmp = ldv_mutex_trylock_lock((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_lock(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_lock((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_lock(void) { { ldv_mutex_unlock_lock((struct mutex *)0); return; } } static int ldv_mutex_lun_entry_mutex_of_se_node_acl = 1; int ldv_mutex_lock_interruptible_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lun_entry_mutex_of_se_node_acl = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lun_entry_mutex_of_se_node_acl = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } ldv_mutex_lun_entry_mutex_of_se_node_acl = 2; return; } } int ldv_mutex_trylock_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_lun_entry_mutex_of_se_node_acl = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_lun_entry_mutex_of_se_node_acl(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_lun_entry_mutex_of_se_node_acl = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lun_entry_mutex_of_se_node_acl == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl(struct mutex *lock ) { { if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 2) { ldv_error(); } else { } ldv_mutex_lun_entry_mutex_of_se_node_acl = 1; return; } } void ldv_usb_lock_device_lun_entry_mutex_of_se_node_acl(void) { { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl((struct mutex *)0); return; } } int ldv_usb_trylock_device_lun_entry_mutex_of_se_node_acl(void) { int tmp ; { tmp = ldv_mutex_trylock_lun_entry_mutex_of_se_node_acl((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_lun_entry_mutex_of_se_node_acl(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_lun_entry_mutex_of_se_node_acl((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_lun_entry_mutex_of_se_node_acl(void) { { ldv_mutex_unlock_lun_entry_mutex_of_se_node_acl((struct mutex *)0); return; } } static int ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 1; int ldv_mutex_lock_interruptible_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { int 
nondetermined ; { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 2; return; } } int ldv_mutex_trylock_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_lun_tg_pt_md_mutex_of_se_lun(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_lun_tg_pt_md_mutex_of_se_lun(struct mutex *lock ) { { if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 2) { ldv_error(); } else { } ldv_mutex_lun_tg_pt_md_mutex_of_se_lun = 1; return; } } void ldv_usb_lock_device_lun_tg_pt_md_mutex_of_se_lun(void) { { ldv_mutex_lock_lun_tg_pt_md_mutex_of_se_lun((struct mutex *)0); return; } } int ldv_usb_trylock_device_lun_tg_pt_md_mutex_of_se_lun(void) { int tmp ; { tmp = ldv_mutex_trylock_lun_tg_pt_md_mutex_of_se_lun((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_lun_tg_pt_md_mutex_of_se_lun(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_lun_tg_pt_md_mutex_of_se_lun((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_lun_tg_pt_md_mutex_of_se_lun(void) { { ldv_mutex_unlock_lun_tg_pt_md_mutex_of_se_lun((struct mutex *)0); return; } } static int ldv_mutex_mutex_of_device = 1; int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_mutex_of_device = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_mutex_of_device = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_mutex_of_device(struct mutex *lock ) { { if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } ldv_mutex_mutex_of_device = 2; return; } } int ldv_mutex_trylock_mutex_of_device(struct mutex *lock ) { int 
is_mutex_held_by_another_thread ; { if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_mutex_of_device = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_mutex_of_device = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_mutex_of_device(struct mutex *lock ) { { if (ldv_mutex_mutex_of_device != 2) { ldv_error(); } else { } ldv_mutex_mutex_of_device = 1; return; } } void ldv_usb_lock_device_mutex_of_device(void) { { ldv_mutex_lock_mutex_of_device((struct mutex *)0); return; } } int ldv_usb_trylock_device_mutex_of_device(void) { int tmp ; { tmp = ldv_mutex_trylock_mutex_of_device((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_mutex_of_device(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_mutex_of_device((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_mutex_of_device(void) { { ldv_mutex_unlock_mutex_of_device((struct mutex *)0); return; } } static int ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 1; int ldv_mutex_lock_interruptible_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 2; return; } } int ldv_mutex_trylock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { int nondetermined ; { if 
(ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(struct mutex *lock ) { { if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 2) { ldv_error(); } else { } ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp = 1; return; } } void ldv_usb_lock_device_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(void) { { ldv_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp((struct mutex *)0); return; } } int ldv_usb_trylock_device_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(void) { int tmp ; { tmp = ldv_mutex_trylock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp(void) { { ldv_mutex_unlock_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp((struct mutex *)0); return; } } static int ldv_mutex_tpg_lun_mutex_of_se_portal_group = 1; int ldv_mutex_lock_interruptible_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_tpg_lun_mutex_of_se_portal_group = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } nondetermined = ldv_undef_int(); if (nondetermined != 0) { ldv_mutex_tpg_lun_mutex_of_se_portal_group = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } ldv_mutex_tpg_lun_mutex_of_se_portal_group = 2; return; } } int ldv_mutex_trylock_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread != 0) { return (0); } else { ldv_mutex_tpg_lun_mutex_of_se_portal_group = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_tpg_lun_mutex_of_se_portal_group(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_tpg_lun_mutex_of_se_portal_group = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group == 1) { nondetermined = ldv_undef_int(); if (nondetermined != 0) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group(struct mutex *lock ) { { if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 2) { ldv_error(); } else { } ldv_mutex_tpg_lun_mutex_of_se_portal_group = 1; return; } } void ldv_usb_lock_device_tpg_lun_mutex_of_se_portal_group(void) { { ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group((struct 
mutex *)0); return; } } int ldv_usb_trylock_device_tpg_lun_mutex_of_se_portal_group(void) { int tmp ; { tmp = ldv_mutex_trylock_tpg_lun_mutex_of_se_portal_group((struct mutex *)0); return (tmp); } } int ldv_usb_lock_device_for_reset_tpg_lun_mutex_of_se_portal_group(void) { int tmp ; int tmp___0 ; { tmp___0 = ldv_undef_int(); if (tmp___0 != 0) { ldv_mutex_lock_tpg_lun_mutex_of_se_portal_group((struct mutex *)0); return (0); } else { tmp = ldv_undef_int_negative(); return (tmp); } } } void ldv_usb_unlock_device_tpg_lun_mutex_of_se_portal_group(void) { { ldv_mutex_unlock_tpg_lun_mutex_of_se_portal_group((struct mutex *)0); return; } } void ldv_check_final_state(void) { { if (ldv_mutex_acl_node_mutex_of_se_portal_group != 1) { ldv_error(); } else { } if (ldv_mutex_backend_mutex != 1) { ldv_error(); } else { } if (ldv_mutex_g_device_mutex != 1) { ldv_error(); } else { } if (ldv_mutex_g_tf_lock != 1) { ldv_error(); } else { } if (ldv_mutex_hba_access_mutex_of_se_hba != 1) { ldv_error(); } else { } if (ldv_mutex_i_mutex_of_inode != 1) { ldv_error(); } else { } if (ldv_mutex_lock != 1) { ldv_error(); } else { } if (ldv_mutex_lun_entry_mutex_of_se_node_acl != 1) { ldv_error(); } else { } if (ldv_mutex_lun_tg_pt_md_mutex_of_se_lun != 1) { ldv_error(); } else { } if (ldv_mutex_mutex_of_device != 1) { ldv_error(); } else { } if (ldv_mutex_tg_pt_gp_md_mutex_of_t10_alua_tg_pt_gp != 1) { ldv_error(); } else { } if (ldv_mutex_tpg_lun_mutex_of_se_portal_group != 1) { ldv_error(); } else { } return; } }
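/* Editorial sketch (not part of the generated harness): the functions above form
 * the LDV mutex model. Each static ldv_mutex_* flag tracks one mutex family,
 * with 1 meaning "unlocked" and 2 meaning "locked". Locking a mutex whose flag
 * is not 1, or unlocking one whose flag is not 2, reaches ldv_error(), and
 * ldv_check_final_state() reports an error if any flag is still 2 when the
 * entry point returns. The hypothetical helper below only illustrates a correct
 * interaction with the model; ldv_sample_correct_usage() does not exist in the
 * original file. */
void ldv_sample_correct_usage(void)
{
  /* ldv_mutex_lock goes 1 (unlocked) -> 2 (locked); a second lock here would
   * drive the model into ldv_error(). */
  ldv_mutex_lock_lock((struct mutex *)0);
  /* critical section as seen by the verifier */
  /* ldv_mutex_lock goes 2 (locked) -> 1 (unlocked); omitting this unlock would
   * make ldv_check_final_state() report an error. */
  ldv_mutex_unlock_lock((struct mutex *)0);
  /* every ldv_mutex_* flag is back to 1, so the final check passes */
  ldv_check_final_state();
}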