extern void __VERIFIER_error() __attribute__ ((__noreturn__)); /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __le16; typedef __u16 __be16; typedef __u32 __le32; typedef __u32 __be32; typedef __u32 __wsum; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef unsigned int uint; typedef unsigned long ulong; typedef __s16 int16_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u16 uint16_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct____missing_field_name_9 { unsigned int a ; unsigned int b ; }; struct __anonstruct____missing_field_name_10 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion____missing_field_name_8 { struct __anonstruct____missing_field_name_9 __annonCompField4 ; struct __anonstruct____missing_field_name_10 __annonCompField5 ; }; struct desc_struct { union __anonunion____missing_field_name_8 __annonCompField6 ; }; typedef unsigned long 
pteval_t; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct __anonstruct_pte_t_11 { pteval_t pte ; }; typedef struct __anonstruct_pte_t_11 pte_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_12 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_12 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct mm_struct; struct task_struct; struct cpumask; struct qspinlock { atomic_t val ; }; typedef struct qspinlock arch_spinlock_t; struct qrwlock { atomic_t cnts ; arch_spinlock_t lock ; }; typedef struct qrwlock arch_rwlock_t; typedef void (*ctor_fn_t)(void); struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct device; struct net_device; struct file_operations; struct completion; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_16 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_17 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_18 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion____missing_field_name_15 { struct __anonstruct_futex_16 futex ; struct __anonstruct_nanosleep_17 nanosleep ; struct __anonstruct_poll_18 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion____missing_field_name_15 __annonCompField7 ; }; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion____missing_field_name_19 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion____missing_field_name_19 __annonCompField8 ; }; struct cpumask { unsigned long bits[128U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct fregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct____missing_field_name_29 { u64 rip ; u64 rdp ; }; struct __anonstruct____missing_field_name_30 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion____missing_field_name_28 { struct __anonstruct____missing_field_name_29 __annonCompField12 ; struct __anonstruct____missing_field_name_30 __annonCompField13 ; }; union __anonunion____missing_field_name_31 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct fxregs_state { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion____missing_field_name_28 __annonCompField14 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion____missing_field_name_31 __annonCompField15 ; }; struct swregs_state { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct xstate_header { u64 xfeatures ; u64 xcomp_bv ; u64 reserved[6U] ; }; struct xregs_state { struct fxregs_state i387 ; struct xstate_header header ; u8 __reserved[464U] ; 
}; union fpregs_state { struct fregs_state fsave ; struct fxregs_state fxsave ; struct swregs_state soft ; struct xregs_state xsave ; }; struct fpu { union fpregs_state state ; unsigned int last_cpu ; unsigned char fpstate_active ; unsigned char fpregs_active ; unsigned char counter ; }; struct seq_operations; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct fpu fpu ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; typedef atomic64_t atomic_long_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 1 ; unsigned char hardirqs_off : 1 ; unsigned short references : 12 ; unsigned int pin_count ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct____missing_field_name_35 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion____missing_field_name_34 { struct raw_spinlock rlock ; struct __anonstruct____missing_field_name_35 __annonCompField17 ; }; struct spinlock { union __anonunion____missing_field_name_34 __annonCompField18 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_36 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_36 rwlock_t; struct seqcount { unsigned int sequence ; struct lockdep_map dep_map ; }; typedef struct seqcount seqcount_t; struct __anonstruct_seqlock_t_45 { struct seqcount seqcount ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_45 seqlock_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; struct user_namespace; struct __anonstruct_kuid_t_46 { uid_t val ; }; typedef struct __anonstruct_kuid_t_46 kuid_t; struct __anonstruct_kgid_t_47 { gid_t val ; }; typedef struct __anonstruct_kgid_t_47 kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct 
timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct vm_area_struct; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct __anonstruct_nodemask_t_48 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_48 nodemask_t; struct optimistic_spin_queue { atomic_t tail ; }; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { long count ; struct list_head wait_list ; raw_spinlock_t wait_lock ; struct optimistic_spin_queue osq ; struct task_struct *owner ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct notifier_block; struct timer_list { struct hlist_node entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; u32 flags ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct ctl_table; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct____missing_field_name_50 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion____missing_field_name_49 { struct __anonstruct____missing_field_name_50 __annonCompField19 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion____missing_field_name_49 __annonCompField20 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; struct workqueue_struct *wq ; int cpu ; }; struct execute_work { struct work_struct work ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct resource { resource_size_t start ; resource_size_t end ; char const *name ; unsigned long flags ; struct resource *parent ; struct resource *sibling ; struct resource 
*child ; }; struct pci_dev; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct wake_irq; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; struct list_head clock_list ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool is_noirq_suspended ; bool is_late_suspended ; bool ignore_children ; bool early_init ; bool direct_complete ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue ; struct wake_irq *wakeirq ; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; unsigned char memalloc_noio : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; void (*set_latency_tolerance)(struct device * , s32 ) ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; void (*detach)(struct device * , bool ) ; int (*activate)(struct device * ) ; void (*sync)(struct device * ) ; void (*dismiss)(struct device * ) ; }; struct pci_bus; struct __anonstruct_mm_context_t_115 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; atomic_t perf_rdpmc_allowed ; }; typedef struct __anonstruct_mm_context_t_115 mm_context_t; struct bio_vec; struct llist_node; struct llist_node { struct llist_node *next ; }; struct call_single_data { struct llist_node llist ; void (*func)(void * ) ; void *info ; unsigned int flags ; }; struct cred; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, 
UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct __anonstruct____missing_field_name_148 { struct arch_uprobe_task autask ; unsigned long vaddr ; }; struct __anonstruct____missing_field_name_149 { struct callback_head dup_xol_work ; unsigned long dup_xol_addr ; }; union __anonunion____missing_field_name_147 { struct __anonstruct____missing_field_name_148 __annonCompField33 ; struct __anonstruct____missing_field_name_149 __annonCompField34 ; }; struct uprobe; struct return_instance; struct uprobe_task { enum uprobe_task_state state ; union __anonunion____missing_field_name_147 __annonCompField35 ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; struct return_instance *return_instances ; unsigned int depth ; }; struct xol_area; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; struct mem_cgroup; typedef void compound_page_dtor(struct page * ); union __anonunion____missing_field_name_150 { struct address_space *mapping ; void *s_mem ; }; union __anonunion____missing_field_name_152 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct____missing_field_name_156 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion____missing_field_name_155 { atomic_t _mapcount ; struct __anonstruct____missing_field_name_156 __annonCompField38 ; int units ; }; struct __anonstruct____missing_field_name_154 { union __anonunion____missing_field_name_155 __annonCompField39 ; atomic_t _count ; }; union __anonunion____missing_field_name_153 { unsigned long counters ; struct __anonstruct____missing_field_name_154 __annonCompField40 ; unsigned int active ; }; struct __anonstruct____missing_field_name_151 { union __anonunion____missing_field_name_152 __annonCompField37 ; union __anonunion____missing_field_name_153 __annonCompField41 ; }; struct __anonstruct____missing_field_name_158 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct____missing_field_name_159 { compound_page_dtor *compound_dtor ; unsigned long compound_order ; }; union __anonunion____missing_field_name_157 { struct list_head lru ; struct __anonstruct____missing_field_name_158 __annonCompField43 ; struct slab *slab_page ; struct callback_head callback_head ; struct __anonstruct____missing_field_name_159 __annonCompField44 ; pgtable_t pmd_huge_pte ; }; struct kmem_cache; union __anonunion____missing_field_name_160 { unsigned long private ; spinlock_t *ptl ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; union __anonunion____missing_field_name_150 __annonCompField36 ; struct __anonstruct____missing_field_name_151 __annonCompField42 ; union __anonunion____missing_field_name_157 __annonCompField45 ; union __anonunion____missing_field_name_160 __annonCompField46 ; struct mem_cgroup *mem_cgroup ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_shared_161 { struct rb_node rb ; unsigned long rb_subtree_last ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct __anonstruct_shared_161 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file 
*vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct task_rss_stat { int events ; int count[3U] ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct kioctx_table; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; u32 vmacache_seqnum ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; unsigned long mmap_base ; unsigned long mmap_legacy_base ; unsigned long task_size ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; atomic_long_t nr_ptes ; atomic_long_t nr_pmds ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[46U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct kioctx_table *ioctx_table ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_scan_offset ; int numa_scan_seq ; bool tlb_flush_pending ; struct uprobes_state uprobes_state ; void *bd_addr ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; union __anonunion____missing_field_name_166 { unsigned long bitmap[4U] ; struct callback_head callback_head ; }; struct idr_layer { int prefix ; int layer ; struct idr_layer *ary[256U] ; int count ; union __anonunion____missing_field_name_166 __annonCompField47 ; }; struct idr { struct idr_layer *hint ; struct idr_layer *top ; int layers ; int cur ; spinlock_t lock ; int id_free_cnt ; struct idr_layer *id_free ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct dentry; struct iattr; struct super_block; struct file_system_type; struct kernfs_open_node; struct kernfs_iattrs; struct kernfs_root; struct kernfs_elem_dir { unsigned long subdirs ; struct rb_root children ; struct kernfs_root *root ; }; struct kernfs_node; struct kernfs_elem_symlink { struct kernfs_node *target_kn ; }; struct kernfs_ops; struct kernfs_elem_attr { struct kernfs_ops const *ops ; struct kernfs_open_node *open ; loff_t size ; struct kernfs_node *notify_next ; }; union __anonunion____missing_field_name_171 { struct kernfs_elem_dir dir ; struct kernfs_elem_symlink symlink ; struct kernfs_elem_attr attr ; }; struct kernfs_node { atomic_t count ; atomic_t active ; 
struct lockdep_map dep_map ; struct kernfs_node *parent ; char const *name ; struct rb_node rb ; void const *ns ; unsigned int hash ; union __anonunion____missing_field_name_171 __annonCompField48 ; void *priv ; unsigned short flags ; umode_t mode ; unsigned int ino ; struct kernfs_iattrs *iattr ; }; struct kernfs_syscall_ops { int (*remount_fs)(struct kernfs_root * , int * , char * ) ; int (*show_options)(struct seq_file * , struct kernfs_root * ) ; int (*mkdir)(struct kernfs_node * , char const * , umode_t ) ; int (*rmdir)(struct kernfs_node * ) ; int (*rename)(struct kernfs_node * , struct kernfs_node * , char const * ) ; }; struct kernfs_root { struct kernfs_node *kn ; unsigned int flags ; struct ida ino_ida ; struct kernfs_syscall_ops *syscall_ops ; struct list_head supers ; wait_queue_head_t deactivate_waitq ; }; struct kernfs_open_file { struct kernfs_node *kn ; struct file *file ; void *priv ; struct mutex mutex ; int event ; struct list_head list ; char *prealloc_buf ; size_t atomic_write_len ; bool mmapped ; struct vm_operations_struct const *vm_ops ; }; struct kernfs_ops { int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; ssize_t (*read)(struct kernfs_open_file * , char * , size_t , loff_t ) ; size_t atomic_write_len ; bool prealloc ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; int (*mmap)(struct kernfs_open_file * , struct vm_area_struct * ) ; struct lock_class_key lockdep_key ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; bool (*current_may_mount)(void) ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct bin_attribute; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; struct bin_attribute **bin_attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct kernfs_node *sd ; struct kref kref ; struct delayed_work release ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const 
*(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *argv[3U] ; char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kernel_param; struct kernel_param_ops { unsigned int flags ; int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion____missing_field_name_172 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct module *mod ; struct kernel_param_ops const *ops ; u16 const perm ; s8 level ; u8 flags ; union __anonunion____missing_field_name_172 __annonCompField49 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct latch_tree_node { struct rb_node node[2U] ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; struct completion *kobj_completion ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2, MODULE_STATE_UNFORMED = 3 } ; struct mod_tree_node { struct module *mod ; struct latch_tree_node node ; }; struct module_sect_attrs; struct module_notes_attrs; struct tracepoint; struct trace_event_call; struct trace_enum_map; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct mutex param_lock ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; bool sig_ok ; bool async_probe_requested ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; struct mod_tree_node mtn_core ; struct mod_tree_node mtn_init ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int 
taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct trace_event_call **trace_events ; unsigned int num_trace_events ; struct trace_enum_map **trace_enums ; unsigned int num_trace_enums ; unsigned int num_ftrace_callsites ; unsigned long *ftrace_callsites ; bool klp_alive ; struct list_head source_list ; struct list_head target_list ; void (*exit)(void) ; atomic_t refcnt ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; typedef unsigned long cputime_t; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct user_struct; struct sysv_shm { struct list_head shm_clist ; }; struct __anonstruct_sigset_t_180 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_180 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_182 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_183 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_184 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_185 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__addr_bnd_187 { void *_lower ; void *_upper ; }; struct __anonstruct__sigfault_186 { void *_addr ; short _addr_lsb ; struct __anonstruct__addr_bnd_187 _addr_bnd ; }; struct __anonstruct__sigpoll_188 { long _band ; int _fd ; }; struct __anonstruct__sigsys_189 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_181 { int _pad[28U] ; struct __anonstruct__kill_182 _kill ; struct __anonstruct__timer_183 _timer ; struct __anonstruct__rt_184 _rt ; struct __anonstruct__sigchld_185 _sigchld ; struct __anonstruct__sigfault_186 _sigfault ; struct __anonstruct__sigpoll_188 _sigpoll ; struct __anonstruct__sigsys_189 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_181 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct percpu_counter { 
raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { __kernel_ulong_t rlim_cur ; __kernel_ulong_t rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t (*get_time)(void) ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; seqcount_t seq ; struct hrtimer *running ; unsigned int cpu ; unsigned int active_bases ; unsigned int clock_was_set_seq ; bool migration_enabled ; bool nohz_active ; unsigned char in_hrtirq : 1 ; unsigned char hres_active : 1 ; unsigned char hang_detected : 1 ; ktime_t expires_next ; struct hrtimer *next_timer ; unsigned int nr_events ; unsigned int nr_retries ; unsigned int nr_hangs ; unsigned int max_hang_time ; struct hrtimer_clock_base clock_base[4U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct assoc_array_ptr; struct assoc_array { struct assoc_array_ptr *root ; unsigned long nr_leaves_on_tree ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_index_key { struct key_type *type ; char const *description ; size_t desc_len ; }; union __anonunion____missing_field_name_196 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion____missing_field_name_197 { time_t expiry ; time_t revoked_at ; }; struct __anonstruct____missing_field_name_199 { struct key_type *type ; char *description ; }; union __anonunion____missing_field_name_198 { struct keyring_index_key index_key ; struct __anonstruct____missing_field_name_199 __annonCompField52 ; }; union __anonunion_type_data_200 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_202 { unsigned long value ; void *rcudata ; void *data ; void *data2[2U] ; }; union __anonunion____missing_field_name_201 { union __anonunion_payload_202 payload ; struct assoc_array keys ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion____missing_field_name_196 __annonCompField50 ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion____missing_field_name_197 __annonCompField51 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; union __anonunion____missing_field_name_198 __annonCompField53 ; union __anonunion_type_data_200 type_data ; union __anonunion____missing_field_name_201 __annonCompField54 ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct 
cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct percpu_ref; typedef void percpu_ref_func_t(struct percpu_ref * ); struct percpu_ref { atomic_long_t count ; unsigned long percpu_count_ptr ; percpu_ref_func_t *release ; percpu_ref_func_t *confirm_switch ; bool force_atomic ; struct callback_head rcu ; }; struct cgroup; struct cgroup_root; struct cgroup_subsys; struct cgroup_taskset; struct cgroup_subsys_state { struct cgroup *cgroup ; struct cgroup_subsys *ss ; struct percpu_ref refcnt ; struct cgroup_subsys_state *parent ; struct list_head sibling ; struct list_head children ; int id ; unsigned int flags ; u64 serial_nr ; struct callback_head callback_head ; struct work_struct destroy_work ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head mg_tasks ; struct list_head cgrp_links ; struct cgroup *dfl_cgrp ; struct cgroup_subsys_state *subsys[12U] ; struct list_head mg_preload_node ; struct list_head mg_node ; struct cgroup *mg_src_cgrp ; struct css_set *mg_dst_cset ; struct list_head e_cset_node[12U] ; struct callback_head callback_head ; }; struct cgroup { struct cgroup_subsys_state self ; unsigned long flags ; int id ; int populated_cnt ; struct kernfs_node *kn ; struct kernfs_node *procs_kn ; struct kernfs_node *populated_kn ; unsigned int subtree_control ; unsigned int child_subsys_mask ; struct cgroup_subsys_state *subsys[12U] ; struct cgroup_root *root ; struct list_head cset_links ; struct list_head e_csets[12U] ; struct list_head pidlists ; struct mutex pidlist_mutex ; wait_queue_head_t offline_waitq ; struct work_struct release_agent_work ; }; struct cgroup_root { struct kernfs_root *kf_root ; unsigned int subsys_mask ; int hierarchy_id ; struct cgroup cgrp ; atomic_t nr_cgrps ; struct list_head root_list ; unsigned int flags ; struct idr cgroup_idr ; char release_agent_path[4096U] ; char name[64U] ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct cgroup_subsys *ss ; struct list_head node ; struct kernfs_ops *kf_ops ; u64 (*read_u64)(struct cgroup_subsys_state * , struct cftype * ) ; s64 (*read_s64)(struct cgroup_subsys_state * , struct cftype * ) ; int (*seq_show)(struct seq_file * , void * ) ; void *(*seq_start)(struct seq_file * , loff_t * ) ; void *(*seq_next)(struct seq_file * , void * , loff_t * ) ; void (*seq_stop)(struct seq_file * , void * ) ; int (*write_u64)(struct cgroup_subsys_state * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup_subsys_state * , struct cftype * , s64 ) ; ssize_t (*write)(struct kernfs_open_file * , char * , size_t , loff_t ) ; struct lock_class_key lockdep_key ; }; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state * ) ; int (*css_online)(struct cgroup_subsys_state * ) ; void (*css_offline)(struct cgroup_subsys_state * ) ; void (*css_released)(struct cgroup_subsys_state * ) ; void (*css_free)(struct 
cgroup_subsys_state * ) ; void (*css_reset)(struct cgroup_subsys_state * ) ; void (*css_e_css_changed)(struct cgroup_subsys_state * ) ; int (*can_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup_subsys_state * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup_subsys_state * , struct cgroup_subsys_state * , struct task_struct * ) ; void (*bind)(struct cgroup_subsys_state * ) ; int disabled ; int early_init ; bool broken_hierarchy ; bool warned_broken_hierarchy ; int id ; char const *name ; struct cgroup_root *root ; struct idr css_idr ; struct list_head cfts ; struct cftype *dfl_cftypes ; struct cftype *legacy_cftypes ; unsigned int depends_on ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct nameidata; struct cfs_rq; struct task_group; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct task_cputime_atomic { atomic64_t utime ; atomic64_t stime ; atomic64_t sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime_atomic cputime_atomic ; int running ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; struct list_head thread_head ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; int posix_timer_id ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; seqlock_t stats_lock ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; unsigned int audit_tty_log_passwd ; struct tty_audit_buf *tty_audit_buf ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t sigpending ; atomic_t 
inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; u64 blkio_start ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; u64 freepages_start ; u64 freepages_delay ; u32 freepages_count ; }; struct wake_q_node { struct wake_q_node *next ; }; struct io_context; struct pipe_inode_info; struct uts_namespace; struct load_weight { unsigned long weight ; u32 inv_weight ; }; struct sched_avg { u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; unsigned long utilization_avg_contrib ; u32 runnable_avg_sum ; u32 avg_period ; u32 running_avg_sum ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; int depth ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned long watchdog_stamp ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct sched_dl_entity { struct rb_node rb_node ; u64 dl_runtime ; u64 dl_deadline ; u64 dl_period ; u64 dl_bw ; s64 runtime ; u64 deadline ; unsigned int flags ; int dl_throttled ; int dl_new ; int dl_boosted ; int dl_yielded ; struct hrtimer dl_timer ; }; struct memcg_oom_info { struct mem_cgroup *memcg ; gfp_t gfp_mask ; int order ; unsigned char may_oom : 1 ; }; struct sched_class; struct files_struct; struct compat_robust_list_head; struct numa_group; struct ftrace_ret_stack; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; struct task_struct *last_wakee ; unsigned long wakee_flips ; unsigned long wakee_flip_decay_ts ; int wake_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct sched_dl_entity dl ; struct hlist_head preempt_notifiers ; unsigned int btrace_seq ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; unsigned long rcu_tasks_nvcsw ; bool rcu_tasks_holdout ; struct list_head 
rcu_tasks_holdout_list ; int rcu_tasks_idle_cpu ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct rb_node pushable_dl_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; u32 vmacache_seqnum ; struct vm_area_struct *vmacache[4U] ; struct task_rss_stat rss_stat ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned long jobctl ; unsigned int personality ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; unsigned char sched_migrated : 1 ; unsigned char memcg_kmem_skip_account : 1 ; unsigned char brk_randomized : 1 ; unsigned long atomic_flags ; struct restart_block restart_block ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct list_head thread_node ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; u64 start_time ; u64 real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; struct nameidata *nameidata ; struct sysv_sem sysvsem ; struct sysv_shm sysvshm ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct wake_q_node wake_q ; struct rb_root pi_waiters ; struct rb_node *pi_waiters_leftmost ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct 
robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; unsigned int numa_scan_period ; unsigned int numa_scan_period_max ; int numa_preferred_nid ; unsigned long numa_migrate_retry ; u64 node_stamp ; u64 last_task_numa_placement ; u64 last_sum_exec_runtime ; struct callback_head numa_work ; struct list_head numa_entry ; struct numa_group *numa_group ; unsigned long *numa_faults ; unsigned long total_numa_faults ; unsigned long numa_faults_locality[3U] ; unsigned long numa_pages_migrated ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned int kasan_depth ; int curr_ret_stack ; struct ftrace_ret_stack *ret_stack ; unsigned long long ftrace_timestamp ; atomic_t trace_overrun ; atomic_t tracing_graph_pause ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_oom_info memcg_oom ; struct uprobe_task *utask ; unsigned int sequential_io ; unsigned int sequential_io_avg ; unsigned long task_state_change ; int pagefault_disabled ; }; enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 } ; typedef enum irqreturn irqreturn_t; struct iscsi_bus_flash_session; struct sockaddr; struct iscsi_task; struct scsi_device; struct scsi_qla_host; struct iscsi_bus_flash_conn; struct iscsi_cls_session; struct iscsi_cls_conn; struct scsi_cmnd; struct Scsi_Host; struct iscsi_endpoint; struct device_type; struct class; struct klist_node; struct klist { spinlock_t k_lock ; struct list_head k_list ; void (*get)(struct klist_node * ) ; void (*put)(struct klist_node * ) ; }; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct path; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; size_t pad_until ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; struct user_namespace *user_ns ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct pinctrl; struct pinctrl_state; struct dev_pin_info { struct pinctrl *p ; struct pinctrl_state *default_state ; struct pinctrl_state *sleep_state ; struct pinctrl_state *idle_state ; }; struct ratelimit_state { raw_spinlock_t lock ; int interval ; int burst ; int printed ; int missed ; unsigned long begin ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct subsys_private; struct bus_type; struct device_node; struct fwnode_handle; struct iommu_ops; struct iommu_group; struct device_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct device_attribute *dev_attrs ; struct attribute_group const **bus_groups ; struct attribute_group const **dev_groups ; struct 
attribute_group const **drv_groups ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*online)(struct device * ) ; int (*offline)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops const *iommu_ops ; struct subsys_private *p ; struct lock_class_key lock_key ; }; enum probe_type { PROBE_DEFAULT_STRATEGY = 0, PROBE_PREFER_ASYNCHRONOUS = 1, PROBE_FORCE_SYNCHRONOUS = 2 } ; struct of_device_id; struct acpi_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; enum probe_type probe_type ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct attribute_group const **dev_groups ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * , kuid_t * , kgid_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct cma; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; void *driver_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; struct dev_pin_info *pins ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; unsigned long dma_pfn_offset ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct cma *cma_area ; struct dev_archdata archdata ; struct device_node *of_node ; struct fwnode_handle *fwnode ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void 
(*release)(struct device * ) ; struct iommu_group *iommu_group ; bool offline_disabled ; bool offline ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct wake_irq *wakeirq ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct __anonstruct____missing_field_name_220 { spinlock_t lock ; int count ; }; union __anonunion____missing_field_name_219 { struct __anonstruct____missing_field_name_220 __annonCompField58 ; }; struct lockref { union __anonunion____missing_field_name_219 __annonCompField59 ; }; struct vfsmount; struct __anonstruct____missing_field_name_222 { u32 hash ; u32 len ; }; union __anonunion____missing_field_name_221 { struct __anonstruct____missing_field_name_222 __annonCompField60 ; u64 hash_len ; }; struct qstr { union __anonunion____missing_field_name_221 __annonCompField61 ; unsigned char const *name ; }; struct dentry_operations; union __anonunion_d_u_223 { struct hlist_node d_alias ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; struct lockref d_lockref ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; struct list_head d_child ; struct list_head d_subdirs ; union __anonunion_d_u_223 d_u ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_weak_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct dentry const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; struct inode *(*d_select_inode)(struct dentry * , unsigned int ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; int nid ; struct mem_cgroup *memcg ; }; struct shrinker { unsigned long (*count_objects)(struct shrinker * , struct shrink_control * ) ; unsigned long (*scan_objects)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; unsigned long flags ; struct list_head list ; atomic_long_t *nr_deferred ; }; struct list_lru_one { struct list_head list ; long nr_items ; }; struct list_lru_memcg { struct list_lru_one *lru[0U] ; }; struct list_lru_node { spinlock_t lock ; struct list_lru_one lru ; struct list_lru_memcg *memcg_lrus ; }; struct list_lru { struct list_lru_node *node ; struct list_head list ; }; struct __anonstruct____missing_field_name_227 { struct radix_tree_node *parent ; void *private_data ; }; union __anonunion____missing_field_name_226 { struct __anonstruct____missing_field_name_227 
__annonCompField62 ; struct callback_head callback_head ; }; struct radix_tree_node { unsigned int path ; unsigned int count ; union __anonunion____missing_field_name_226 __annonCompField63 ; struct list_head private_list ; void *slots[64U] ; unsigned long tags[3U][1U] ; }; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct bio_set; struct bio; struct bio_integrity_payload; struct block_device; typedef void bio_end_io_t(struct bio * , int ); struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bvec_iter { sector_t bi_sector ; unsigned int bi_size ; unsigned int bi_idx ; unsigned int bi_bvec_done ; }; union __anonunion____missing_field_name_230 { struct bio_integrity_payload *bi_integrity ; }; struct bio { struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; struct bvec_iter bi_iter ; unsigned int bi_phys_segments ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; atomic_t __bi_remaining ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; union __anonunion____missing_field_name_230 __annonCompField64 ; unsigned short bi_vcnt ; unsigned short bi_max_vecs ; atomic_t __bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct bdi_writeback; struct export_operations; struct hd_geometry; struct iovec; struct kiocb; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iov_iter; struct vm_fault; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct dquot; typedef __kernel_uid32_t projid_t; struct __anonstruct_kprojid_t_231 { projid_t val ; }; typedef struct __anonstruct_kprojid_t_231 kprojid_t; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion____missing_field_name_232 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion____missing_field_name_232 __annonCompField65 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_max_spc_limit ; qsize_t dqi_max_ino_limit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct 
super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; int (*get_projid)(struct inode * , kprojid_t * ) ; }; struct qc_dqblk { int d_fieldmask ; u64 d_spc_hardlimit ; u64 d_spc_softlimit ; u64 d_ino_hardlimit ; u64 d_ino_softlimit ; u64 d_space ; u64 d_ino_count ; s64 d_ino_timer ; s64 d_spc_timer ; int d_ino_warns ; int d_spc_warns ; u64 d_rt_spc_hardlimit ; u64 d_rt_spc_softlimit ; u64 d_rt_space ; s64 d_rt_spc_timer ; int d_rt_spc_warns ; }; struct qc_type_state { unsigned int flags ; unsigned int spc_timelimit ; unsigned int ino_timelimit ; unsigned int rt_spc_timelimit ; unsigned int spc_warnlimit ; unsigned int ino_warnlimit ; unsigned int rt_spc_warnlimit ; unsigned long long ino ; blkcnt_t blocks ; blkcnt_t nextents ; }; struct qc_state { unsigned int s_incoredqs ; struct qc_type_state s_state[3U] ; }; struct qc_info { int i_fieldmask ; unsigned int i_flags ; unsigned int i_spc_timelimit ; unsigned int i_ino_timelimit ; unsigned int i_rt_spc_timelimit ; unsigned int i_spc_warnlimit ; unsigned int i_ino_warnlimit ; unsigned int i_rt_spc_warnlimit ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_enable)(struct super_block * , unsigned int ) ; int (*quota_disable)(struct super_block * , unsigned int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*set_info)(struct super_block * , int , struct qc_info * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct qc_dqblk * ) ; int (*get_state)(struct super_block * , struct qc_state * ) ; int (*rm_xquota)(struct super_block * , unsigned int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct inode *files[3U] ; struct mem_dqinfo info[3U] ; struct quota_format_ops const *ops[3U] ; }; struct writeback_control; struct kiocb { struct file *ki_filp ; loff_t ki_pos ; void (*ki_complete)(struct kiocb * , long , long ) ; void *private ; int ki_flags ; }; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned int , unsigned int ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(struct kiocb * 
, struct iov_iter * , loff_t ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , unsigned long , unsigned long ) ; void (*is_dirty_writeback)(struct page * , bool * , bool * ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; atomic_t i_mmap_writable ; struct rb_root i_mmap ; struct rw_semaphore i_mmap_rwsem ; unsigned long nrpages ; unsigned long nrshadows ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion____missing_field_name_235 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion____missing_field_name_236 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock_context; struct cdev; union __anonunion____missing_field_name_237 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; char *i_link ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion____missing_field_name_235 __annonCompField66 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; unsigned long dirtied_time_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct bdi_writeback *i_wb ; int i_wb_frn_winner ; u16 i_wb_frn_avg_time ; u16 i_wb_frn_history ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion____missing_field_name_236 __annonCompField67 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; atomic_t i_readcount ; struct file_operations const *i_fop ; struct file_lock_context *i_flctx ; struct address_space i_data ; struct list_head i_devices ; union __anonunion____missing_field_name_237 __annonCompField68 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned 
int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_238 { struct llist_node fu_llist ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_238 f_u ; struct path f_path ; struct inode *f_inode ; struct file_operations const *f_op ; spinlock_t f_lock ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; struct mutex f_pos_lock ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; }; typedef void *fl_owner_t; struct file_lock; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; unsigned long (*lm_owner_key)(struct file_lock * ) ; fl_owner_t (*lm_get_owner)(fl_owner_t ) ; void (*lm_put_owner)(fl_owner_t ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , int ) ; bool (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock * , int , struct list_head * ) ; void (*lm_setup)(struct file_lock * , void ** ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_240 { struct list_head link ; int state ; }; union __anonunion_fl_u_239 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_240 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_list ; struct hlist_node fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; int fl_link_cpu ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_239 fl_u ; }; struct file_lock_context { spinlock_t flc_lock ; struct list_head flc_flock ; struct list_head flc_posix ; struct list_head flc_lease ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_iflags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head s_mounts ; struct block_device 
*s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; unsigned int s_quota_types ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; struct workqueue_struct *s_dio_done_wq ; struct hlist_head s_pins ; struct list_lru s_dentry_lru ; struct list_lru s_inode_lru ; struct callback_head rcu ; int s_stack_depth ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct dir_context; struct dir_context { int (*actor)(struct dir_context * , char const * , int , loff_t , u64 , unsigned int ) ; loff_t pos ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*read_iter)(struct kiocb * , struct iov_iter * ) ; ssize_t (*write_iter)(struct kiocb * , struct iov_iter * ) ; int (*iterate)(struct file * , struct dir_context * ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*mremap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** , void ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; void (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; char const *(*follow_link)(struct dentry * , void ** ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct inode * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry 
* , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*rename2)(struct inode * , struct dentry * , struct inode * , struct dentry * , unsigned int ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; int (*tmpfile)(struct inode * , struct dentry * , umode_t ) ; int (*set_acl)(struct inode * , struct posix_acl * , int ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_super)(struct super_block * ) ; int (*freeze_fs)(struct super_block * ) ; int (*thaw_super)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; struct dquot **(*get_dquots)(struct inode * ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; long (*nr_cached_objects)(struct super_block * , struct shrink_control * ) ; long (*free_cached_objects)(struct super_block * , struct shrink_control * ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { char uuid[37U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; 
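/* Annotation added for readability (not part of the CIL-generated output): this region mirrors the
   Linux block layer's disk model. struct hd_struct, being defined here, describes one partition --
   ref above is its reference count and callback_head below allows RCU-deferred freeing -- while
   struct gendisk further on describes the whole disk and its partition table. */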
struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *cow_page ; struct page *page ; unsigned long max_pgoff ; pte_t *pte ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; void (*map_pages)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*pfn_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; char const *(*name)(struct vm_area_struct * ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; struct page *(*find_special_page)(struct vm_area_struct * , unsigned long ) ; }; struct kvec; struct exception_table_entry { int insn ; int fixup ; }; struct proc_dir_entry; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; typedef int congested_fn(void * , int ); struct bdi_writeback_congested { unsigned long state ; atomic_t refcnt ; struct backing_dev_info *bdi ; int blkcg_id ; struct rb_node rb_node ; }; union __anonunion____missing_field_name_249 { struct work_struct release_work ; struct callback_head rcu ; }; struct bdi_writeback { struct backing_dev_info *bdi ; unsigned long state ; unsigned long last_old_flush ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; struct list_head b_dirty_time ; spinlock_t list_lock ; struct percpu_counter stat[4U] ; struct bdi_writeback_congested *congested ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; spinlock_t work_lock ; struct list_head work_list ; struct delayed_work dwork ; struct percpu_ref refcnt ; struct fprop_local_percpu memcg_completions ; struct cgroup_subsys_state *memcg_css ; struct cgroup_subsys_state *blkcg_css ; struct list_head memcg_node ; struct list_head blkcg_node ; union __anonunion____missing_field_name_249 __annonCompField76 ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; atomic_long_t tot_write_bandwidth ; struct bdi_writeback wb ; struct radix_tree_root cgwb_tree ; struct rb_root cgwb_congested_tree ; atomic_t usage_cnt ; 
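/* Annotation added for readability (not part of the CIL-generated output): struct backing_dev_info,
   completed just below, carries per-backing-device writeback state; struct bdi_writeback above holds
   the dirty-inode lists and write-bandwidth estimates used when flushing dirty pages. */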
wait_queue_head_t wb_waitq ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; union __anonunion____missing_field_name_250 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion____missing_field_name_251 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion____missing_field_name_250 __annonCompField77 ; union __anonunion____missing_field_name_251 __annonCompField78 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; struct bvec_iter bip_iter ; bio_end_io_t *bip_end_io ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_max_vcnt ; unsigned short bip_flags ; struct work_struct bip_work ; struct bio_vec *bip_vec ; struct bio_vec bip_inline_vecs[0U] ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bvec_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_integrity_pool ; spinlock_t rescue_lock ; struct bio_list rescue_list ; struct work_struct rescue_work ; struct workqueue_struct *rescue_workqueue ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct elevator_queue; struct blk_trace; struct request; struct bsg_job; struct blkcg_gq; struct blk_flush_queue; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; union __anonunion____missing_field_name_252 { struct call_single_data csd ; unsigned long fifo_time ; }; struct blk_mq_ctx; union __anonunion____missing_field_name_253 { struct hlist_node hash ; struct list_head ipi_list ; }; union __anonunion____missing_field_name_254 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_256 { struct io_cq *icq ; void *priv[2U] ; }; struct __anonstruct_flush_257 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion____missing_field_name_255 { struct __anonstruct_elv_256 elv ; struct __anonstruct_flush_257 flush ; }; struct request { struct list_head queuelist ; union __anonunion____missing_field_name_252 __annonCompField79 ; struct request_queue *q ; struct blk_mq_ctx *mq_ctx ; u64 cmd_flags ; unsigned int cmd_type ; unsigned long atomic_flags ; int cpu ; 
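/* Annotation added for readability (not part of the CIL-generated output): struct request, continued
   below, is the block layer's unit of I/O handed to a driver; __data_len and __sector below give its
   total length and starting sector, and the attached bio chain carries the actual data segments. */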
unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; union __anonunion____missing_field_name_253 __annonCompField80 ; union __anonunion____missing_field_name_254 __annonCompField81 ; union __anonunion____missing_field_name_255 __annonCompField82 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; void *special ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; struct elevator_type; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef void elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * , struct elevator_type * ); typedef void elevator_exit_fn(struct elevator_queue * ); typedef void elevator_registered_fn(struct request_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn *elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; elevator_registered_fn *elevator_registered_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct 
elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; unsigned char registered : 1 ; struct hlist_head hash[64U] ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; int alloc_policy ; int next_tag ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int chunk_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; unsigned char raid_partial_stripes_expensive ; }; struct blk_mq_ops; struct blk_mq_hw_ctx; struct throtl_data; struct blk_mq_tag_set; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; struct blk_mq_ops *mq_ops ; unsigned int *mq_map ; struct blk_mq_ctx *queue_ctx ; unsigned int nr_queues ; struct blk_mq_hw_ctx **queue_hw_ctx ; unsigned int nr_hw_queues ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; struct kobject mq_kobj ; struct device *dev ; int rpm_status ; unsigned int nr_pending ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag 
*queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int request_fn_active ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; struct blk_trace *blk_trace ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; struct blk_flush_queue *fq ; struct list_head requeue_list ; spinlock_t requeue_lock ; struct work_struct requeue_work ; struct mutex sysfs_lock ; int bypass_depth ; atomic_t mq_freeze_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct bsg_class_device bsg_dev ; struct throtl_data *td ; struct callback_head callback_head ; wait_queue_head_t mq_freeze_wq ; struct percpu_ref mq_usage_counter ; struct list_head all_q_node ; struct blk_mq_tag_set *tag_set ; struct list_head tag_set_list ; }; struct blk_plug { struct list_head list ; struct list_head mq_list ; struct list_head cb_list ; }; struct blk_integrity_iter { void *prot_buf ; void *data_buf ; sector_t seed ; unsigned int data_size ; unsigned short interval ; char const *disk_name ; }; typedef int integrity_processing_fn(struct blk_integrity_iter * ); struct blk_integrity { integrity_processing_fn *generate_fn ; integrity_processing_fn *verify_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short interval ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; void (*release)(struct gendisk * , fmode_t ) ; int (*rw_page)(struct block_device * , sector_t , struct page * , int ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; long (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * , long ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct iscsi_boot_kobj { struct kobject kobj ; struct attribute_group *attr_group ; struct list_head list ; void *data ; ssize_t (*show)(void * , int , char * ) ; umode_t (*is_visible)(void * , int ) ; void (*release)(void * ) ; }; struct iscsi_boot_kset { struct list_head kobj_list ; struct kset *kset ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int 
(*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; struct scsi_lun { __u8 scsi_lun[8U] ; }; struct scsi_sense_hdr { u8 response_code ; u8 sense_key ; u8 asc ; u8 ascq ; u8 byte4 ; u8 byte5 ; u8 byte6 ; u8 additional_length ; }; enum scsi_device_state { SDEV_CREATED = 1, SDEV_RUNNING = 2, SDEV_CANCEL = 3, SDEV_DEL = 4, SDEV_QUIESCE = 5, SDEV_OFFLINE = 6, SDEV_TRANSPORT_OFFLINE = 7, SDEV_BLOCK = 8, SDEV_CREATED_BLOCK = 9 } ; struct scsi_target; struct scsi_dh_data; struct scsi_device { struct Scsi_Host *host ; struct request_queue *request_queue ; struct list_head siblings ; struct list_head same_target_siblings ; atomic_t device_busy ; atomic_t device_blocked ; spinlock_t list_lock ; struct list_head cmd_list ; struct list_head starved_entry ; struct scsi_cmnd *current_cmnd ; unsigned short queue_depth ; unsigned short max_queue_depth ; unsigned short last_queue_full_depth ; unsigned short last_queue_full_count ; unsigned long last_queue_full_time ; unsigned long queue_ramp_up_period ; unsigned long last_queue_ramp_up ; unsigned int id ; unsigned int channel ; u64 lun ; unsigned int manufacturer ; unsigned int sector_size ; void *hostdata ; char type ; char scsi_level ; char inq_periph_qual ; unsigned char inquiry_len ; unsigned char *inquiry ; char const *vendor ; char const *model ; char const *rev ; int vpd_pg83_len ; unsigned char *vpd_pg83 ; int vpd_pg80_len ; unsigned char *vpd_pg80 ; unsigned char current_tag ; struct scsi_target *sdev_target ; unsigned int sdev_bflags ; unsigned int eh_timeout ; unsigned char removable : 1 ; unsigned char changed : 1 ; unsigned char busy : 1 ; unsigned char lockable : 1 ; unsigned char locked : 1 ; unsigned char borken : 1 ; unsigned char disconnect : 1 ; unsigned char soft_reset : 1 ; unsigned char sdtr : 1 ; unsigned char wdtr : 1 ; unsigned char ppr : 1 ; unsigned char tagged_supported : 1 ; unsigned char simple_tags : 1 ; unsigned char was_reset : 1 ; unsigned char expecting_cc_ua : 1 ; unsigned char use_10_for_rw : 1 ; unsigned char use_10_for_ms : 1 ; unsigned char no_report_opcodes : 1 ; unsigned char no_write_same : 1 ; unsigned char use_16_for_rw : 1 ; unsigned char skip_ms_page_8 : 1 ; unsigned char skip_ms_page_3f : 1 ; unsigned char skip_vpd_pages : 1 ; unsigned char try_vpd_pages : 1 ; unsigned char use_192_bytes_for_3f : 1 ; unsigned char no_start_on_add : 1 ; unsigned char allow_restart : 1 ; unsigned char manage_start_stop : 1 ; unsigned char start_stop_pwr_cond : 1 ; unsigned char no_uld_attach : 1 ; unsigned char select_no_atn : 1 ; unsigned char fix_capacity : 1 ; unsigned char guess_capacity : 1 ; unsigned char retry_hwerror : 1 ; unsigned char last_sector_bug : 1 ; unsigned char no_read_disc_info : 1 ; unsigned char no_read_capacity_16 : 1 ; unsigned char try_rc_10_first : 1 ; unsigned char is_visible : 1 ; 
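/* Annotation added for readability (not part of the CIL-generated output): struct scsi_device,
   continued below, represents one logical unit as seen by the SCSI midlayer; the one-bit fields in
   this stretch are per-device quirk and capability flags. */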
unsigned char wce_default_on : 1 ; unsigned char no_dif : 1 ; unsigned char broken_fua : 1 ; unsigned char lun_in_cdb : 1 ; atomic_t disk_events_disable_depth ; unsigned long supported_events[1U] ; unsigned long pending_events[1U] ; struct list_head event_list ; struct work_struct event_work ; unsigned int max_device_blocked ; atomic_t iorequest_cnt ; atomic_t iodone_cnt ; atomic_t ioerr_cnt ; struct device sdev_gendev ; struct device sdev_dev ; struct execute_work ew ; struct work_struct requeue_work ; struct scsi_dh_data *scsi_dh_data ; enum scsi_device_state sdev_state ; unsigned long sdev_data[0U] ; }; struct scsi_device_handler { struct list_head list ; struct module *module ; char const *name ; int (*check_sense)(struct scsi_device * , struct scsi_sense_hdr * ) ; struct scsi_dh_data *(*attach)(struct scsi_device * ) ; void (*detach)(struct scsi_device * ) ; int (*activate)(struct scsi_device * , void (*)(void * , int ) , void * ) ; int (*prep_fn)(struct scsi_device * , struct request * ) ; int (*set_params)(struct scsi_device * , char const * ) ; bool (*match)(struct scsi_device * ) ; }; struct scsi_dh_data { struct scsi_device_handler *scsi_dh ; struct scsi_device *sdev ; struct kref kref ; }; enum scsi_target_state { STARGET_CREATED = 1, STARGET_RUNNING = 2, STARGET_DEL = 3 } ; struct scsi_target { struct scsi_device *starget_sdev_user ; struct list_head siblings ; struct list_head devices ; struct device dev ; struct kref reap_ref ; unsigned int channel ; unsigned int id ; unsigned char create : 1 ; unsigned char single_lun : 1 ; unsigned char pdt_1f_for_no_lun : 1 ; unsigned char no_report_luns : 1 ; unsigned char expecting_lun_change : 1 ; atomic_t target_busy ; atomic_t target_blocked ; unsigned int can_queue ; unsigned int max_target_blocked ; char scsi_level ; enum scsi_target_state state ; void *hostdata ; unsigned long starget_data[0U] ; }; struct scsi_data_buffer { struct sg_table table ; unsigned int length ; int resid ; }; struct scsi_pointer { char *ptr ; int this_residual ; struct scatterlist *buffer ; int buffers_residual ; dma_addr_t dma_handle ; int volatile Status ; int volatile Message ; int volatile have_data_in ; int volatile sent_command ; int volatile phase ; }; struct scsi_cmnd { struct scsi_device *device ; struct list_head list ; struct list_head eh_entry ; struct delayed_work abort_work ; int eh_eflags ; unsigned long serial_number ; unsigned long jiffies_at_alloc ; int retries ; int allowed ; unsigned char prot_op ; unsigned char prot_type ; unsigned char prot_flags ; unsigned short cmd_len ; enum dma_data_direction sc_data_direction ; unsigned char *cmnd ; struct scsi_data_buffer sdb ; struct scsi_data_buffer *prot_sdb ; unsigned int underflow ; unsigned int transfersize ; struct request *request ; unsigned char *sense_buffer ; void (*scsi_done)(struct scsi_cmnd * ) ; struct scsi_pointer SCp ; unsigned char *host_scribble ; int result ; int flags ; unsigned char tag ; }; struct blk_mq_tags; struct blk_mq_cpu_notifier { struct list_head list ; void *data ; int (*notify)(void * , unsigned long , unsigned int ) ; }; struct blk_align_bitmap; struct blk_mq_ctxmap { unsigned int size ; unsigned int bits_per_word ; struct blk_align_bitmap *map ; }; struct __anonstruct____missing_field_name_259 { spinlock_t lock ; struct list_head dispatch ; }; struct blk_mq_hw_ctx { struct __anonstruct____missing_field_name_259 __annonCompField83 ; unsigned long state ; struct delayed_work run_work ; struct delayed_work delay_work ; cpumask_var_t cpumask ; int next_cpu ; int 
next_cpu_batch ; unsigned long flags ; struct request_queue *queue ; struct blk_flush_queue *fq ; void *driver_data ; struct blk_mq_ctxmap ctx_map ; unsigned int nr_ctx ; struct blk_mq_ctx **ctxs ; atomic_t wait_index ; struct blk_mq_tags *tags ; unsigned long queued ; unsigned long run ; unsigned long dispatched[10U] ; unsigned int numa_node ; unsigned int queue_num ; atomic_t nr_active ; struct blk_mq_cpu_notifier cpu_notifier ; struct kobject kobj ; }; struct blk_mq_tag_set { struct blk_mq_ops *ops ; unsigned int nr_hw_queues ; unsigned int queue_depth ; unsigned int reserved_tags ; unsigned int cmd_size ; int numa_node ; unsigned int timeout ; unsigned int flags ; void *driver_data ; struct blk_mq_tags **tags ; struct mutex tag_list_lock ; struct list_head tag_list ; }; struct blk_mq_queue_data { struct request *rq ; struct list_head *list ; bool last ; }; typedef int queue_rq_fn(struct blk_mq_hw_ctx * , struct blk_mq_queue_data const * ); typedef struct blk_mq_hw_ctx *map_queue_fn(struct request_queue * , int const ); typedef enum blk_eh_timer_return timeout_fn(struct request * , bool ); typedef int init_hctx_fn(struct blk_mq_hw_ctx * , void * , unsigned int ); typedef void exit_hctx_fn(struct blk_mq_hw_ctx * , unsigned int ); typedef int init_request_fn(void * , struct request * , unsigned int , unsigned int , unsigned int ); typedef void exit_request_fn(void * , struct request * , unsigned int , unsigned int ); struct blk_mq_ops { queue_rq_fn *queue_rq ; map_queue_fn *map_queue ; timeout_fn *timeout ; softirq_done_fn *complete ; init_hctx_fn *init_hctx ; exit_hctx_fn *exit_hctx ; init_request_fn *init_request ; exit_request_fn *exit_request ; }; struct scsi_host_cmd_pool; struct scsi_transport_template; struct scsi_host_template { struct module *module ; char const *name ; int (*detect)(struct scsi_host_template * ) ; int (*release)(struct Scsi_Host * ) ; char const *(*info)(struct Scsi_Host * ) ; int (*ioctl)(struct scsi_device * , int , void * ) ; int (*compat_ioctl)(struct scsi_device * , int , void * ) ; int (*queuecommand)(struct Scsi_Host * , struct scsi_cmnd * ) ; int (*eh_abort_handler)(struct scsi_cmnd * ) ; int (*eh_device_reset_handler)(struct scsi_cmnd * ) ; int (*eh_target_reset_handler)(struct scsi_cmnd * ) ; int (*eh_bus_reset_handler)(struct scsi_cmnd * ) ; int (*eh_host_reset_handler)(struct scsi_cmnd * ) ; int (*slave_alloc)(struct scsi_device * ) ; int (*slave_configure)(struct scsi_device * ) ; void (*slave_destroy)(struct scsi_device * ) ; int (*target_alloc)(struct scsi_target * ) ; void (*target_destroy)(struct scsi_target * ) ; int (*scan_finished)(struct Scsi_Host * , unsigned long ) ; void (*scan_start)(struct Scsi_Host * ) ; int (*change_queue_depth)(struct scsi_device * , int ) ; int (*bios_param)(struct scsi_device * , struct block_device * , sector_t , int * ) ; void (*unlock_native_capacity)(struct scsi_device * ) ; int (*show_info)(struct seq_file * , struct Scsi_Host * ) ; int (*write_info)(struct Scsi_Host * , char * , int ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*host_reset)(struct Scsi_Host * , int ) ; char const *proc_name ; struct proc_dir_entry *proc_dir ; int can_queue ; int this_id ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned int max_sectors ; unsigned long dma_boundary ; short cmd_per_lun ; unsigned char present ; int tag_alloc_policy ; unsigned char use_blk_tags : 1 ; unsigned char track_queue_depth : 1 ; unsigned char supported_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; 
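/* Annotation added for readability (not part of the CIL-generated output): struct scsi_host_template,
   continued below, is the table of callbacks and limits a low-level SCSI driver registers with the
   midlayer (the earlier scsi_qla_host/iscsi forward declarations suggest this harness targets an
   iSCSI HBA driver); struct Scsi_Host further on is the per-adapter runtime object built from it. */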
unsigned char use_clustering : 1 ; unsigned char emulated : 1 ; unsigned char skip_settle_delay : 1 ; unsigned char no_write_same : 1 ; unsigned char no_async_abort : 1 ; unsigned int max_host_blocked ; struct device_attribute **shost_attrs ; struct device_attribute **sdev_attrs ; struct list_head legacy_hosts ; u64 vendor_id ; unsigned int cmd_size ; struct scsi_host_cmd_pool *cmd_pool ; bool disable_blk_mq ; }; enum scsi_host_state { SHOST_CREATED = 1, SHOST_RUNNING = 2, SHOST_CANCEL = 3, SHOST_DEL = 4, SHOST_RECOVERY = 5, SHOST_CANCEL_RECOVERY = 6, SHOST_DEL_RECOVERY = 7 } ; union __anonunion____missing_field_name_260 { struct blk_queue_tag *bqt ; struct blk_mq_tag_set tag_set ; }; struct Scsi_Host { struct list_head __devices ; struct list_head __targets ; struct scsi_host_cmd_pool *cmd_pool ; spinlock_t free_list_lock ; struct list_head free_list ; struct list_head starved_list ; spinlock_t default_lock ; spinlock_t *host_lock ; struct mutex scan_mutex ; struct list_head eh_cmd_q ; struct task_struct *ehandler ; struct completion *eh_action ; wait_queue_head_t host_wait ; struct scsi_host_template *hostt ; struct scsi_transport_template *transportt ; union __anonunion____missing_field_name_260 __annonCompField84 ; atomic_t host_busy ; atomic_t host_blocked ; unsigned int host_failed ; unsigned int host_eh_scheduled ; unsigned int host_no ; int eh_deadline ; unsigned long last_reset ; unsigned int max_channel ; unsigned int max_id ; u64 max_lun ; unsigned int unique_id ; unsigned short max_cmd_len ; int this_id ; int can_queue ; short cmd_per_lun ; unsigned short sg_tablesize ; unsigned short sg_prot_tablesize ; unsigned int max_sectors ; unsigned long dma_boundary ; unsigned int nr_hw_queues ; unsigned long cmd_serial_number ; unsigned char active_mode : 2 ; unsigned char unchecked_isa_dma : 1 ; unsigned char use_clustering : 1 ; unsigned char host_self_blocked : 1 ; unsigned char reverse_ordering : 1 ; unsigned char tmf_in_progress : 1 ; unsigned char async_scan : 1 ; unsigned char eh_noresume : 1 ; unsigned char no_write_same : 1 ; unsigned char use_blk_mq : 1 ; unsigned char use_cmd_list : 1 ; char work_q_name[20U] ; struct workqueue_struct *work_q ; struct workqueue_struct *tmf_work_q ; unsigned char no_scsi2_lun_in_cdb : 1 ; unsigned int max_host_blocked ; unsigned int prot_capabilities ; unsigned char prot_guard_type ; struct request_queue *uspace_req_q ; unsigned long base ; unsigned long io_port ; unsigned char n_io_port ; unsigned char dma_channel ; unsigned int irq ; enum scsi_host_state shost_state ; struct device shost_gendev ; struct device shost_dev ; struct list_head sht_legacy_list ; void *shost_data ; struct device *dma_dev ; unsigned long hostdata[0U] ; }; typedef unsigned long kernel_ulong_t; struct pci_device_id { __u32 vendor ; __u32 device ; __u32 subvendor ; __u32 subdevice ; __u32 class ; __u32 class_mask ; kernel_ulong_t driver_data ; }; struct acpi_device_id { __u8 id[9U] ; kernel_ulong_t driver_data ; }; struct of_device_id { char name[32U] ; char type[32U] ; char compatible[128U] ; void const *data ; }; struct hotplug_slot; struct pci_slot { struct pci_bus *bus ; struct list_head list ; struct hotplug_slot *hotplug ; unsigned char number ; struct kobject kobj ; }; typedef int pci_power_t; typedef unsigned int pci_channel_state_t; enum pci_channel_state { pci_channel_io_normal = 1, pci_channel_io_frozen = 2, pci_channel_io_perm_failure = 3 } ; typedef unsigned short pci_dev_flags_t; typedef unsigned short pci_bus_flags_t; struct pcie_link_state; struct 
pci_vpd; struct pci_sriov; struct pci_ats; struct pci_driver; union __anonunion____missing_field_name_266 { struct pci_sriov *sriov ; struct pci_dev *physfn ; }; struct pci_dev { struct list_head bus_list ; struct pci_bus *bus ; struct pci_bus *subordinate ; void *sysdata ; struct proc_dir_entry *procent ; struct pci_slot *slot ; unsigned int devfn ; unsigned short vendor ; unsigned short device ; unsigned short subsystem_vendor ; unsigned short subsystem_device ; unsigned int class ; u8 revision ; u8 hdr_type ; u8 pcie_cap ; u8 msi_cap ; u8 msix_cap ; unsigned char pcie_mpss : 3 ; u8 rom_base_reg ; u8 pin ; u16 pcie_flags_reg ; u8 dma_alias_devfn ; struct pci_driver *driver ; u64 dma_mask ; struct device_dma_parameters dma_parms ; pci_power_t current_state ; u8 pm_cap ; unsigned char pme_support : 5 ; unsigned char pme_interrupt : 1 ; unsigned char pme_poll : 1 ; unsigned char d1_support : 1 ; unsigned char d2_support : 1 ; unsigned char no_d1d2 : 1 ; unsigned char no_d3cold : 1 ; unsigned char d3cold_allowed : 1 ; unsigned char mmio_always_on : 1 ; unsigned char wakeup_prepared : 1 ; unsigned char runtime_d3cold : 1 ; unsigned char ignore_hotplug : 1 ; unsigned int d3_delay ; unsigned int d3cold_delay ; struct pcie_link_state *link_state ; pci_channel_state_t error_state ; struct device dev ; int cfg_size ; unsigned int irq ; struct resource resource[17U] ; bool match_driver ; unsigned char transparent : 1 ; unsigned char multifunction : 1 ; unsigned char is_added : 1 ; unsigned char is_busmaster : 1 ; unsigned char no_msi : 1 ; unsigned char no_64bit_msi : 1 ; unsigned char block_cfg_access : 1 ; unsigned char broken_parity_status : 1 ; unsigned char irq_reroute_variant : 2 ; unsigned char msi_enabled : 1 ; unsigned char msix_enabled : 1 ; unsigned char ari_enabled : 1 ; unsigned char is_managed : 1 ; unsigned char needs_freset : 1 ; unsigned char state_saved : 1 ; unsigned char is_physfn : 1 ; unsigned char is_virtfn : 1 ; unsigned char reset_fn : 1 ; unsigned char is_hotplug_bridge : 1 ; unsigned char __aer_firmware_first_valid : 1 ; unsigned char __aer_firmware_first : 1 ; unsigned char broken_intx_masking : 1 ; unsigned char io_window_1k : 1 ; unsigned char irq_managed : 1 ; unsigned char has_secondary_link : 1 ; pci_dev_flags_t dev_flags ; atomic_t enable_cnt ; u32 saved_config_space[16U] ; struct hlist_head saved_cap_space ; struct bin_attribute *rom_attr ; int rom_attr_enabled ; struct bin_attribute *res_attr[17U] ; struct bin_attribute *res_attr_wc[17U] ; struct list_head msi_list ; struct attribute_group const **msi_irq_groups ; struct pci_vpd *vpd ; union __anonunion____missing_field_name_266 __annonCompField85 ; struct pci_ats *ats ; phys_addr_t rom ; size_t romlen ; char *driver_override ; }; struct pci_ops; struct msi_controller; struct pci_bus { struct list_head node ; struct pci_bus *parent ; struct list_head children ; struct list_head devices ; struct pci_dev *self ; struct list_head slots ; struct resource *resource[4U] ; struct list_head resources ; struct resource busn_res ; struct pci_ops *ops ; struct msi_controller *msi ; void *sysdata ; struct proc_dir_entry *procdir ; unsigned char number ; unsigned char primary ; unsigned char max_bus_speed ; unsigned char cur_bus_speed ; char name[48U] ; unsigned short bridge_ctl ; pci_bus_flags_t bus_flags ; struct device *bridge ; struct device dev ; struct bin_attribute *legacy_io ; struct bin_attribute *legacy_mem ; unsigned char is_added : 1 ; }; struct pci_ops { void *(*map_bus)(struct pci_bus * , unsigned int , int ) ; 
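/* Annotation added for readability (not part of the CIL-generated output): struct pci_ops collects a
   bus's configuration-space accessors (map_bus above, read/write below); struct pci_dev and struct
   pci_driver just above model PCI devices and the drivers that bind to them. */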
int (*read)(struct pci_bus * , unsigned int , int , int , u32 * ) ; int (*write)(struct pci_bus * , unsigned int , int , int , u32 ) ; }; struct pci_dynids { spinlock_t lock ; struct list_head list ; }; typedef unsigned int pci_ers_result_t; struct pci_error_handlers { pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state ) ; pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ; pci_ers_result_t (*link_reset)(struct pci_dev * ) ; pci_ers_result_t (*slot_reset)(struct pci_dev * ) ; void (*reset_notify)(struct pci_dev * , bool ) ; void (*resume)(struct pci_dev * ) ; }; struct pci_driver { struct list_head node ; char const *name ; struct pci_device_id const *id_table ; int (*probe)(struct pci_dev * , struct pci_device_id const * ) ; void (*remove)(struct pci_dev * ) ; int (*suspend)(struct pci_dev * , pm_message_t ) ; int (*suspend_late)(struct pci_dev * , pm_message_t ) ; int (*resume_early)(struct pci_dev * ) ; int (*resume)(struct pci_dev * ) ; void (*shutdown)(struct pci_dev * ) ; int (*sriov_configure)(struct pci_dev * , int ) ; struct pci_error_handlers const *err_handler ; struct device_driver driver ; struct pci_dynids dynids ; }; struct dma_pool; struct acpi_device; struct pci_sysdata { int domain ; int node ; struct acpi_device *companion ; void *iommu ; }; struct tasklet_struct { struct tasklet_struct *next ; unsigned long state ; atomic_t count ; void (*func)(unsigned long ) ; unsigned long data ; }; struct bsg_buffer { unsigned int payload_len ; int sg_cnt ; struct scatterlist *sg_list ; }; struct bsg_job { struct device *dev ; struct request *req ; void *request ; void *reply ; unsigned int request_len ; unsigned int reply_len ; struct bsg_buffer request_payload ; struct bsg_buffer reply_payload ; void *dd_data ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; union __anonunion____missing_field_name_267 { struct iovec const *iov ; struct kvec const *kvec ; struct bio_vec const *bvec ; }; struct iov_iter { int type ; size_t iov_offset ; size_t count ; union __anonunion____missing_field_name_267 __annonCompField86 ; unsigned long nr_segs ; }; typedef unsigned short __kernel_sa_family_t; struct __kernel_sockaddr_storage { __kernel_sa_family_t ss_family ; char __data[126U] ; }; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iov_iter msg_iter ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; struct kiocb *msg_iocb ; }; enum ldv_28054 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_28054 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct 
socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*set_peek_off)(struct sock * , int ) ; }; struct in6_addr; struct sk_buff; typedef u64 netdev_features_t; union __anonunion_in6_u_268 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_268 in6_u ; }; struct sockaddr_in6 { unsigned short sin6_family ; __be16 sin6_port ; __be32 sin6_flowinfo ; struct in6_addr sin6_addr ; __u32 sin6_scope_id ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct pipe_buf_operations; struct pipe_buffer { struct page *page ; unsigned int offset ; unsigned int len ; struct pipe_buf_operations const *ops ; unsigned int flags ; unsigned long private ; }; struct pipe_inode_info { struct mutex mutex ; wait_queue_head_t wait ; unsigned int nrbufs ; unsigned int curbuf ; unsigned int buffers ; unsigned int readers ; unsigned int writers ; unsigned int files ; unsigned int waiting_writers ; unsigned int r_counter ; unsigned int w_counter ; struct page *tmp_page ; struct fasync_struct *fasync_readers ; struct fasync_struct *fasync_writers ; struct pipe_buffer *bufs ; }; struct pipe_buf_operations { int can_merge ; int (*confirm)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*release)(struct pipe_inode_info * , struct pipe_buffer * ) ; int (*steal)(struct pipe_inode_info * , struct pipe_buffer * ) ; void (*get)(struct pipe_inode_info * , struct pipe_buffer * ) ; }; struct napi_struct; struct nf_conntrack { atomic_t use ; }; union __anonunion____missing_field_name_273 { struct net_device *physoutdev ; char neigh_header[8U] ; }; union __anonunion____missing_field_name_274 { __be32 ipv4_daddr ; struct in6_addr ipv6_daddr ; }; struct nf_bridge_info { atomic_t use ; unsigned char orig_proto ; bool pkt_otherhost ; __u16 frag_max_size ; unsigned int mask ; struct net_device *physindev ; union __anonunion____missing_field_name_273 __annonCompField90 ; union __anonunion____missing_field_name_274 __annonCompField91 ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct __anonstruct____missing_field_name_277 { u32 stamp_us ; u32 stamp_jiffies ; }; union __anonunion____missing_field_name_276 { u64 v64 ; struct __anonstruct____missing_field_name_277 __annonCompField92 ; }; struct skb_mstamp { union __anonunion____missing_field_name_276 __annonCompField93 ; }; union __anonunion____missing_field_name_280 { ktime_t tstamp ; struct skb_mstamp skb_mstamp ; }; struct __anonstruct____missing_field_name_279 { struct sk_buff *next ; struct sk_buff *prev ; union __anonunion____missing_field_name_280 
__annonCompField94 ; }; union __anonunion____missing_field_name_278 { struct __anonstruct____missing_field_name_279 __annonCompField95 ; struct rb_node rbnode ; }; struct sec_path; struct __anonstruct____missing_field_name_282 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion____missing_field_name_281 { __wsum csum ; struct __anonstruct____missing_field_name_282 __annonCompField97 ; }; union __anonunion____missing_field_name_283 { unsigned int napi_id ; unsigned int sender_cpu ; }; union __anonunion____missing_field_name_284 { __u32 mark ; __u32 reserved_tailroom ; }; union __anonunion____missing_field_name_285 { __be16 inner_protocol ; __u8 inner_ipproto ; }; struct sk_buff { union __anonunion____missing_field_name_278 __annonCompField96 ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; void (*destructor)(struct sk_buff * ) ; struct sec_path *sp ; struct nf_conntrack *nfct ; struct nf_bridge_info *nf_bridge ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; __u16 queue_mapping ; unsigned char cloned : 1 ; unsigned char nohdr : 1 ; unsigned char fclone : 2 ; unsigned char peeked : 1 ; unsigned char head_frag : 1 ; unsigned char xmit_more : 1 ; __u32 headers_start[0U] ; __u8 __pkt_type_offset[0U] ; unsigned char pkt_type : 3 ; unsigned char pfmemalloc : 1 ; unsigned char ignore_df : 1 ; unsigned char nfctinfo : 3 ; unsigned char nf_trace : 1 ; unsigned char ip_summed : 2 ; unsigned char ooo_okay : 1 ; unsigned char l4_hash : 1 ; unsigned char sw_hash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char encapsulation : 1 ; unsigned char encap_hdr_csum : 1 ; unsigned char csum_valid : 1 ; unsigned char csum_complete_sw : 1 ; unsigned char csum_level : 2 ; unsigned char csum_bad : 1 ; unsigned char ndisc_nodetype : 2 ; unsigned char ipvs_property : 1 ; unsigned char inner_protocol_type : 1 ; unsigned char remcsum_offload : 1 ; __u16 tc_index ; __u16 tc_verd ; union __anonunion____missing_field_name_281 __annonCompField98 ; __u32 priority ; int skb_iif ; __u32 hash ; __be16 vlan_proto ; __u16 vlan_tci ; union __anonunion____missing_field_name_283 __annonCompField99 ; __u32 secmark ; union __anonunion____missing_field_name_284 __annonCompField100 ; union __anonunion____missing_field_name_285 __annonCompField101 ; __u16 inner_transport_header ; __u16 inner_network_header ; __u16 inner_mac_header ; __be16 protocol ; __u16 transport_header ; __u16 network_header ; __u16 mac_header ; __u32 headers_end[0U] ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct __anonstruct_sync_serial_settings_287 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_287 sync_serial_settings; struct __anonstruct_te1_settings_288 { unsigned 
int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_288 te1_settings; struct __anonstruct_raw_hdlc_proto_289 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_289 raw_hdlc_proto; struct __anonstruct_fr_proto_290 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_290 fr_proto; struct __anonstruct_fr_proto_pvc_291 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_291 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_292 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_292 fr_proto_pvc_info; struct __anonstruct_cisco_proto_293 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_293 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_294 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_294 ifs_ifsu ; }; union __anonunion_ifr_ifrn_295 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_296 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_295 ifr_ifrn ; union __anonunion_ifr_ifru_296 ifr_ifru ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char erom_version[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_tunable { __u32 cmd ; __u32 id ; __u32 type_id ; __u32 len ; void *data[0U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct 
ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , 
u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_key_size)(struct net_device * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh)(struct net_device * , u32 * , u8 * , u8 * ) ; int (*set_rxfh)(struct net_device * , u32 const * , u8 const * , u8 const ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; int (*get_tunable)(struct net_device * , struct ethtool_tunable const * , void * ) ; int (*set_tunable)(struct net_device * , struct ethtool_tunable const * , void const * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[36U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[28U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[6U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[6U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[16U] ; }; struct udp_mib { unsigned long mibs[9U] ; }; struct linux_mib { unsigned long mibs[115U] ; }; struct linux_xfrm_mib { unsigned long mibs[29U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics ; struct ipstats_mib *ip_statistics ; struct linux_mib *net_statistics ; struct udp_mib *udp_statistics ; struct udp_mib *udplite_statistics ; 
struct icmp_mib *icmp_statistics ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6 ; struct udp_mib *udplite_stats_in6 ; struct ipstats_mib *ipv6_statistics ; struct icmpv6_mib *icmpv6_statistics ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { struct percpu_counter mem ; int timeout ; int high_thresh ; int low_thresh ; }; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct local_ports { seqlock_t lock ; int range[2U] ; bool warned ; }; struct ping_group_range { seqlock_t lock ; kgid_t range[2U] ; }; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *xfrm4_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; bool fib_offload_disabled ; struct sock *fibnl ; struct sock **icmp_sk ; struct sock *mc_autojoin_sk ; struct inet_peer_base *peers ; struct sock **tcp_sk ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; struct local_ports ip_local_ports ; int sysctl_tcp_ecn ; int sysctl_tcp_ecn_fallback ; int sysctl_ip_no_pmtu_disc ; int sysctl_ip_fwd_use_pmtu ; int sysctl_ip_nonlocal_bind ; int sysctl_fwmark_reflect ; int sysctl_tcp_fwmark_accept ; int sysctl_tcp_mtu_probing ; int sysctl_tcp_base_mss ; int sysctl_tcp_probe_threshold ; u32 sysctl_tcp_probe_interval ; struct ping_group_range ping_group_range ; atomic_t dev_addr_genid ; unsigned long *sysctl_local_reserved_ports ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; atomic_t rt_genid ; }; struct neighbour; struct dst_ops { unsigned short family ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; 
struct ctl_table_header *frags_hdr ; struct ctl_table_header *xfrm6_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int flowlabel_consistency ; int auto_flowlabels ; int icmpv6_time ; int anycast_src_echo_reply ; int fwmark_reflect ; int idgen_retries ; int idgen_delay ; int flowlabel_state_ranges ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct sock *mc_autojoin_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; atomic_t dev_addr_genid ; atomic_t fib6_sernum ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct netns_sysctl_lowpan { struct ctl_table_header *frags_hdr ; }; struct netns_ieee802154_lowpan { struct netns_sysctl_lowpan sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct in_addr { __be32 s_addr ; }; struct sockaddr_in { __kernel_sa_family_t sin_family ; __be16 sin_port ; struct in_addr sin_addr ; unsigned char __pad[8U] ; }; struct nf_logger; struct netns_nf { struct proc_dir_entry *proc_netfilter ; struct nf_logger const *nf_loggers[13U] ; struct ctl_table_header *nf_log_dir_header ; }; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; bool notrack_deprecated_warning ; bool clusterip_deprecated_warning ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table 
*ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ct_pcpu { spinlock_t lock ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct hlist_nulls_head tmpl ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; struct delayed_work ecache_dwork ; bool ecache_dwork_pending ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; unsigned int sysctl_log_invalid ; int sysctl_events ; int sysctl_acct ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int htable_size ; seqcount_t generation ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct ct_pcpu *pcpu_lists ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; struct nf_ip_net nf_ct_proto ; unsigned int labels_used ; u8 label_words ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; }; struct nft_af_info; struct netns_nftables { struct list_head af_info ; struct list_head commit_list ; struct nft_af_info *ipv4 ; struct nft_af_info *ipv6 ; struct nft_af_info *inet ; struct nft_af_info *arp ; struct nft_af_info *bridge ; struct nft_af_info *netdev ; unsigned int base_seq ; u8 gencursor ; }; struct flow_cache_percpu { struct hlist_head *hash_table ; int hash_count ; u32 hash_rnd ; int hash_rnd_recalc ; struct tasklet_struct flush_tasklet ; }; struct flow_cache { u32 hash_shift ; struct flow_cache_percpu *percpu ; struct notifier_block hotcpu_notifier ; int low_watermark ; int high_watermark ; struct timer_list rnd_timer ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; u8 dbits4 ; u8 sbits4 ; u8 dbits6 ; u8 sbits6 ; }; struct xfrm_policy_hthresh { struct work_struct work ; seqlock_t lock ; u8 lbits4 ; u8 rbits4 ; u8 lbits6 ; u8 rbits6 ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[3U] ; struct xfrm_policy_hash policy_bydst[3U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct xfrm_policy_hthresh policy_hthresh ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct 
ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; spinlock_t xfrm_state_lock ; rwlock_t xfrm_policy_lock ; struct mutex xfrm_cfg_mutex ; struct flow_cache flow_cache_global ; atomic_t flow_cache_genid ; struct list_head flow_cache_gc_list ; spinlock_t flow_cache_gc_lock ; struct work_struct flow_cache_gc_work ; struct work_struct flow_cache_flush_work ; struct mutex flow_flush_sem ; }; struct mpls_route; struct netns_mpls { size_t platform_labels ; struct mpls_route **platform_label ; struct ctl_table_header *ctl ; }; struct proc_ns_operations; struct ns_common { atomic_long_t stashed ; struct proc_ns_operations const *ops ; unsigned int inum ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; atomic64_t cookie_gen ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; spinlock_t nsid_lock ; struct idr netns_ids ; struct ns_common ns ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; unsigned int dev_unreg_count ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_ieee802154_lowpan ieee802154_lowpan ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_nf nf ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nftables nft ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct netns_mpls mpls ; struct sock *diag_nlsk ; atomic_t fnhe_genid ; }; struct __anonstruct_possible_net_t_320 { struct net *net ; }; typedef struct __anonstruct_possible_net_t_320 possible_net_t; enum fwnode_type { FWNODE_INVALID = 0, FWNODE_OF = 1, FWNODE_ACPI = 2, FWNODE_PDATA = 3 } ; struct fwnode_handle { enum fwnode_type type ; struct fwnode_handle *secondary ; }; typedef u32 phandle; struct property { char *name ; int length ; void *value ; struct property *next ; unsigned long _flags ; unsigned int unique_id ; struct bin_attribute attr ; }; struct device_node { char const *name ; char const *type ; phandle phandle ; char const *full_name ; struct fwnode_handle fwnode ; struct property *properties ; struct property *deadprops ; struct device_node *parent ; struct device_node *child ; struct device_node *sibling ; struct kobject kobj ; unsigned long _flags ; void *data ; }; enum ldv_31135 { PHY_INTERFACE_MODE_NA = 0, PHY_INTERFACE_MODE_MII = 1, PHY_INTERFACE_MODE_GMII = 2, PHY_INTERFACE_MODE_SGMII = 3, PHY_INTERFACE_MODE_TBI = 4, PHY_INTERFACE_MODE_REVMII = 5, PHY_INTERFACE_MODE_RMII = 6, PHY_INTERFACE_MODE_RGMII = 7, PHY_INTERFACE_MODE_RGMII_ID = 8, PHY_INTERFACE_MODE_RGMII_RXID = 9, PHY_INTERFACE_MODE_RGMII_TXID = 10, PHY_INTERFACE_MODE_RTBI = 11, PHY_INTERFACE_MODE_SMII = 12, PHY_INTERFACE_MODE_XGMII = 13, PHY_INTERFACE_MODE_MOCA = 14, PHY_INTERFACE_MODE_QSGMII = 15, PHY_INTERFACE_MODE_MAX = 16 } ; typedef enum ldv_31135 phy_interface_t; enum ldv_31189 { MDIOBUS_ALLOCATED = 1, MDIOBUS_REGISTERED = 2, MDIOBUS_UNREGISTERED = 3, MDIOBUS_RELEASED = 4 } ; struct phy_device; 
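/*
 * Editor's note: the callback tables flattened above (struct pci_driver,
 * struct ethtool_ops, ...) are declared here only so the verifier sees their
 * layout; they are normally instantiated by driver code, not by this
 * generated translation unit.  The fragment below is a minimal illustrative
 * sketch of how a hypothetical driver would populate the struct pci_driver
 * declared earlier.  All names prefixed ldv_demo_ and the numeric IDs are
 * invented placeholders that occur nowhere else in this file; the sketch
 * assumes only the standard PCI core helpers pci_enable_device() and
 * pci_disable_device().  It is wrapped in "#if 0" so the CIL-generated code
 * consumed by the verifier is left exactly as generated.
 */
#if 0
/* Probe: called by the PCI core for each device matching ldv_demo_ids. */
static int ldv_demo_probe(struct pci_dev *pdev, struct pci_device_id const *id)
{
	/* A real probe would also map BARs, request IRQs and register a
	 * net_device; here we only enable the device and report the result. */
	return pci_enable_device(pdev);
}

/* Remove: undo whatever probe set up. */
static void ldv_demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

/* Match table: one placeholder (vendor, device) pair, any subsystem IDs
 * (~0U corresponds to PCI_ANY_ID), terminated by a zeroed sentinel entry. */
static struct pci_device_id const ldv_demo_ids[] = {
	{ .vendor = 0x1234, .device = 0x5678,
	  .subvendor = ~0U, .subdevice = ~0U },
	{ }
};

/* The ops table filled in against the struct pci_driver layout above;
 * callbacks the driver does not implement are simply left NULL.  A module
 * init function would hand this table to pci_register_driver(), and the
 * exit path would pass it to pci_unregister_driver(). */
static struct pci_driver ldv_demo_driver = {
	.name     = "ldv_demo",
	.id_table = ldv_demo_ids,
	.probe    = ldv_demo_probe,
	.remove   = ldv_demo_remove,
};
#endif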
struct mii_bus { char const *name ; char id[17U] ; void *priv ; int (*read)(struct mii_bus * , int , int ) ; int (*write)(struct mii_bus * , int , int , u16 ) ; int (*reset)(struct mii_bus * ) ; struct mutex mdio_lock ; struct device *parent ; enum ldv_31189 state ; struct device dev ; struct phy_device *phy_map[32U] ; u32 phy_mask ; u32 phy_ignore_ta_mask ; int *irq ; }; enum phy_state { PHY_DOWN = 0, PHY_STARTING = 1, PHY_READY = 2, PHY_PENDING = 3, PHY_UP = 4, PHY_AN = 5, PHY_RUNNING = 6, PHY_NOLINK = 7, PHY_FORCING = 8, PHY_CHANGELINK = 9, PHY_HALTED = 10, PHY_RESUMING = 11 } ; struct phy_c45_device_ids { u32 devices_in_package ; u32 device_ids[8U] ; }; struct phy_driver; struct phy_device { struct phy_driver *drv ; struct mii_bus *bus ; struct device dev ; u32 phy_id ; struct phy_c45_device_ids c45_ids ; bool is_c45 ; bool is_internal ; bool has_fixups ; bool suspended ; enum phy_state state ; u32 dev_flags ; phy_interface_t interface ; int addr ; int speed ; int duplex ; int pause ; int asym_pause ; int link ; u32 interrupts ; u32 supported ; u32 advertising ; u32 lp_advertising ; int autoneg ; int link_timeout ; int irq ; void *priv ; struct work_struct phy_queue ; struct delayed_work state_queue ; atomic_t irq_disable ; struct mutex lock ; struct net_device *attached_dev ; void (*adjust_link)(struct net_device * ) ; }; struct phy_driver { u32 phy_id ; char *name ; unsigned int phy_id_mask ; u32 features ; u32 flags ; void const *driver_data ; int (*soft_reset)(struct phy_device * ) ; int (*config_init)(struct phy_device * ) ; int (*probe)(struct phy_device * ) ; int (*suspend)(struct phy_device * ) ; int (*resume)(struct phy_device * ) ; int (*config_aneg)(struct phy_device * ) ; int (*aneg_done)(struct phy_device * ) ; int (*read_status)(struct phy_device * ) ; int (*ack_interrupt)(struct phy_device * ) ; int (*config_intr)(struct phy_device * ) ; int (*did_interrupt)(struct phy_device * ) ; void (*remove)(struct phy_device * ) ; int (*match_phy_device)(struct phy_device * ) ; int (*ts_info)(struct phy_device * , struct ethtool_ts_info * ) ; int (*hwtstamp)(struct phy_device * , struct ifreq * ) ; bool (*rxtstamp)(struct phy_device * , struct sk_buff * , int ) ; void (*txtstamp)(struct phy_device * , struct sk_buff * , int ) ; int (*set_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*get_wol)(struct phy_device * , struct ethtool_wolinfo * ) ; void (*link_change_notify)(struct phy_device * ) ; int (*read_mmd_indirect)(struct phy_device * , int , int , int ) ; void (*write_mmd_indirect)(struct phy_device * , int , int , int , u32 ) ; int (*module_info)(struct phy_device * , struct ethtool_modinfo * ) ; int (*module_eeprom)(struct phy_device * , struct ethtool_eeprom * , u8 * ) ; struct device_driver driver ; }; struct fixed_phy_status { int link ; int speed ; int duplex ; int pause ; int asym_pause ; }; enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = 0, DSA_TAG_PROTO_DSA = 1, DSA_TAG_PROTO_TRAILER = 2, DSA_TAG_PROTO_EDSA = 3, DSA_TAG_PROTO_BRCM = 4 } ; struct dsa_chip_data { struct device *host_dev ; int sw_addr ; int eeprom_len ; struct device_node *of_node ; char *port_names[12U] ; struct device_node *port_dn[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; struct net_device *of_netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct packet_type; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; int (*rcv)(struct sk_buff * , struct net_device * , struct packet_type * , struct 
net_device * ) ; enum dsa_tag_protocol tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; enum dsa_tag_protocol tag_protocol ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct device *master_dev ; char hwmon_name[24U] ; struct device *hwmon_dev ; u32 dsa_port_mask ; u32 phys_port_mask ; u32 phys_mii_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; enum dsa_tag_protocol tag_protocol ; int priv_size ; char *(*probe)(struct device * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; u32 (*get_phy_flags)(struct dsa_switch * , int ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*adjust_link)(struct dsa_switch * , int , struct phy_device * ) ; void (*fixed_link_update)(struct dsa_switch * , int , struct fixed_phy_status * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; void (*get_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*set_wol)(struct dsa_switch * , int , struct ethtool_wolinfo * ) ; int (*suspend)(struct dsa_switch * ) ; int (*resume)(struct dsa_switch * ) ; int (*port_enable)(struct dsa_switch * , int , struct phy_device * ) ; void (*port_disable)(struct dsa_switch * , int , struct phy_device * ) ; int (*set_eee)(struct dsa_switch * , int , struct phy_device * , struct ethtool_eee * ) ; int (*get_eee)(struct dsa_switch * , int , struct ethtool_eee * ) ; int (*get_temp)(struct dsa_switch * , int * ) ; int (*get_temp_limit)(struct dsa_switch * , int * ) ; int (*set_temp_limit)(struct dsa_switch * , int ) ; int (*get_temp_alarm)(struct dsa_switch * , bool * ) ; int (*get_eeprom_len)(struct dsa_switch * ) ; int (*get_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct dsa_switch * , struct ethtool_eeprom * , u8 * ) ; int (*get_regs_len)(struct dsa_switch * , int ) ; void (*get_regs)(struct dsa_switch * , int , struct ethtool_regs * , void * ) ; int (*port_join_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_leave_bridge)(struct dsa_switch * , int , u32 ) ; int (*port_stp_update)(struct dsa_switch * , int , u8 ) ; int (*fdb_add)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_del)(struct dsa_switch * , int , unsigned char const * , u16 ) ; int (*fdb_getnext)(struct dsa_switch * , int , unsigned char * , bool * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_qcn { __u8 rpg_enable[8U] ; __u32 rppp_max_rps[8U] ; __u32 rpg_time_reset[8U] ; __u32 rpg_byte_reset[8U] ; __u32 rpg_threshold[8U] ; __u32 rpg_max_rate[8U] ; __u32 rpg_ai_rate[8U] ; __u32 rpg_hai_rate[8U] ; __u32 rpg_gd[8U] ; __u32 rpg_min_dec_fac[8U] ; __u32 rpg_min_rate[8U] ; __u32 cndd_state_machine[8U] ; }; struct ieee_qcn_stats { __u64 rppp_rp_centiseconds[8U] ; __u32 rppp_created_rps[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 
mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_setqcn)(struct net_device * , struct ieee_qcn * ) ; int (*ieee_getqcnstats)(struct net_device * , struct ieee_qcn_stats * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; int (*setapp)(struct net_device * , u8 , u16 , u8 ) ; int (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 
ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns_for_children ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_stats { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 broadcast ; __u64 multicast ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 spoofchk ; __u32 linkstate ; __u32 min_tx_rate ; __u32 max_tx_rate ; __u32 rss_query_en ; }; struct netpoll_info; struct wireless_dev; struct wpan_dev; struct mpls_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , 
unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; struct napi_struct { struct list_head poll_list ; unsigned long state ; int weight ; unsigned int gro_count ; int (*poll)(struct napi_struct * , int ) ; spinlock_t poll_lock ; int poll_owner ; struct net_device *dev ; struct sk_buff *gro_list ; struct sk_buff *skb ; struct hrtimer timer ; struct list_head dev_list ; struct hlist_node napi_hash_node ; unsigned int napi_id ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; unsigned long tx_maxrate ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct netdev_phys_item_id { unsigned char id[32U] ; unsigned char id_len ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * , void * , u16 (*)(struct net_device * , struct sk_buff * ) ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , __be16 , u16 ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , __be16 , u16 ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * ) ; void 
(*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_busy_poll)(struct napi_struct * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_rate)(struct net_device * , int , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_link_state)(struct net_device * , int , int ) ; int (*ndo_get_vf_stats)(struct net_device * , int , struct ifla_vf_stats * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_set_vf_rss_query_en)(struct net_device * , int , bool ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * , u32 , int ) ; int (*ndo_bridge_dellink)(struct net_device * , struct nlmsghdr * , u16 ) ; int (*ndo_change_carrier)(struct net_device * , bool ) ; int (*ndo_get_phys_port_id)(struct net_device * , struct netdev_phys_item_id * ) ; int (*ndo_get_phys_port_name)(struct net_device * , char * , size_t ) ; void (*ndo_add_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void (*ndo_del_vxlan_port)(struct net_device * , sa_family_t , __be16 ) ; void *(*ndo_dfwd_add_station)(struct net_device * , struct net_device * ) ; void (*ndo_dfwd_del_station)(struct net_device * , void * ) ; netdev_tx_t (*ndo_dfwd_start_xmit)(struct sk_buff * , struct net_device * , void * ) ; int (*ndo_get_lock_subclass)(struct net_device * ) ; netdev_features_t (*ndo_features_check)(struct sk_buff * , struct net_device * , netdev_features_t ) ; int (*ndo_set_tx_maxrate)(struct net_device * , int , u32 ) ; int (*ndo_get_iflink)(struct net_device const * ) ; }; struct __anonstruct_adj_list_330 { struct list_head upper ; struct list_head lower ; }; struct __anonstruct_all_adj_list_331 { struct list_head upper ; struct list_head lower ; }; struct iw_handler_def; struct iw_public_data; struct switchdev_ops; struct vlan_info; struct tipc_bearer; struct in_device; struct dn_dev; struct 
inet6_dev; struct tcf_proto; struct cpu_rmap; struct pcpu_lstats; struct pcpu_sw_netstats; struct pcpu_dstats; struct pcpu_vstats; union __anonunion____missing_field_name_332 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_sw_netstats *tstats ; struct pcpu_dstats *dstats ; struct pcpu_vstats *vstats ; }; struct garp_port; struct mrp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; int irq ; atomic_t carrier_changes ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; struct list_head close_list ; struct list_head ptype_all ; struct list_head ptype_specific ; struct __anonstruct_adj_list_330 adj_list ; struct __anonstruct_all_adj_list_331 all_adj_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; netdev_features_t mpls_features ; int ifindex ; int group ; struct net_device_stats stats ; atomic_long_t rx_dropped ; atomic_long_t tx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct switchdev_ops const *switchdev_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned short neigh_priv_len ; unsigned short dev_id ; unsigned short dev_port ; spinlock_t addr_list_lock ; unsigned char name_assign_type ; bool uc_promisc ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; struct netdev_hw_addr_list dev_addrs ; struct kset *queues_kset ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; struct tipc_bearer *tipc_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; struct wpan_dev *ieee802154_ptr ; struct mpls_dev *mpls_ptr ; unsigned long last_rx ; unsigned char *dev_addr ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; unsigned long gro_flush_timeout ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct tcf_proto *ingress_cl_list ; struct netdev_queue *ingress_queue ; struct list_head nf_hooks_ingress ; unsigned char broadcast[32U] ; struct cpu_rmap *rx_cpu_rmap ; struct hlist_node index_hlist ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; int watchdog_timeo ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; possible_net_t nd_net ; union __anonunion____missing_field_name_332 __annonCompField104 ; struct 
garp_port *garp_port ; struct mrp_port *mrp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct attribute_group const *sysfs_rx_queue_group ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; u16 gso_min_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; }; struct packet_type { __be16 type ; struct net_device *dev ; int (*func)(struct sk_buff * , struct net_device * , struct packet_type * , struct net_device * ) ; bool (*id_match)(struct packet_type * , struct sock * ) ; void *af_packet_priv ; struct list_head list ; }; struct pcpu_sw_netstats { u64 rx_packets ; u64 rx_bytes ; u64 tx_packets ; u64 tx_bytes ; struct u64_stats_sync syncp ; }; struct page_counter { atomic_long_t count ; unsigned long limit ; struct page_counter *parent ; unsigned long watermark ; unsigned long failcnt ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct bpf_insn { __u8 code ; unsigned char dst_reg : 4 ; unsigned char src_reg : 4 ; __s16 off ; __s32 imm ; }; enum bpf_prog_type { BPF_PROG_TYPE_UNSPEC = 0, BPF_PROG_TYPE_SOCKET_FILTER = 1, BPF_PROG_TYPE_KPROBE = 2, BPF_PROG_TYPE_SCHED_CLS = 3, BPF_PROG_TYPE_SCHED_ACT = 4 } ; struct bpf_prog_aux; struct sock_fprog_kern { u16 len ; struct sock_filter *filter ; }; union __anonunion____missing_field_name_343 { struct sock_filter insns[0U] ; struct bpf_insn insnsi[0U] ; }; struct bpf_prog { u16 pages ; bool jited ; bool gpl_compatible ; u32 len ; enum bpf_prog_type type ; struct bpf_prog_aux *aux ; struct sock_fprog_kern *orig_prog ; unsigned int (*bpf_func)(struct sk_buff const * , struct bpf_insn const * ) ; union __anonunion____missing_field_name_343 __annonCompField109 ; }; struct sk_filter { atomic_t refcnt ; struct callback_head rcu ; struct bpf_prog *prog ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; int slave_maxtype ; struct nla_policy const *slave_policy ; int (*slave_validate)(struct nlattr ** , struct nlattr ** ) ; int (*slave_changelink)(struct net_device * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; size_t (*get_slave_size)(struct net_device const * , struct net_device const * ) ; int (*fill_slave_info)(struct sk_buff * , struct net_device const * , struct net_device const * ) ; struct net *(*get_link_net)(struct net_device const * ) ; }; struct neigh_table; struct neigh_parms { 
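/*
 * struct bpf_prog (declared above) keeps either classic sock_filter insns or
 * eBPF bpf_insn insnsi in its trailing zero-length union, with bpf_func as
 * the (possibly JITed) entry point; struct sk_filter merely adds a refcount
 * and RCU head around it.  A minimal sketch of a classic "accept everything"
 * program, assuming the usual cBPF encoding where BPF_RET|BPF_K is 0x06:
 *
 *     struct sock_filter accept_all = {
 *             .code = 0x06,          // BPF_RET | BPF_K
 *             .jt = 0, .jf = 0,
 *             .k  = 0xFFFFFFFFu,     // returned length: keep the whole packet
 *     };
 *     struct sock_fprog_kern fprog = { .len = 1, .filter = &accept_all };
 */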
possible_net_t net ; struct net_device *dev ; struct list_head list ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int reachable_time ; int data[13U] ; unsigned long data_state[1U] ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; possible_net_t net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { int family ; int entry_size ; int key_len ; __be16 protocol ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; bool (*key_eq)(struct neighbour const * , void const * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; struct list_head parms_list ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; struct dn_route; union __anonunion____missing_field_name_354 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; unsigned long expires ; struct dst_entry *path ; struct dst_entry *from ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sock * , struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion____missing_field_name_354 __annonCompField110 ; }; struct __anonstruct_socket_lock_t_355 { spinlock_t slock ; int owned ; 
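/*
 * The neighbour cache above is keyed by tbl->key_len bytes stored in the
 * flexible primary_key[] member at the end of struct neighbour; hash() mixes
 * the key with nht->hash_rnd to pick a bucket and key_eq() confirms a match.
 * A minimal lookup sketch, assuming the caller already holds the table lock
 * (the helper name is illustrative, not from this file):
 *
 *     static struct neighbour *find_neigh(struct neigh_table *tbl,
 *                                         struct neigh_hash_table *nht,
 *                                         const void *pkey, u32 bucket)
 *     {
 *             struct neighbour *n;
 *
 *             for (n = nht->hash_buckets[bucket]; n; n = n->next)
 *                     if (memcmp(n->primary_key, pkey, tbl->key_len) == 0)
 *                             return n;
 *             return NULL;
 *     }
 */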
wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_355 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct____missing_field_name_357 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion____missing_field_name_356 { __addrpair skc_addrpair ; struct __anonstruct____missing_field_name_357 __annonCompField111 ; }; union __anonunion____missing_field_name_358 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct____missing_field_name_360 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion____missing_field_name_359 { __portpair skc_portpair ; struct __anonstruct____missing_field_name_360 __annonCompField114 ; }; union __anonunion____missing_field_name_361 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion____missing_field_name_362 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion____missing_field_name_356 __annonCompField112 ; union __anonunion____missing_field_name_358 __annonCompField113 ; union __anonunion____missing_field_name_359 __annonCompField115 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse : 4 ; unsigned char skc_reuseport : 1 ; unsigned char skc_ipv6only : 1 ; unsigned char skc_net_refcnt : 1 ; int skc_bound_dev_if ; union __anonunion____missing_field_name_361 __annonCompField116 ; struct proto *skc_prot ; possible_net_t skc_net ; struct in6_addr skc_v6_daddr ; struct in6_addr skc_v6_rcv_saddr ; atomic64_t skc_cookie ; int skc_dontcopy_begin[0U] ; union __anonunion____missing_field_name_362 __annonCompField117 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_363 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_363 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; u16 sk_incoming_cpu ; __u32 sk_txhash ; unsigned int sk_napi_id ; unsigned int sk_ll_usec ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check_tx : 1 ; unsigned char sk_no_check_rx : 1 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; u32 sk_pacing_rate ; u32 sk_max_pacing_rate ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; u32 sk_ack_backlog ; u32 sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; struct timer_list sk_timer ; ktime_t sk_stamp ; u16 sk_tsflags ; u32 sk_tskey ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 
sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_366 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; bool (*stream_memory_free)(struct sock const * ) ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_366 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { struct page_counter memory_allocated ; struct percpu_counter sockets_allocated ; int memory_pressure ; long sysctl_mem[3U] ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct request_sock const * ) ; }; struct request_sock { struct sock_common __req_common ; struct request_sock *dl_next ; struct sock *rsk_listener ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; struct timer_list rsk_timer ; struct request_sock_ops 
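/*
 * The sk_state_change/sk_data_ready/sk_write_space pointers in struct sock
 * (above) are the callbacks that in-kernel socket users commonly override
 * while saving the originals.  A minimal sketch under that assumption; the
 * names my_data_ready and orig_data_ready are illustrative only:
 *
 *     static void (*orig_data_ready)(struct sock *sk);
 *
 *     static void my_data_ready(struct sock *sk)
 *     {
 *             // queue our own work here, then chain to the saved callback
 *             orig_data_ready(sk);
 *     }
 *
 *     // installation, with the socket locked:
 *     //     lock_sock(sk);
 *     //     orig_data_ready  = sk->sk_data_ready;
 *     //     sk->sk_data_ready = my_data_ready;
 *     //     release_sock(sk);
 */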
const *rsk_ops ; struct sock *sk ; u32 *saved_syn ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct ipv6_stable_secret { bool initialized ; struct in6_addr secret ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 mldv1_unsolicited_report_interval ; __s32 mldv2_unsolicited_report_interval ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 accept_ra_from_local ; __s32 optimistic_dad ; __s32 use_optimistic ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; __s32 suppress_frag_ndisc ; __s32 accept_ra_mtu ; struct ipv6_stable_secret stable_secret ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6 ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned char mc_dad_count ; unsigned long mc_v1_seen ; unsigned long mc_qi ; unsigned long mc_qri ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct timer_list mc_dad_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct in6_addr token ; struct neigh_parms *nd_parms ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; struct timer_list rs_timer ; __u8 rs_probes ; __u8 addr_gen_mode ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion____missing_field_name_388 { __be32 a4 ; __be32 a6[4U] ; struct in6_addr in6 ; }; struct inetpeer_addr_base { union __anonunion____missing_field_name_388 __annonCompField119 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion____missing_field_name_389 { struct list_head gc_list ; struct 
callback_head gc_rcu ; }; struct __anonstruct____missing_field_name_391 { atomic_t rid ; }; union __anonunion____missing_field_name_390 { struct __anonstruct____missing_field_name_391 __annonCompField121 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[16U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion____missing_field_name_389 __annonCompField120 ; union __anonunion____missing_field_name_390 __annonCompField122 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; int total ; }; struct uncached_list; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; struct uncached_list *rt_uncached_list ; }; struct inet_ehash_bucket { struct hlist_nulls_head chain ; }; struct inet_bind_hashbucket { spinlock_t lock ; struct hlist_head chain ; }; struct inet_listen_hashbucket { spinlock_t lock ; struct hlist_nulls_head head ; }; struct inet_hashinfo { struct inet_ehash_bucket *ehash ; spinlock_t *ehash_locks ; unsigned int ehash_mask ; unsigned int ehash_locks_mask ; struct inet_bind_hashbucket *bhash ; unsigned int bhash_size ; struct kmem_cache *bind_bucket_cachep ; struct inet_listen_hashbucket listening_hash[32U] ; }; struct attribute_container { struct list_head node ; struct klist containers ; struct class *class ; struct attribute_group const *grp ; struct device_attribute **attrs ; int (*match)(struct attribute_container * , struct device * ) ; unsigned long flags ; }; struct transport_container; struct transport_container { struct attribute_container ac ; struct attribute_group const *statistics ; }; struct scsi_transport_template { struct transport_container host_attrs ; struct transport_container target_attrs ; struct transport_container device_attrs ; int (*user_scan)(struct Scsi_Host * , uint , uint , u64 ) ; int device_size ; int device_private_offset ; int target_size ; int target_private_offset ; int host_size ; unsigned char create_work_queue : 1 ; void (*eh_strategy_handler)(struct Scsi_Host * ) ; enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd * ) ; int (*it_nexus_response)(struct Scsi_Host * , u64 , int ) ; int (*tsk_mgmt_response)(struct Scsi_Host * , u64 , u64 , int ) ; }; typedef uint32_t itt_t; struct iscsi_hdr { uint8_t opcode ; uint8_t flags ; uint8_t rsvd2[2U] ; uint8_t hlength ; uint8_t dlength[3U] ; struct scsi_lun lun ; itt_t itt ; __be32 ttt ; __be32 statsn ; __be32 exp_statsn ; __be32 max_statsn ; uint8_t other[12U] ; }; struct iscsi_tm { uint8_t opcode ; uint8_t flags ; uint8_t rsvd1[2U] ; uint8_t hlength ; uint8_t dlength[3U] ; struct scsi_lun lun ; itt_t itt ; itt_t rtt ; __be32 cmdsn ; __be32 exp_statsn ; __be32 refcmdsn ; __be32 exp_datasn ; uint8_t rsvd2[8U] ; }; enum iscsi_tgt_dscvr { ISCSI_TGT_DSCVR_SEND_TARGETS = 1, ISCSI_TGT_DSCVR_ISNS = 2, ISCSI_TGT_DSCVR_SLP = 3 } ; enum iscsi_host_event_code { ISCSI_EVENT_LINKUP = 1, ISCSI_EVENT_LINKDOWN = 2, ISCSI_EVENT_MAX = 3 } ; enum iscsi_param_type { ISCSI_PARAM = 0, ISCSI_HOST_PARAM = 1, ISCSI_NET_PARAM = 2, ISCSI_FLASHNODE_PARAM = 3, ISCSI_CHAP_PARAM = 4, ISCSI_IFACE_PARAM = 5 } ; struct iscsi_param_info { uint32_t len ; uint16_t param ; uint8_t value[0U] ; }; struct iscsi_iface_param_info { uint32_t iface_num ; uint32_t len ; uint16_t 
param ; uint8_t iface_type ; uint8_t param_type ; uint8_t value[0U] ; }; union __anonunion_src_398 { struct in_addr v4_addr ; struct in6_addr v6_addr ; }; union __anonunion_dst_399 { struct in_addr v4_addr ; struct in6_addr v6_addr ; }; struct iscsi_path { uint64_t handle ; uint8_t mac_addr[6U] ; uint8_t mac_addr_old[6U] ; uint32_t ip_addr_len ; union __anonunion_src_398 src ; union __anonunion_dst_399 dst ; uint16_t vlan_id ; uint16_t pmtu ; }; enum iscsi_ipaddress_state { ISCSI_IPDDRESS_STATE_UNCONFIGURED = 0, ISCSI_IPDDRESS_STATE_ACQUIRING = 1, ISCSI_IPDDRESS_STATE_TENTATIVE = 2, ISCSI_IPDDRESS_STATE_VALID = 3, ISCSI_IPDDRESS_STATE_DISABLING = 4, ISCSI_IPDDRESS_STATE_INVALID = 5, ISCSI_IPDDRESS_STATE_DEPRECATED = 6 } ; enum iscsi_router_state { ISCSI_ROUTER_STATE_UNKNOWN = 0, ISCSI_ROUTER_STATE_ADVERTISED = 1, ISCSI_ROUTER_STATE_MANUAL = 2, ISCSI_ROUTER_STATE_STALE = 3 } ; enum iscsi_conn_state { ISCSI_CONN_STATE_FREE = 0, ISCSI_CONN_STATE_XPT_WAIT = 1, ISCSI_CONN_STATE_IN_LOGIN = 2, ISCSI_CONN_STATE_LOGGED_IN = 3, ISCSI_CONN_STATE_IN_LOGOUT = 4, ISCSI_CONN_STATE_LOGOUT_REQUESTED = 5, ISCSI_CONN_STATE_CLEANUP_WAIT = 6 } ; enum iscsi_err { ISCSI_OK = 0, ISCSI_ERR_DATASN = 1001, ISCSI_ERR_DATA_OFFSET = 1002, ISCSI_ERR_MAX_CMDSN = 1003, ISCSI_ERR_EXP_CMDSN = 1004, ISCSI_ERR_BAD_OPCODE = 1005, ISCSI_ERR_DATALEN = 1006, ISCSI_ERR_AHSLEN = 1007, ISCSI_ERR_PROTO = 1008, ISCSI_ERR_LUN = 1009, ISCSI_ERR_BAD_ITT = 1010, ISCSI_ERR_CONN_FAILED = 1011, ISCSI_ERR_R2TSN = 1012, ISCSI_ERR_SESSION_FAILED = 1013, ISCSI_ERR_HDR_DGST = 1014, ISCSI_ERR_DATA_DGST = 1015, ISCSI_ERR_PARAM_NOT_FOUND = 1016, ISCSI_ERR_NO_SCSI_CMD = 1017, ISCSI_ERR_INVALID_HOST = 1018, ISCSI_ERR_XMIT_FAILED = 1019, ISCSI_ERR_TCP_CONN_CLOSE = 1020, ISCSI_ERR_SCSI_EH_SESSION_RST = 1021, ISCSI_ERR_NOP_TIMEDOUT = 1022 } ; enum iscsi_param { ISCSI_PARAM_MAX_RECV_DLENGTH = 0, ISCSI_PARAM_MAX_XMIT_DLENGTH = 1, ISCSI_PARAM_HDRDGST_EN = 2, ISCSI_PARAM_DATADGST_EN = 3, ISCSI_PARAM_INITIAL_R2T_EN = 4, ISCSI_PARAM_MAX_R2T = 5, ISCSI_PARAM_IMM_DATA_EN = 6, ISCSI_PARAM_FIRST_BURST = 7, ISCSI_PARAM_MAX_BURST = 8, ISCSI_PARAM_PDU_INORDER_EN = 9, ISCSI_PARAM_DATASEQ_INORDER_EN = 10, ISCSI_PARAM_ERL = 11, ISCSI_PARAM_IFMARKER_EN = 12, ISCSI_PARAM_OFMARKER_EN = 13, ISCSI_PARAM_EXP_STATSN = 14, ISCSI_PARAM_TARGET_NAME = 15, ISCSI_PARAM_TPGT = 16, ISCSI_PARAM_PERSISTENT_ADDRESS = 17, ISCSI_PARAM_PERSISTENT_PORT = 18, ISCSI_PARAM_SESS_RECOVERY_TMO = 19, ISCSI_PARAM_CONN_PORT = 20, ISCSI_PARAM_CONN_ADDRESS = 21, ISCSI_PARAM_USERNAME = 22, ISCSI_PARAM_USERNAME_IN = 23, ISCSI_PARAM_PASSWORD = 24, ISCSI_PARAM_PASSWORD_IN = 25, ISCSI_PARAM_FAST_ABORT = 26, ISCSI_PARAM_ABORT_TMO = 27, ISCSI_PARAM_LU_RESET_TMO = 28, ISCSI_PARAM_HOST_RESET_TMO = 29, ISCSI_PARAM_PING_TMO = 30, ISCSI_PARAM_RECV_TMO = 31, ISCSI_PARAM_IFACE_NAME = 32, ISCSI_PARAM_ISID = 33, ISCSI_PARAM_INITIATOR_NAME = 34, ISCSI_PARAM_TGT_RESET_TMO = 35, ISCSI_PARAM_TARGET_ALIAS = 36, ISCSI_PARAM_CHAP_IN_IDX = 37, ISCSI_PARAM_CHAP_OUT_IDX = 38, ISCSI_PARAM_BOOT_ROOT = 39, ISCSI_PARAM_BOOT_NIC = 40, ISCSI_PARAM_BOOT_TARGET = 41, ISCSI_PARAM_AUTO_SND_TGT_DISABLE = 42, ISCSI_PARAM_DISCOVERY_SESS = 43, ISCSI_PARAM_PORTAL_TYPE = 44, ISCSI_PARAM_CHAP_AUTH_EN = 45, ISCSI_PARAM_DISCOVERY_LOGOUT_EN = 46, ISCSI_PARAM_BIDI_CHAP_EN = 47, ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL = 48, ISCSI_PARAM_DEF_TIME2WAIT = 49, ISCSI_PARAM_DEF_TIME2RETAIN = 50, ISCSI_PARAM_MAX_SEGMENT_SIZE = 51, ISCSI_PARAM_STATSN = 52, ISCSI_PARAM_KEEPALIVE_TMO = 53, ISCSI_PARAM_LOCAL_PORT = 54, ISCSI_PARAM_TSID = 55, 
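/*
 * Each ISCSI_PARAM_* enumerator above names one connection/session key that
 * the iscsi_transport callbacks (set_param, get_conn_param, ...) switch on.
 * A minimal get_conn_param-style sketch, assuming `conn` is the struct
 * iscsi_conn defined later in this file and `buf` is a page-sized buffer:
 *
 *     switch (param) {
 *     case ISCSI_PARAM_MAX_RECV_DLENGTH:
 *             return sprintf(buf, "%u\n", conn->max_recv_dlength);
 *     case ISCSI_PARAM_HDRDGST_EN:
 *             return sprintf(buf, "%d\n", conn->hdrdgst_en);
 *     case ISCSI_PARAM_PERSISTENT_PORT:
 *             return sprintf(buf, "%d\n", conn->persistent_port);
 *     default:
 *             return -ENOSYS;
 *     }
 */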
ISCSI_PARAM_DEF_TASKMGMT_TMO = 56, ISCSI_PARAM_TCP_TIMESTAMP_STAT = 57, ISCSI_PARAM_TCP_WSF_DISABLE = 58, ISCSI_PARAM_TCP_NAGLE_DISABLE = 59, ISCSI_PARAM_TCP_TIMER_SCALE = 60, ISCSI_PARAM_TCP_TIMESTAMP_EN = 61, ISCSI_PARAM_TCP_XMIT_WSF = 62, ISCSI_PARAM_TCP_RECV_WSF = 63, ISCSI_PARAM_IP_FRAGMENT_DISABLE = 64, ISCSI_PARAM_IPV4_TOS = 65, ISCSI_PARAM_IPV6_TC = 66, ISCSI_PARAM_IPV6_FLOW_LABEL = 67, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6 = 68, ISCSI_PARAM_DISCOVERY_PARENT_IDX = 69, ISCSI_PARAM_DISCOVERY_PARENT_TYPE = 70, ISCSI_PARAM_LOCAL_IPADDR = 71, ISCSI_PARAM_MAX = 72 } ; enum iscsi_host_param { ISCSI_HOST_PARAM_HWADDRESS = 0, ISCSI_HOST_PARAM_INITIATOR_NAME = 1, ISCSI_HOST_PARAM_NETDEV_NAME = 2, ISCSI_HOST_PARAM_IPADDRESS = 3, ISCSI_HOST_PARAM_PORT_STATE = 4, ISCSI_HOST_PARAM_PORT_SPEED = 5, ISCSI_HOST_PARAM_MAX = 6 } ; struct iscsi_flashnode_param_info { uint32_t len ; uint16_t param ; uint8_t value[0U] ; }; struct iscsi_stats_custom { char desc[64U] ; uint64_t value ; }; struct iscsi_stats { uint64_t txdata_octets ; uint64_t rxdata_octets ; uint32_t noptx_pdus ; uint32_t scsicmd_pdus ; uint32_t tmfcmd_pdus ; uint32_t login_pdus ; uint32_t text_pdus ; uint32_t dataout_pdus ; uint32_t logout_pdus ; uint32_t snack_pdus ; uint32_t noprx_pdus ; uint32_t scsirsp_pdus ; uint32_t tmfrsp_pdus ; uint32_t textrsp_pdus ; uint32_t datain_pdus ; uint32_t logoutrsp_pdus ; uint32_t r2t_pdus ; uint32_t async_pdus ; uint32_t rjt_pdus ; uint32_t digest_err ; uint32_t timeout_err ; uint32_t custom_length ; struct iscsi_stats_custom custom[0U] ; }; enum chap_type_e { CHAP_TYPE_OUT = 0, CHAP_TYPE_IN = 1 } ; struct iscsi_chap_rec { uint16_t chap_tbl_idx ; enum chap_type_e chap_type ; char username[256U] ; uint8_t password[256U] ; uint8_t password_length ; }; struct iscsi_host_stats_custom { char desc[64U] ; uint64_t value ; }; struct iscsi_offload_host_stats { uint64_t mactx_frames ; uint64_t mactx_bytes ; uint64_t mactx_multicast_frames ; uint64_t mactx_broadcast_frames ; uint64_t mactx_pause_frames ; uint64_t mactx_control_frames ; uint64_t mactx_deferral ; uint64_t mactx_excess_deferral ; uint64_t mactx_late_collision ; uint64_t mactx_abort ; uint64_t mactx_single_collision ; uint64_t mactx_multiple_collision ; uint64_t mactx_collision ; uint64_t mactx_frames_dropped ; uint64_t mactx_jumbo_frames ; uint64_t macrx_frames ; uint64_t macrx_bytes ; uint64_t macrx_unknown_control_frames ; uint64_t macrx_pause_frames ; uint64_t macrx_control_frames ; uint64_t macrx_dribble ; uint64_t macrx_frame_length_error ; uint64_t macrx_jabber ; uint64_t macrx_carrier_sense_error ; uint64_t macrx_frame_discarded ; uint64_t macrx_frames_dropped ; uint64_t mac_crc_error ; uint64_t mac_encoding_error ; uint64_t macrx_length_error_large ; uint64_t macrx_length_error_small ; uint64_t macrx_multicast_frames ; uint64_t macrx_broadcast_frames ; uint64_t iptx_packets ; uint64_t iptx_bytes ; uint64_t iptx_fragments ; uint64_t iprx_packets ; uint64_t iprx_bytes ; uint64_t iprx_fragments ; uint64_t ip_datagram_reassembly ; uint64_t ip_invalid_address_error ; uint64_t ip_error_packets ; uint64_t ip_fragrx_overlap ; uint64_t ip_fragrx_outoforder ; uint64_t ip_datagram_reassembly_timeout ; uint64_t ipv6tx_packets ; uint64_t ipv6tx_bytes ; uint64_t ipv6tx_fragments ; uint64_t ipv6rx_packets ; uint64_t ipv6rx_bytes ; uint64_t ipv6rx_fragments ; uint64_t ipv6_datagram_reassembly ; uint64_t ipv6_invalid_address_error ; uint64_t ipv6_error_packets ; uint64_t ipv6_fragrx_overlap ; uint64_t ipv6_fragrx_outoforder ; uint64_t 
ipv6_datagram_reassembly_timeout ; uint64_t tcptx_segments ; uint64_t tcptx_bytes ; uint64_t tcprx_segments ; uint64_t tcprx_byte ; uint64_t tcp_duplicate_ack_retx ; uint64_t tcp_retx_timer_expired ; uint64_t tcprx_duplicate_ack ; uint64_t tcprx_pure_ackr ; uint64_t tcptx_delayed_ack ; uint64_t tcptx_pure_ack ; uint64_t tcprx_segment_error ; uint64_t tcprx_segment_outoforder ; uint64_t tcprx_window_probe ; uint64_t tcprx_window_update ; uint64_t tcptx_window_probe_persist ; uint64_t ecc_error_correction ; uint64_t iscsi_pdu_tx ; uint64_t iscsi_data_bytes_tx ; uint64_t iscsi_pdu_rx ; uint64_t iscsi_data_bytes_rx ; uint64_t iscsi_io_completed ; uint64_t iscsi_unexpected_io_rx ; uint64_t iscsi_format_error ; uint64_t iscsi_hdr_digest_error ; uint64_t iscsi_data_digest_error ; uint64_t iscsi_sequence_error ; uint32_t custom_length ; struct iscsi_host_stats_custom custom[0U] ; }; struct iscsi_transport; struct iscsi_conn; struct iscsi_iface; struct iscsi_transport { struct module *owner ; char *name ; unsigned int caps ; struct iscsi_cls_session *(*create_session)(struct iscsi_endpoint * , uint16_t , uint16_t , uint32_t ) ; void (*destroy_session)(struct iscsi_cls_session * ) ; struct iscsi_cls_conn *(*create_conn)(struct iscsi_cls_session * , uint32_t ) ; int (*bind_conn)(struct iscsi_cls_session * , struct iscsi_cls_conn * , uint64_t , int ) ; int (*start_conn)(struct iscsi_cls_conn * ) ; void (*stop_conn)(struct iscsi_cls_conn * , int ) ; void (*destroy_conn)(struct iscsi_cls_conn * ) ; int (*set_param)(struct iscsi_cls_conn * , enum iscsi_param , char * , int ) ; int (*get_ep_param)(struct iscsi_endpoint * , enum iscsi_param , char * ) ; int (*get_conn_param)(struct iscsi_cls_conn * , enum iscsi_param , char * ) ; int (*get_session_param)(struct iscsi_cls_session * , enum iscsi_param , char * ) ; int (*get_host_param)(struct Scsi_Host * , enum iscsi_host_param , char * ) ; int (*set_host_param)(struct Scsi_Host * , enum iscsi_host_param , char * , int ) ; int (*send_pdu)(struct iscsi_cls_conn * , struct iscsi_hdr * , char * , uint32_t ) ; void (*get_stats)(struct iscsi_cls_conn * , struct iscsi_stats * ) ; int (*init_task)(struct iscsi_task * ) ; int (*xmit_task)(struct iscsi_task * ) ; void (*cleanup_task)(struct iscsi_task * ) ; int (*alloc_pdu)(struct iscsi_task * , uint8_t ) ; int (*xmit_pdu)(struct iscsi_task * ) ; int (*init_pdu)(struct iscsi_task * , unsigned int , unsigned int ) ; void (*parse_pdu_itt)(struct iscsi_conn * , itt_t , int * , int * ) ; void (*session_recovery_timedout)(struct iscsi_cls_session * ) ; struct iscsi_endpoint *(*ep_connect)(struct Scsi_Host * , struct sockaddr * , int ) ; int (*ep_poll)(struct iscsi_endpoint * , int ) ; void (*ep_disconnect)(struct iscsi_endpoint * ) ; int (*tgt_dscvr)(struct Scsi_Host * , enum iscsi_tgt_dscvr , uint32_t , struct sockaddr * ) ; int (*set_path)(struct Scsi_Host * , struct iscsi_path * ) ; int (*set_iface_param)(struct Scsi_Host * , void * , uint32_t ) ; int (*get_iface_param)(struct iscsi_iface * , enum iscsi_param_type , int , char * ) ; umode_t (*attr_is_visible)(int , int ) ; int (*bsg_request)(struct bsg_job * ) ; int (*send_ping)(struct Scsi_Host * , uint32_t , uint32_t , uint32_t , uint32_t , struct sockaddr * ) ; int (*get_chap)(struct Scsi_Host * , uint16_t , uint32_t * , char * ) ; int (*delete_chap)(struct Scsi_Host * , uint16_t ) ; int (*set_chap)(struct Scsi_Host * , void * , int ) ; int (*get_flashnode_param)(struct iscsi_bus_flash_session * , int , char * ) ; int (*set_flashnode_param)(struct 
iscsi_bus_flash_session * , struct iscsi_bus_flash_conn * , void * , int ) ; int (*new_flashnode)(struct Scsi_Host * , char const * , int ) ; int (*del_flashnode)(struct iscsi_bus_flash_session * ) ; int (*login_flashnode)(struct iscsi_bus_flash_session * , struct iscsi_bus_flash_conn * ) ; int (*logout_flashnode)(struct iscsi_bus_flash_session * , struct iscsi_bus_flash_conn * ) ; int (*logout_flashnode_sid)(struct iscsi_cls_session * ) ; int (*get_host_stats)(struct Scsi_Host * , char * , int ) ; u8 (*check_protection)(struct iscsi_task * , sector_t * ) ; }; struct iscsi_cls_conn { struct list_head conn_list ; void *dd_data ; struct iscsi_transport *transport ; uint32_t cid ; struct mutex ep_mutex ; struct iscsi_endpoint *ep ; struct device dev ; }; struct iscsi_cls_session { struct list_head sess_list ; struct iscsi_transport *transport ; spinlock_t lock ; struct work_struct block_work ; struct work_struct unblock_work ; struct work_struct scan_work ; struct work_struct unbind_work ; int recovery_tmo ; struct delayed_work recovery_work ; unsigned int target_id ; bool ida_used ; pid_t creator ; int state ; int sid ; void *dd_data ; struct device dev ; }; struct iscsi_cls_host { atomic_t nr_scans ; struct mutex mutex ; struct request_queue *bsg_q ; uint32_t port_speed ; uint32_t port_state ; }; struct iscsi_endpoint { void *dd_data ; struct device dev ; uint64_t id ; struct iscsi_cls_conn *conn ; }; struct iscsi_iface { struct device dev ; struct iscsi_transport *transport ; uint32_t iface_type ; uint32_t iface_num ; void *dd_data ; }; struct iscsi_bus_flash_conn { struct list_head conn_list ; void *dd_data ; struct iscsi_transport *transport ; struct device dev ; uint32_t exp_statsn ; uint32_t statsn ; unsigned int max_recv_dlength ; unsigned int max_xmit_dlength ; unsigned int max_segment_size ; unsigned int tcp_xmit_wsf ; unsigned int tcp_recv_wsf ; int hdrdgst_en ; int datadgst_en ; int port ; char *ipaddress ; char *link_local_ipv6_addr ; char *redirect_ipaddr ; uint16_t keepalive_timeout ; uint16_t local_port ; uint8_t snack_req_en ; uint8_t tcp_timestamp_stat ; uint8_t tcp_nagle_disable ; uint8_t tcp_wsf_disable ; uint8_t tcp_timer_scale ; uint8_t tcp_timestamp_en ; uint8_t ipv4_tos ; uint8_t ipv6_traffic_class ; uint8_t ipv6_flow_label ; uint8_t fragment_disable ; uint8_t is_fw_assigned_ipv6 ; }; struct iscsi_bus_flash_session { struct list_head sess_list ; struct iscsi_transport *transport ; unsigned int target_id ; int flash_state ; void *dd_data ; struct device dev ; unsigned int first_burst ; unsigned int max_burst ; unsigned short max_r2t ; int default_taskmgmt_timeout ; int initial_r2t_en ; int imm_data_en ; int time2wait ; int time2retain ; int pdu_inorder_en ; int dataseq_inorder_en ; int erl ; int tpgt ; char *username ; char *username_in ; char *password ; char *password_in ; char *targetname ; char *targetalias ; char *portal_type ; uint16_t tsid ; uint16_t chap_in_idx ; uint16_t chap_out_idx ; uint16_t discovery_parent_idx ; uint16_t discovery_parent_type ; uint8_t auto_snd_tgt_disable ; uint8_t discovery_sess ; uint8_t entry_state ; uint8_t chap_auth_en ; uint8_t discovery_logout_en ; uint8_t bidi_chap_en ; uint8_t discovery_auth_optional ; uint8_t isid[6U] ; uint8_t is_boot_target ; }; struct __kfifo { unsigned int in ; unsigned int out ; unsigned int mask ; unsigned int esize ; void *data ; }; union __anonunion____missing_field_name_402 { struct __kfifo kfifo ; unsigned char *type ; unsigned char const *const_type ; char (*rectype)[0U] ; void *ptr ; void const 
*ptr_const ; }; struct kfifo { union __anonunion____missing_field_name_402 __annonCompField126 ; unsigned char buf[0U] ; }; struct iscsi_session; struct iscsi_r2t_info { __be32 ttt ; __be32 exp_statsn ; uint32_t data_length ; uint32_t data_offset ; int data_count ; int datasn ; int sent ; }; struct iscsi_task { struct iscsi_hdr *hdr ; unsigned short hdr_max ; unsigned short hdr_len ; itt_t hdr_itt ; __be32 cmdsn ; struct scsi_lun lun ; int itt ; unsigned int imm_count ; struct iscsi_r2t_info unsol_r2t ; char *data ; unsigned int data_count ; struct scsi_cmnd *sc ; struct iscsi_conn *conn ; unsigned long last_xfer ; unsigned long last_timeout ; bool have_checked_conn ; bool protected ; int state ; atomic_t refcount ; struct list_head running ; void *dd_data ; }; struct iscsi_conn { struct iscsi_cls_conn *cls_conn ; void *dd_data ; struct iscsi_session *session ; int stop_stage ; struct timer_list transport_timer ; unsigned long last_recv ; unsigned long last_ping ; int ping_timeout ; int recv_timeout ; struct iscsi_task *ping_task ; uint32_t exp_statsn ; uint32_t statsn ; int id ; int c_stage ; char *data ; struct iscsi_task *login_task ; struct iscsi_task *task ; struct list_head mgmtqueue ; struct list_head cmdqueue ; struct list_head requeue ; struct work_struct xmitwork ; unsigned long suspend_tx ; unsigned long suspend_rx ; wait_queue_head_t ehwait ; struct iscsi_tm tmhdr ; struct timer_list tmf_timer ; int tmf_state ; unsigned int max_recv_dlength ; unsigned int max_xmit_dlength ; int hdrdgst_en ; int datadgst_en ; int ifmarker_en ; int ofmarker_en ; int persistent_port ; char *persistent_address ; unsigned int max_segment_size ; unsigned int tcp_xmit_wsf ; unsigned int tcp_recv_wsf ; uint16_t keepalive_tmo ; uint16_t local_port ; uint8_t tcp_timestamp_stat ; uint8_t tcp_nagle_disable ; uint8_t tcp_wsf_disable ; uint8_t tcp_timer_scale ; uint8_t tcp_timestamp_en ; uint8_t fragment_disable ; uint8_t ipv4_tos ; uint8_t ipv6_traffic_class ; uint8_t ipv6_flow_label ; uint8_t is_fw_assigned_ipv6 ; char *local_ipaddr ; uint64_t txdata_octets ; uint64_t rxdata_octets ; uint32_t scsicmd_pdus_cnt ; uint32_t dataout_pdus_cnt ; uint32_t scsirsp_pdus_cnt ; uint32_t datain_pdus_cnt ; uint32_t r2t_pdus_cnt ; uint32_t tmfcmd_pdus_cnt ; int32_t tmfrsp_pdus_cnt ; uint32_t eh_abort_cnt ; uint32_t fmr_unalign_cnt ; }; struct iscsi_pool { struct kfifo queue ; void **pool ; int max ; }; struct iscsi_session { struct iscsi_cls_session *cls_session ; struct mutex eh_mutex ; uint32_t cmdsn ; uint32_t exp_cmdsn ; uint32_t max_cmdsn ; uint32_t queued_cmdsn ; int abort_timeout ; int lu_reset_timeout ; int tgt_reset_timeout ; int initial_r2t_en ; unsigned short max_r2t ; int imm_data_en ; unsigned int first_burst ; unsigned int max_burst ; int time2wait ; int time2retain ; int pdu_inorder_en ; int dataseq_inorder_en ; int erl ; int fast_abort ; int tpgt ; char *username ; char *username_in ; char *password ; char *password_in ; char *targetname ; char *targetalias ; char *ifacename ; char *initiatorname ; char *boot_root ; char *boot_nic ; char *boot_target ; char *portal_type ; char *discovery_parent_type ; uint16_t discovery_parent_idx ; uint16_t def_taskmgmt_tmo ; uint16_t tsid ; uint8_t auto_snd_tgt_disable ; uint8_t discovery_sess ; uint8_t chap_auth_en ; uint8_t discovery_logout_en ; uint8_t bidi_chap_en ; uint8_t discovery_auth_optional ; uint8_t isid[6U] ; struct iscsi_transport *tt ; struct Scsi_Host *host ; struct iscsi_conn *leadconn ; spinlock_t frwd_lock ; spinlock_t back_lock ; int state ; int age 
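/*
 * libiscsi layers its per-object state through the dd_data pointers declared
 * here: iscsi_cls_session.dd_data typically holds the struct iscsi_session,
 * whose own dd_data is the LLD-private area, and the conn objects nest the
 * same way.  A minimal sketch assuming that layering; my_lld_session is a
 * hypothetical driver type, not something defined in this file:
 *
 *     struct iscsi_session  *sess = cls_session->dd_data;
 *     struct my_lld_session *lld  = sess->dd_data;
 *     struct iscsi_conn     *conn = cls_conn->dd_data;
 */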
; int scsi_cmds_max ; int cmds_max ; struct iscsi_task **cmds ; struct iscsi_pool cmdpool ; void *dd_data ; }; struct port_ctrl_stat_regs { __le32 ext_hw_conf ; __le32 rsrvd0 ; __le32 port_ctrl ; __le32 port_status ; __le32 rsrvd1[32U] ; __le32 gp_out ; __le32 gp_in ; __le32 rsrvd2[5U] ; __le32 port_err_status ; }; struct host_mem_cfg_regs { __le32 rsrvd0[12U] ; __le32 req_q_out ; __le32 rsrvd1[31U] ; }; struct device_reg_82xx { __le32 req_q_out ; __le32 reserve1[63U] ; __le32 rsp_q_in ; __le32 reserve2[63U] ; __le32 rsp_q_out ; __le32 reserve3[63U] ; __le32 mailbox_in[8U] ; __le32 reserve4[24U] ; __le32 hint ; __le32 reserve5[31U] ; __le32 mailbox_out[8U] ; __le32 reserve6[56U] ; __le32 host_status ; __le32 host_int ; }; struct device_reg_83xx { __le32 mailbox_in[16U] ; __le32 reserve1[496U] ; __le32 mailbox_out[16U] ; __le32 reserve2[496U] ; __le32 mbox_int ; __le32 reserve3[63U] ; __le32 req_q_out ; __le32 reserve4[63U] ; __le32 rsp_q_in ; __le32 reserve5[1919U] ; __le32 req_q_in ; __le32 reserve6[3U] ; __le32 iocb_int_mask ; __le32 reserve7[3U] ; __le32 rsp_q_out ; __le32 reserve8[3U] ; __le32 anonymousbuff ; __le32 mb_int_mask ; __le32 host_intr ; __le32 risc_intr ; __le32 reserve9[544U] ; __le32 leg_int_ptr ; __le32 leg_int_trig ; __le32 leg_int_mask ; }; struct __anonstruct_isp4010_414 { __le32 nvram ; __le32 reserved1[2U] ; }; struct __anonstruct_isp4022_415 { __le32 intr_mask ; __le32 nvram ; __le32 semaphore ; }; union __anonunion_u1_413 { struct __anonstruct_isp4010_414 isp4010 ; struct __anonstruct_isp4022_415 isp4022 ; }; struct __anonstruct_isp4010_417 { __le32 ext_hw_conf ; __le32 flow_ctrl ; __le32 port_ctrl ; __le32 port_status ; __le32 reserved3[8U] ; __le32 req_q_out ; __le32 reserved4[23U] ; __le32 gp_out ; __le32 gp_in ; __le32 reserved5[5U] ; __le32 port_err_status ; }; union __anonunion____missing_field_name_419 { struct port_ctrl_stat_regs p0 ; struct host_mem_cfg_regs p1 ; }; struct __anonstruct_isp4022_418 { union __anonunion____missing_field_name_419 __annonCompField129 ; }; union __anonunion_u2_416 { struct __anonstruct_isp4010_417 isp4010 ; struct __anonstruct_isp4022_418 isp4022 ; }; struct isp_reg { __le32 mailbox[8U] ; __le32 flash_address ; __le32 flash_data ; __le32 ctrl_status ; union __anonunion_u1_413 u1 ; __le32 req_q_in ; __le32 rsp_q_out ; __le32 reserved2[4U] ; union __anonunion_u2_416 u2 ; }; struct shadow_regs { __le32 req_q_out ; __le32 rsp_q_in ; }; struct addr_ctrl_blk { uint8_t version ; uint8_t control ; uint16_t fw_options ; uint16_t exec_throttle ; uint8_t zio_count ; uint8_t res0 ; uint16_t eth_mtu_size ; uint16_t add_fw_options ; uint8_t hb_interval ; uint8_t inst_num ; uint16_t res1 ; uint16_t rqq_consumer_idx ; uint16_t compq_producer_idx ; uint16_t rqq_len ; uint16_t compq_len ; uint32_t rqq_addr_lo ; uint32_t rqq_addr_hi ; uint32_t compq_addr_lo ; uint32_t compq_addr_hi ; uint32_t shdwreg_addr_lo ; uint32_t shdwreg_addr_hi ; uint16_t iscsi_opts ; uint16_t ipv4_tcp_opts ; uint16_t ipv4_ip_opts ; uint16_t iscsi_max_pdu_size ; uint8_t ipv4_tos ; uint8_t ipv4_ttl ; uint8_t acb_version ; uint8_t res2 ; uint16_t def_timeout ; uint16_t iscsi_fburst_len ; uint16_t iscsi_def_time2wait ; uint16_t iscsi_def_time2retain ; uint16_t iscsi_max_outstnd_r2t ; uint16_t conn_ka_timeout ; uint16_t ipv4_port ; uint16_t iscsi_max_burst_len ; uint32_t res5 ; uint8_t ipv4_addr[4U] ; uint16_t ipv4_vlan_tag ; uint8_t ipv4_addr_state ; uint8_t ipv4_cacheid ; uint8_t res6[8U] ; uint8_t ipv4_subnet[4U] ; uint8_t res7[12U] ; uint8_t ipv4_gw_addr[4U] ; uint8_t 
res8[12U] ; uint8_t pri_dns_srvr_ip[4U] ; uint8_t sec_dns_srvr_ip[4U] ; uint16_t min_eph_port ; uint16_t max_eph_port ; uint8_t res9[4U] ; uint8_t iscsi_alias[32U] ; uint8_t res9_1[22U] ; uint16_t tgt_portal_grp ; uint8_t abort_timer ; uint8_t ipv4_tcp_wsf ; uint8_t res10[6U] ; uint8_t ipv4_sec_ip_addr[4U] ; uint8_t ipv4_dhcp_vid_len ; uint8_t ipv4_dhcp_vid[11U] ; uint8_t res11[20U] ; uint8_t ipv4_dhcp_alt_cid_len ; uint8_t ipv4_dhcp_alt_cid[11U] ; uint8_t iscsi_name[224U] ; uint8_t res12[32U] ; uint32_t cookie ; uint16_t ipv6_port ; uint16_t ipv6_opts ; uint16_t ipv6_addtl_opts ; uint16_t ipv6_tcp_opts ; uint8_t ipv6_tcp_wsf ; uint16_t ipv6_flow_lbl ; uint8_t ipv6_dflt_rtr_addr[16U] ; uint16_t ipv6_vlan_tag ; uint8_t ipv6_lnk_lcl_addr_state ; uint8_t ipv6_addr0_state ; uint8_t ipv6_addr1_state ; uint8_t ipv6_dflt_rtr_state ; uint8_t ipv6_traffic_class ; uint8_t ipv6_hop_limit ; uint8_t ipv6_if_id[8U] ; uint8_t ipv6_addr0[16U] ; uint8_t ipv6_addr1[16U] ; uint32_t ipv6_nd_reach_time ; uint32_t ipv6_nd_rexmit_timer ; uint32_t ipv6_nd_stale_timeout ; uint8_t ipv6_dup_addr_detect_count ; uint8_t ipv6_cache_id ; uint8_t res13[18U] ; uint32_t ipv6_gw_advrt_mtu ; uint8_t res14[140U] ; }; struct addr_ctrl_blk_def { uint8_t reserved1[1U] ; uint8_t control ; uint8_t reserved2[11U] ; uint8_t inst_num ; uint8_t reserved3[34U] ; uint16_t iscsi_opts ; uint16_t ipv4_tcp_opts ; uint16_t ipv4_ip_opts ; uint16_t iscsi_max_pdu_size ; uint8_t ipv4_tos ; uint8_t ipv4_ttl ; uint8_t reserved4[2U] ; uint16_t def_timeout ; uint16_t iscsi_fburst_len ; uint8_t reserved5[4U] ; uint16_t iscsi_max_outstnd_r2t ; uint8_t reserved6[2U] ; uint16_t ipv4_port ; uint16_t iscsi_max_burst_len ; uint8_t reserved7[4U] ; uint8_t ipv4_addr[4U] ; uint16_t ipv4_vlan_tag ; uint8_t ipv4_addr_state ; uint8_t ipv4_cacheid ; uint8_t reserved8[8U] ; uint8_t ipv4_subnet[4U] ; uint8_t reserved9[12U] ; uint8_t ipv4_gw_addr[4U] ; uint8_t reserved10[84U] ; uint8_t abort_timer ; uint8_t ipv4_tcp_wsf ; uint8_t reserved11[10U] ; uint8_t ipv4_dhcp_vid_len ; uint8_t ipv4_dhcp_vid[11U] ; uint8_t reserved12[20U] ; uint8_t ipv4_dhcp_alt_cid_len ; uint8_t ipv4_dhcp_alt_cid[11U] ; uint8_t iscsi_name[224U] ; uint8_t reserved13[32U] ; uint32_t cookie ; uint16_t ipv6_port ; uint16_t ipv6_opts ; uint16_t ipv6_addtl_opts ; uint16_t ipv6_tcp_opts ; uint8_t ipv6_tcp_wsf ; uint16_t ipv6_flow_lbl ; uint8_t ipv6_dflt_rtr_addr[16U] ; uint16_t ipv6_vlan_tag ; uint8_t ipv6_lnk_lcl_addr_state ; uint8_t ipv6_addr0_state ; uint8_t ipv6_addr1_state ; uint8_t ipv6_dflt_rtr_state ; uint8_t ipv6_traffic_class ; uint8_t ipv6_hop_limit ; uint8_t ipv6_if_id[8U] ; uint8_t ipv6_addr0[16U] ; uint8_t ipv6_addr1[16U] ; uint32_t ipv6_nd_reach_time ; uint32_t ipv6_nd_rexmit_timer ; uint32_t ipv6_nd_stale_timeout ; uint8_t ipv6_dup_addr_detect_count ; uint8_t ipv6_cache_id ; uint8_t reserved14[18U] ; uint32_t ipv6_gw_advrt_mtu ; uint8_t reserved15[140U] ; }; struct ql4_chap_table { uint16_t link ; uint8_t flags ; uint8_t secret_len ; uint8_t secret[100U] ; uint8_t name[256U] ; uint16_t reserved ; uint16_t cookie ; }; struct dev_db_entry { uint16_t options ; uint16_t exec_throttle ; uint16_t exec_count ; uint16_t res0 ; uint16_t iscsi_options ; uint16_t tcp_options ; uint16_t ip_options ; uint16_t iscsi_max_rcv_data_seg_len ; uint32_t res1 ; uint16_t iscsi_max_snd_data_seg_len ; uint16_t iscsi_first_burst_len ; uint16_t iscsi_def_time2wait ; uint16_t iscsi_def_time2retain ; uint16_t iscsi_max_outsnd_r2t ; uint16_t ka_timeout ; uint8_t isid[6U] ; uint16_t tsid ; uint16_t port ; uint16_t 
iscsi_max_burst_len ; uint16_t def_timeout ; uint16_t res2 ; uint8_t ip_addr[16U] ; uint8_t iscsi_alias[32U] ; uint8_t tgt_addr[32U] ; uint16_t mss ; uint16_t res3 ; uint16_t lcl_port ; uint8_t ipv4_tos ; uint16_t ipv6_flow_lbl ; uint8_t res4[54U] ; uint8_t iscsi_name[224U] ; uint8_t link_local_ipv6_addr[16U] ; uint8_t res5[16U] ; uint16_t ddb_link ; uint16_t chap_tbl_idx ; uint16_t tgt_portal_grp ; uint8_t tcp_xmt_wsf ; uint8_t tcp_rcv_wsf ; uint32_t stat_sn ; uint32_t exp_stat_sn ; uint8_t res6[43U] ; uint16_t cookie ; uint16_t len ; }; struct about_fw_info { uint16_t fw_major ; uint16_t fw_minor ; uint16_t fw_patch ; uint16_t fw_build ; uint8_t fw_build_date[16U] ; uint8_t fw_build_time[16U] ; uint8_t fw_build_user[16U] ; uint16_t fw_load_source ; uint8_t reserved1[6U] ; uint16_t iscsi_major ; uint16_t iscsi_minor ; uint16_t bootload_major ; uint16_t bootload_minor ; uint16_t bootload_patch ; uint16_t bootload_build ; uint8_t extended_timestamp[180U] ; }; struct qla4_header { uint8_t entryType ; uint8_t entryStatus ; uint8_t systemDefined ; uint8_t entryCount ; }; struct queue_entry { uint8_t data[60U] ; uint32_t signature ; }; struct passthru_status { struct qla4_header hdr ; uint32_t handle ; uint16_t target ; uint16_t connectionID ; uint8_t completionStatus ; uint8_t residualFlags ; uint16_t timeout ; uint16_t portNumber ; uint8_t res1[10U] ; uint32_t outResidual ; uint8_t res2[12U] ; uint32_t inResidual ; uint8_t res4[16U] ; }; struct mbox_cmd_iocb { struct qla4_header hdr ; uint32_t handle ; uint32_t in_mbox[8U] ; uint32_t res1[6U] ; }; struct ql_iscsi_stats { uint64_t mac_tx_frames ; uint64_t mac_tx_bytes ; uint64_t mac_tx_multicast_frames ; uint64_t mac_tx_broadcast_frames ; uint64_t mac_tx_pause_frames ; uint64_t mac_tx_control_frames ; uint64_t mac_tx_deferral ; uint64_t mac_tx_excess_deferral ; uint64_t mac_tx_late_collision ; uint64_t mac_tx_abort ; uint64_t mac_tx_single_collision ; uint64_t mac_tx_multiple_collision ; uint64_t mac_tx_collision ; uint64_t mac_tx_frames_dropped ; uint64_t mac_tx_jumbo_frames ; uint64_t mac_rx_frames ; uint64_t mac_rx_bytes ; uint64_t mac_rx_unknown_control_frames ; uint64_t mac_rx_pause_frames ; uint64_t mac_rx_control_frames ; uint64_t mac_rx_dribble ; uint64_t mac_rx_frame_length_error ; uint64_t mac_rx_jabber ; uint64_t mac_rx_carrier_sense_error ; uint64_t mac_rx_frame_discarded ; uint64_t mac_rx_frames_dropped ; uint64_t mac_crc_error ; uint64_t mac_encoding_error ; uint64_t mac_rx_length_error_large ; uint64_t mac_rx_length_error_small ; uint64_t mac_rx_multicast_frames ; uint64_t mac_rx_broadcast_frames ; uint64_t ip_tx_packets ; uint64_t ip_tx_bytes ; uint64_t ip_tx_fragments ; uint64_t ip_rx_packets ; uint64_t ip_rx_bytes ; uint64_t ip_rx_fragments ; uint64_t ip_datagram_reassembly ; uint64_t ip_invalid_address_error ; uint64_t ip_error_packets ; uint64_t ip_fragrx_overlap ; uint64_t ip_fragrx_outoforder ; uint64_t ip_datagram_reassembly_timeout ; uint64_t ipv6_tx_packets ; uint64_t ipv6_tx_bytes ; uint64_t ipv6_tx_fragments ; uint64_t ipv6_rx_packets ; uint64_t ipv6_rx_bytes ; uint64_t ipv6_rx_fragments ; uint64_t ipv6_datagram_reassembly ; uint64_t ipv6_invalid_address_error ; uint64_t ipv6_error_packets ; uint64_t ipv6_fragrx_overlap ; uint64_t ipv6_fragrx_outoforder ; uint64_t ipv6_datagram_reassembly_timeout ; uint64_t tcp_tx_segments ; uint64_t tcp_tx_bytes ; uint64_t tcp_rx_segments ; uint64_t tcp_rx_byte ; uint64_t tcp_duplicate_ack_retx ; uint64_t tcp_retx_timer_expired ; uint64_t tcp_rx_duplicate_ack ; uint64_t 
tcp_rx_pure_ackr ; uint64_t tcp_tx_delayed_ack ; uint64_t tcp_tx_pure_ack ; uint64_t tcp_rx_segment_error ; uint64_t tcp_rx_segment_outoforder ; uint64_t tcp_rx_window_probe ; uint64_t tcp_rx_window_update ; uint64_t tcp_tx_window_probe_persist ; uint64_t ecc_error_correction ; uint64_t iscsi_pdu_tx ; uint64_t iscsi_data_bytes_tx ; uint64_t iscsi_pdu_rx ; uint64_t iscsi_data_bytes_rx ; uint64_t iscsi_io_completed ; uint64_t iscsi_unexpected_io_rx ; uint64_t iscsi_format_error ; uint64_t iscsi_hdr_digest_error ; uint64_t iscsi_data_digest_error ; uint64_t iscsi_sequence_error ; uint32_t tx_cmd_pdu ; uint32_t tx_resp_pdu ; uint32_t rx_cmd_pdu ; uint32_t rx_resp_pdu ; uint64_t tx_data_octets ; uint64_t rx_data_octets ; uint32_t hdr_digest_err ; uint32_t data_digest_err ; uint32_t conn_timeout_err ; uint32_t framing_err ; uint32_t tx_nopout_pdus ; uint32_t tx_scsi_cmd_pdus ; uint32_t tx_tmf_cmd_pdus ; uint32_t tx_login_cmd_pdus ; uint32_t tx_text_cmd_pdus ; uint32_t tx_scsi_write_pdus ; uint32_t tx_logout_cmd_pdus ; uint32_t tx_snack_req_pdus ; uint32_t rx_nopin_pdus ; uint32_t rx_scsi_resp_pdus ; uint32_t rx_tmf_resp_pdus ; uint32_t rx_login_resp_pdus ; uint32_t rx_text_resp_pdus ; uint32_t rx_scsi_read_pdus ; uint32_t rx_logout_resp_pdus ; uint32_t rx_r2t_pdus ; uint32_t rx_async_pdus ; uint32_t rx_reject_pdus ; uint8_t reserved2[264U] ; }; struct bios_params { unsigned char SpinUpDelay : 1 ; unsigned char BIOSDisable : 1 ; unsigned char MMAPEnable : 1 ; unsigned char BootEnable : 1 ; unsigned short Reserved0 : 12 ; unsigned char bootID0 : 7 ; unsigned char bootID0Valid : 1 ; uint8_t bootLUN0[8U] ; unsigned char bootID1 : 7 ; unsigned char bootID1Valid : 1 ; uint8_t bootLUN1[8U] ; uint16_t MaxLunsPerTarget ; uint8_t Reserved1[10U] ; }; struct eeprom_port_cfg { u16 etherMtu_mac ; u16 pauseThreshold_mac ; u16 resumeThreshold_mac ; u16 reserved[13U] ; }; struct eeprom_function_cfg { u8 reserved[30U] ; u8 macAddress[6U] ; u8 macAddressSecondary[6U] ; u16 subsysVendorId ; u16 subsysDeviceId ; }; struct __anonstruct_isp4010_423 { u8 asic_id[4U] ; u8 version ; u8 reserved ; u16 board_id ; u8 serial_number[16U] ; u16 ext_hw_conf ; u8 mac0[6U] ; u8 mac1[6U] ; u8 mac2[6U] ; u8 mac3[6U] ; u16 etherMtu ; u16 macConfig ; u16 phyConfig ; u16 reserved_56 ; u8 unused_1[2U] ; u16 bufletSize ; u16 bufletCount ; u16 bufletPauseThreshold ; u16 tcpWindowThreshold50 ; u16 tcpWindowThreshold25 ; u16 tcpWindowThreshold0 ; u16 ipHashTableBaseHi ; u16 ipHashTableBaseLo ; u16 ipHashTableSize ; u16 tcpHashTableBaseHi ; u16 tcpHashTableBaseLo ; u16 tcpHashTableSize ; u16 ncbTableBaseHi ; u16 ncbTableBaseLo ; u16 ncbTableSize ; u16 drbTableBaseHi ; u16 drbTableBaseLo ; u16 drbTableSize ; u8 unused_2[4U] ; u16 ipReassemblyTimeout ; u16 tcpMaxWindowSizeHi ; u16 tcpMaxWindowSizeLo ; u32 net_ip_addr0 ; u32 net_ip_addr1 ; u32 scsi_ip_addr0 ; u32 scsi_ip_addr1 ; u8 unused_3[128U] ; u16 subsysVendorId_f0 ; u16 subsysDeviceId_f0 ; u16 signature ; u8 unused_4[250U] ; u16 subsysVendorId_f1 ; u16 subsysDeviceId_f1 ; u16 checksum ; }; struct __anonstruct_isp4022_424 { u8 asicId[4U] ; u8 version ; u8 reserved_5 ; u16 boardId ; u8 boardIdStr[16U] ; u8 serialNumber[16U] ; u16 ext_hw_conf ; struct eeprom_port_cfg macCfg_port0 ; struct eeprom_port_cfg macCfg_port1 ; u16 bufletSize ; u16 bufletCount ; u16 tcpWindowThreshold50 ; u16 tcpWindowThreshold25 ; u16 tcpWindowThreshold0 ; u16 ipHashTableBaseHi ; u16 ipHashTableBaseLo ; u16 ipHashTableSize ; u16 tcpHashTableBaseHi ; u16 tcpHashTableBaseLo ; u16 tcpHashTableSize ; u16 ncbTableBaseHi 
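/*
 * struct eeprom_data wraps the isp4010 and isp4022 layouts above in one
 * (originally anonymous) union, so both NVRAM images overlay the same bytes
 * and the driver reads whichever member matches the chip family.  A sketch
 * using the pre-CIL anonymous-union spelling, which is an assumption here,
 * with is_isp4010 standing in for the driver's chip-family test:
 *
 *     struct eeprom_data *nv = ha->nvram;
 *     u16 hwcfg = is_isp4010 ? nv->isp4010.ext_hw_conf
 *                            : nv->isp4022.ext_hw_conf;
 */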
; u16 ncbTableBaseLo ; u16 ncbTableSize ; u16 drbTableBaseHi ; u16 drbTableBaseLo ; u16 drbTableSize ; u16 reserved_142[4U] ; u16 ipReassemblyTimeout ; u16 tcpMaxWindowSize ; u16 ipSecurity ; u8 reserved_156[294U] ; u16 qDebug[8U] ; struct eeprom_function_cfg funcCfg_fn0 ; u16 reserved_510 ; u8 oemSpace[432U] ; struct bios_params sBIOSParams_fn1 ; struct eeprom_function_cfg funcCfg_fn1 ; u16 reserved_1022 ; u8 reserved_1024[464U] ; struct eeprom_function_cfg funcCfg_fn2 ; u16 reserved_1534 ; u8 reserved_1536[432U] ; struct bios_params sBIOSParams_fn3 ; struct eeprom_function_cfg funcCfg_fn3 ; u16 checksum ; }; union __anonunion____missing_field_name_422 { struct __anonstruct_isp4010_423 isp4010 ; struct __anonstruct_isp4022_424 isp4022 ; }; struct eeprom_data { union __anonunion____missing_field_name_422 __annonCompField131 ; }; struct qla4_83xx_reset_template_hdr { __le16 version ; __le16 signature ; __le16 size ; __le16 entries ; __le16 hdr_size ; __le16 checksum ; __le16 init_seq_offset ; __le16 start_seq_offset ; }; struct qla4_83xx_reset_template { int seq_index ; int seq_error ; int array_index ; uint32_t array[16U] ; uint8_t *buff ; uint8_t *stop_offset ; uint8_t *start_offset ; uint8_t *init_offset ; struct qla4_83xx_reset_template_hdr *hdr ; uint8_t seq_end ; uint8_t template_end ; }; struct qla4_83xx_idc_information { uint32_t request_desc ; uint32_t info1 ; uint32_t info2 ; uint32_t info3 ; }; struct ddb_entry; struct srb { struct list_head list ; struct scsi_qla_host *ha ; struct ddb_entry *ddb ; uint16_t flags ; uint8_t state ; struct scsi_cmnd *cmd ; dma_addr_t dma_handle ; struct kref srb_ref ; uint8_t err_id ; uint16_t reserved ; uint16_t iocb_tov ; uint16_t iocb_cnt ; uint16_t cc_stat ; uint8_t *req_sense_ptr ; uint16_t req_sense_len ; uint16_t reserved2 ; }; struct mrb { struct scsi_qla_host *ha ; struct mbox_cmd_iocb *mbox ; uint32_t mbox_cmd ; uint16_t iocb_cnt ; uint32_t pid ; }; struct aen { uint32_t mbox_sts[8U] ; }; struct ql4_aen_log { int count ; struct aen entry[512U] ; }; struct ddb_entry { struct scsi_qla_host *ha ; struct iscsi_cls_session *sess ; struct iscsi_cls_conn *conn ; uint16_t fw_ddb_index ; uint32_t fw_ddb_device_state ; uint16_t ddb_type ; struct dev_db_entry fw_ddb_entry ; int (*unblock_sess)(struct iscsi_cls_session * ) ; int (*ddb_change)(struct scsi_qla_host * , uint32_t , struct ddb_entry * , uint32_t ) ; unsigned long flags ; uint16_t default_relogin_timeout ; atomic_t retry_relogin_timer ; atomic_t relogin_timer ; atomic_t relogin_retry_count ; uint32_t default_time2wait ; uint16_t chap_tbl_idx ; }; struct qla_ddb_index { struct list_head list ; uint16_t fw_ddb_idx ; uint16_t flash_ddb_idx ; struct dev_db_entry fw_ddb ; uint8_t flash_isid[6U] ; }; struct ql4_tuple_ddb { int port ; int tpgt ; char ip_addr[64U] ; char iscsi_name[224U] ; uint16_t options ; uint8_t isid[6U] ; }; enum qla4_work_type { QLA4_EVENT_AEN = 0, QLA4_EVENT_PING_STATUS = 1 } ; struct __anonstruct_aen_427 { enum iscsi_host_event_code code ; uint32_t data_size ; uint8_t data[0U] ; }; struct __anonstruct_ping_428 { uint32_t status ; uint32_t pid ; uint32_t data_size ; uint8_t data[0U] ; }; union __anonunion_u_426 { struct __anonstruct_aen_427 aen ; struct __anonstruct_ping_428 ping ; }; struct qla4_work_evt { struct list_head list ; enum qla4_work_type type ; union __anonunion_u_426 u ; }; struct ql82xx_hw_data { uint32_t flash_conf_off ; uint32_t flash_data_off ; uint32_t fdt_wrt_disable ; uint32_t fdt_erase_cmd ; uint32_t fdt_block_size ; uint32_t fdt_unprotect_sec_cmd ; 
uint32_t fdt_protect_sec_cmd ; uint32_t flt_region_flt ; uint32_t flt_region_fdt ; uint32_t flt_region_boot ; uint32_t flt_region_bootload ; uint32_t flt_region_fw ; uint32_t flt_iscsi_param ; uint32_t flt_region_chap ; uint32_t flt_chap_size ; uint32_t flt_region_ddb ; uint32_t flt_ddb_size ; }; struct qla4_8xxx_legacy_intr_set { uint32_t int_vec_bit ; uint32_t tgt_status_reg ; uint32_t tgt_mask_reg ; uint32_t pci_int_reg ; }; struct ql4_msix_entry { int have_irq ; uint16_t msix_vector ; uint16_t msix_entry ; }; struct isp_operations { int (*iospace_config)(struct scsi_qla_host * ) ; void (*pci_config)(struct scsi_qla_host * ) ; void (*disable_intrs)(struct scsi_qla_host * ) ; void (*enable_intrs)(struct scsi_qla_host * ) ; int (*start_firmware)(struct scsi_qla_host * ) ; int (*restart_firmware)(struct scsi_qla_host * ) ; irqreturn_t (*intr_handler)(int , void * ) ; void (*interrupt_service_routine)(struct scsi_qla_host * , uint32_t ) ; int (*need_reset)(struct scsi_qla_host * ) ; int (*reset_chip)(struct scsi_qla_host * ) ; int (*reset_firmware)(struct scsi_qla_host * ) ; void (*queue_iocb)(struct scsi_qla_host * ) ; void (*complete_iocb)(struct scsi_qla_host * ) ; uint16_t (*rd_shdw_req_q_out)(struct scsi_qla_host * ) ; uint16_t (*rd_shdw_rsp_q_in)(struct scsi_qla_host * ) ; int (*get_sys_info)(struct scsi_qla_host * ) ; uint32_t (*rd_reg_direct)(struct scsi_qla_host * , ulong ) ; void (*wr_reg_direct)(struct scsi_qla_host * , ulong , uint32_t ) ; int (*rd_reg_indirect)(struct scsi_qla_host * , uint32_t , uint32_t * ) ; int (*wr_reg_indirect)(struct scsi_qla_host * , uint32_t , uint32_t ) ; int (*idc_lock)(struct scsi_qla_host * ) ; void (*idc_unlock)(struct scsi_qla_host * ) ; void (*rom_lock_recovery)(struct scsi_qla_host * ) ; void (*queue_mailbox_command)(struct scsi_qla_host * , uint32_t * , int ) ; void (*process_mailbox_interrupt)(struct scsi_qla_host * , int ) ; }; struct ipaddress_config { uint16_t ipv4_options ; uint16_t tcp_options ; uint16_t ipv4_vlan_tag ; uint8_t ipv4_addr_state ; uint8_t ip_address[4U] ; uint8_t subnet_mask[4U] ; uint8_t gateway[4U] ; uint32_t ipv6_options ; uint32_t ipv6_addl_options ; uint8_t ipv6_link_local_state ; uint8_t ipv6_addr0_state ; uint8_t ipv6_addr1_state ; uint8_t ipv6_default_router_state ; uint16_t ipv6_vlan_tag ; struct in6_addr ipv6_link_local_addr ; struct in6_addr ipv6_addr0 ; struct in6_addr ipv6_addr1 ; struct in6_addr ipv6_default_router_addr ; uint16_t eth_mtu_size ; uint16_t ipv4_port ; uint16_t ipv6_port ; uint8_t control ; uint16_t ipv6_tcp_options ; uint8_t tcp_wsf ; uint8_t ipv6_tcp_wsf ; uint8_t ipv4_tos ; uint8_t ipv4_cache_id ; uint8_t ipv6_cache_id ; uint8_t ipv4_alt_cid_len ; uint8_t ipv4_alt_cid[11U] ; uint8_t ipv4_vid_len ; uint8_t ipv4_vid[11U] ; uint8_t ipv4_ttl ; uint16_t ipv6_flow_lbl ; uint8_t ipv6_traffic_class ; uint8_t ipv6_hop_limit ; uint32_t ipv6_nd_reach_time ; uint32_t ipv6_nd_rexmit_timer ; uint32_t ipv6_nd_stale_timeout ; uint8_t ipv6_dup_addr_detect_count ; uint32_t ipv6_gw_advrt_mtu ; uint16_t def_timeout ; uint8_t abort_timer ; uint16_t iscsi_options ; uint16_t iscsi_max_pdu_size ; uint16_t iscsi_first_burst_len ; uint16_t iscsi_max_outstnd_r2t ; uint16_t iscsi_max_burst_len ; uint8_t iscsi_name[224U] ; }; struct ql4_chap_format { u8 intr_chap_name[256U] ; u8 intr_secret[100U] ; u8 target_chap_name[256U] ; u8 target_secret[100U] ; u16 intr_chap_name_length ; u16 intr_secret_length ; u16 target_chap_name_length ; u16 target_secret_length ; }; struct ip_address_format { u8 ip_type ; u8 ip_address[16U] ; 
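/*
 * struct isp_operations above is the per-chip vtable that struct
 * scsi_qla_host (below) carries as ha->isp_ops, so common code can stay
 * chip-agnostic.  A minimal sketch, assuming QLA_SUCCESS is the driver's
 * zero-on-success status and with addr/mask as placeholders:
 *
 *     uint32_t val;
 *
 *     ha->isp_ops->disable_intrs(ha);
 *     if (ha->isp_ops->rd_reg_indirect(ha, addr, &val) == QLA_SUCCESS)
 *             ha->isp_ops->wr_reg_indirect(ha, addr, val | mask);
 *     ha->isp_ops->enable_intrs(ha);
 */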
}; struct ql4_conn_info { u16 dest_port ; struct ip_address_format dest_ipaddr ; struct ql4_chap_format chap ; }; struct ql4_boot_session_info { u8 target_name[224U] ; struct ql4_conn_info conn_list[1U] ; }; struct ql4_boot_tgt_info { struct ql4_boot_session_info boot_pri_sess ; struct ql4_boot_session_info boot_sec_sess ; }; struct scsi_qla_host { unsigned long flags ; unsigned long dpc_flags ; struct Scsi_Host *host ; uint32_t tot_ddbs ; uint16_t iocb_cnt ; uint16_t iocb_hiwat ; mempool_t *srb_mempool ; struct pci_dev *pdev ; struct isp_reg *reg ; unsigned long pio_address ; unsigned long pio_length ; uint16_t req_q_count ; unsigned long host_no ; struct eeprom_data *nvram ; spinlock_t hardware_lock ; uint32_t eeprom_cmd_data ; uint64_t isr_count ; uint64_t adapter_error_count ; uint64_t device_error_count ; uint64_t total_io_count ; uint64_t total_mbytes_xferred ; uint64_t link_failure_count ; uint64_t invalid_crc_count ; uint32_t bytes_xfered ; uint32_t spurious_int_count ; uint32_t aborted_io_count ; uint32_t io_timeout_count ; uint32_t mailbox_timeout_count ; uint32_t seconds_since_last_intr ; uint32_t seconds_since_last_heartbeat ; uint32_t mac_index ; uint32_t firmware_version[2U] ; uint32_t patch_number ; uint32_t build_number ; uint32_t board_id ; uint16_t firmware_options ; uint8_t alias[32U] ; uint8_t name_string[256U] ; uint8_t heartbeat_interval ; uint8_t my_mac[6U] ; uint8_t serial_number[16U] ; uint16_t port_num ; uint32_t firmware_state ; uint32_t addl_fw_state ; struct workqueue_struct *dpc_thread ; struct work_struct dpc_work ; struct timer_list timer ; uint32_t timer_active ; atomic_t check_relogin_timeouts ; uint32_t retry_reset_ha_cnt ; uint32_t isp_reset_timer ; uint32_t nic_reset_timer ; int eh_start ; struct list_head free_srb_q ; uint16_t free_srb_q_count ; uint16_t num_srbs_allocated ; void *queues ; dma_addr_t queues_dma ; unsigned long queues_len ; dma_addr_t request_dma ; struct queue_entry *request_ring ; struct queue_entry *request_ptr ; dma_addr_t response_dma ; struct queue_entry *response_ring ; struct queue_entry *response_ptr ; dma_addr_t shadow_regs_dma ; struct shadow_regs *shadow_regs ; uint16_t request_in ; uint16_t request_out ; uint16_t response_in ; uint16_t response_out ; uint16_t aen_q_count ; uint16_t aen_in ; uint16_t aen_out ; struct aen aen_q[512U] ; struct ql4_aen_log aen_log ; struct mutex mbox_sem ; uint8_t volatile mbox_status_count ; uint32_t volatile mbox_status[8U] ; struct ddb_entry *fw_ddb_index_map[512U] ; struct srb *status_srb ; uint8_t acb_version ; struct device_reg_82xx *qla4_82xx_reg ; unsigned long nx_pcibase ; uint8_t *nx_db_rd_ptr ; unsigned long nx_db_wr_ptr ; unsigned long first_page_group_start ; unsigned long first_page_group_end ; uint32_t crb_win ; uint32_t curr_window ; uint32_t ddr_mn_window ; unsigned long mn_win_crb ; unsigned long ms_win_crb ; int qdr_sn_window ; rwlock_t hw_lock ; uint16_t func_num ; int link_width ; struct qla4_8xxx_legacy_intr_set nx_legacy_intr ; u32 nx_crb_mask ; uint8_t revision_id ; uint32_t fw_heartbeat_counter ; struct isp_operations *isp_ops ; struct ql82xx_hw_data hw ; struct ql4_msix_entry msix_entries[2U] ; uint32_t nx_dev_init_timeout ; uint32_t nx_reset_timeout ; void *fw_dump ; uint32_t fw_dump_size ; uint32_t fw_dump_capture_mask ; void *fw_dump_tmplt_hdr ; uint32_t fw_dump_tmplt_size ; uint32_t fw_dump_skip_size ; struct completion mbx_intr_comp ; struct ipaddress_config ip_config ; struct iscsi_iface *iface_ipv4 ; struct iscsi_iface *iface_ipv6_0 ; struct iscsi_iface 
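/* The adapter keeps one iSCSI interface object for IPv4 (iface_ipv4 above)
 * and two for IPv6 (iface_ipv6_0 above, iface_ipv6_1 below). */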
*iface_ipv6_1 ; struct about_fw_info fw_info ; uint32_t fw_uptime_secs ; uint32_t fw_uptime_msecs ; uint16_t def_timeout ; uint32_t flash_state ; struct dma_pool *chap_dma_pool ; uint8_t *chap_list ; struct mutex chap_sem ; struct workqueue_struct *task_wq ; unsigned long ddb_idx_map[8U] ; struct iscsi_boot_kset *boot_kset ; struct ql4_boot_tgt_info boot_tgt ; uint16_t phy_port_num ; uint16_t phy_port_cnt ; uint16_t iscsi_pci_func_cnt ; uint8_t model_name[16U] ; struct completion disable_acb_comp ; struct dma_pool *fw_ddb_dma_pool ; uint16_t pri_ddb_idx ; uint16_t sec_ddb_idx ; int is_reset ; uint16_t temperature ; struct list_head work_list ; spinlock_t work_lock ; struct mrb *active_mrb_array[128U] ; uint32_t mrb_index ; uint32_t *reg_tbl ; struct qla4_83xx_reset_template reset_tmplt ; struct device_reg_83xx *qla4_83xx_reg ; uint32_t pf_bit ; struct qla4_83xx_idc_information idc_info ; struct addr_ctrl_blk *saved_acb ; int notify_idc_comp ; int notify_link_up_comp ; int idc_extend_tmo ; struct completion idc_comp ; struct completion link_up_comp ; }; struct ql4_task_data { struct scsi_qla_host *ha ; uint8_t iocb_req_cnt ; dma_addr_t data_dma ; void *req_buffer ; dma_addr_t req_dma ; uint32_t req_len ; void *resp_buffer ; dma_addr_t resp_dma ; uint32_t resp_len ; struct iscsi_task *task ; struct passthru_status sts ; struct work_struct task_work ; }; struct qla_endpoint { struct Scsi_Host *host ; struct __kernel_sockaddr_storage dst_addr ; }; struct qla_conn { struct qla_endpoint *qla_ep ; }; typedef bool ldv_func_ret_type___2; typedef bool ldv_func_ret_type___3; typedef bool ldv_func_ret_type___4; typedef bool ldv_func_ret_type___5; typedef int ldv_func_ret_type___6; typedef int ldv_func_ret_type___7; typedef int ldv_func_ret_type___8; typedef int ldv_func_ret_type___9; typedef int ldv_func_ret_type___10; typedef signed char __s8; typedef __s8 int8_t; enum hrtimer_restart; struct __anonstruct____missing_field_name_420 { unsigned char bReserved0 : 1 ; unsigned char bSDRAMProtectionMethod : 2 ; unsigned char bSDRAMBanks : 1 ; unsigned char bSDRAMChipWidth : 1 ; unsigned char bSDRAMChipSize : 2 ; unsigned char bParityDisable : 1 ; unsigned char bExternalMemoryType : 1 ; unsigned char bFlashBIOSWriteEnable : 1 ; unsigned char bFlashUpperBankSelect : 1 ; unsigned char bWriteBurst : 2 ; unsigned char bReserved1 : 3 ; unsigned short bMask ; }; union external_hw_config_reg { struct __anonstruct____missing_field_name_420 __annonCompField130 ; uint32_t Asuint32_t ; }; struct sys_info_phys_addr { uint8_t address[6U] ; uint8_t filler[2U] ; }; struct flash_sys_info { uint32_t cookie ; uint32_t physAddrCount ; struct sys_info_phys_addr physAddr[4U] ; uint8_t vendorId[128U] ; uint8_t productId[128U] ; uint32_t serialNumber ; uint32_t pciDeviceVendor ; uint32_t pciDeviceId ; uint32_t pciSubsysVendor ; uint32_t pciSubsysId ; uint32_t crumbs ; uint32_t enterpriseNumber ; uint32_t mtu ; uint32_t reserved0 ; uint32_t crumbs2 ; uint8_t acSerialNumber[16U] ; uint32_t crumbs3 ; uint32_t reserved1[39U] ; }; struct response { uint8_t data[60U] ; uint32_t signature ; }; struct qla4_8xxx_minidump_template_hdr { uint32_t entry_type ; uint32_t first_entry_offset ; uint32_t size_of_template ; uint32_t capture_debug_level ; uint32_t num_of_entries ; uint32_t version ; uint32_t driver_timestamp ; uint32_t checksum ; uint32_t driver_capture_mask ; uint32_t driver_info_word2 ; uint32_t driver_info_word3 ; uint32_t driver_info_word4 ; uint32_t saved_state_array[16U] ; uint32_t capture_size_array[8U] ; uint32_t 
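/* qla4_8xxx_minidump_template_hdr describes the firmware minidump template
 * (cf. fw_dump_tmplt_hdr in scsi_qla_host): entry count and first-entry
 * offset, template size, version, checksum, capture mask, plus saved-state
 * and capture-size scratch arrays. */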
ocm_window_reg[16U] ; uint32_t capabilities[16U] ; }; typedef unsigned long u_long; enum hrtimer_restart; struct crash_record { uint16_t fw_major_version ; uint16_t fw_minor_version ; uint16_t fw_patch_version ; uint16_t fw_build_version ; uint8_t build_date[16U] ; uint8_t build_time[16U] ; uint8_t build_user[16U] ; uint8_t card_serial_num[16U] ; uint32_t time_of_crash_in_secs ; uint32_t time_of_crash_in_ms ; uint16_t out_RISC_sd_num_frames ; uint16_t OAP_sd_num_words ; uint16_t IAP_sd_num_frames ; uint16_t in_RISC_sd_num_words ; uint8_t reserved1[28U] ; uint8_t out_RISC_reg_dump[256U] ; uint8_t in_RISC_reg_dump[256U] ; uint8_t in_out_RISC_stack_dump[0U] ; }; struct conn_event_log_entry { uint32_t timestamp_sec ; uint32_t timestamp_ms ; uint16_t device_index ; uint16_t fw_conn_state ; uint8_t event_type ; uint8_t error_code ; uint16_t error_code_detail ; uint8_t num_consecutive_events ; uint8_t rsvd[3U] ; }; enum hrtimer_restart; struct __anonstruct_base_421 { uint32_t addrLow ; uint32_t addrHigh ; }; struct data_seg_a64 { struct __anonstruct_base_421 base ; uint32_t count ; }; struct command_t3_entry { struct qla4_header hdr ; uint32_t handle ; uint16_t target ; uint16_t connection_id ; uint8_t control_flags ; uint8_t state_flags ; uint8_t cmdRefNum ; uint8_t reserved1 ; uint8_t cdb[16U] ; struct scsi_lun lun ; uint32_t cmdSeqNum ; uint16_t timeout ; uint16_t dataSegCnt ; uint32_t ttlByteCnt ; struct data_seg_a64 dataseg[1U] ; }; struct continuation_t1_entry { struct qla4_header hdr ; struct data_seg_a64 dataseg[5U] ; }; struct qla4_marker_entry { struct qla4_header hdr ; uint32_t system_defined ; uint16_t target ; uint16_t modifier ; uint16_t flags ; uint16_t reserved1 ; struct scsi_lun lun ; uint64_t reserved2 ; uint64_t reserved3 ; uint64_t reserved4 ; uint64_t reserved5 ; uint64_t reserved6 ; }; struct passthru0 { struct qla4_header hdr ; uint32_t handle ; uint16_t target ; uint16_t connection_id ; uint16_t control_flags ; uint16_t timeout ; struct data_seg_a64 out_dsd ; uint32_t res1 ; struct data_seg_a64 in_dsd ; uint8_t res2[20U] ; }; enum hrtimer_restart; struct status_entry { struct qla4_header hdr ; uint32_t handle ; uint8_t scsiStatus ; uint8_t iscsiFlags ; uint8_t iscsiResponse ; uint8_t completionStatus ; uint8_t reserved1 ; uint8_t state_flags ; uint16_t senseDataByteCnt ; uint32_t residualByteCnt ; uint32_t bidiResidualByteCnt ; uint32_t expSeqNum ; uint32_t maxCmdSeqNum ; uint8_t senseData[32U] ; }; struct status_cont_entry { struct qla4_header hdr ; uint8_t ext_sense_data[60U] ; }; struct mbox_status_iocb { struct qla4_header hdr ; uint32_t handle ; uint32_t out_mbox[8U] ; uint32_t res1[6U] ; }; enum hrtimer_restart; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_MAX = 6 } ; struct msix_entry { u32 vector ; u16 entry ; }; struct crb_128M_2M_sub_block_map { unsigned int valid ; unsigned int start_128M ; unsigned int end_128M ; unsigned int start_2M ; }; struct crb_128M_2M_block_map { struct crb_128M_2M_sub_block_map sub_block[16U] ; }; struct crb_addr_pair { long addr ; long data ; }; struct __anonstruct_d_ctrl_405 { uint8_t entry_capture_mask ; uint8_t entry_code ; uint8_t driver_code ; uint8_t driver_flags ; }; struct qla8xxx_minidump_entry_hdr { uint32_t entry_type ; uint32_t entry_size ; uint32_t entry_capture_size ; struct __anonstruct_d_ctrl_405 d_ctrl ; }; struct __anonstruct_crb_strd_406 { uint8_t addr_stride ; uint8_t state_index_a ; uint16_t poll_timeout ; }; struct 
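/* Each minidump entry begins with qla8xxx_minidump_entry_hdr (entry_type,
 * entry_size, entry_capture_size and d_ctrl.entry_capture_mask); the
 * per-type structs that follow add the CRB/cache/OCM/memory/mux specific
 * fields. */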
__anonstruct_crb_ctrl_407 { uint8_t opcode ; uint8_t state_index_v ; uint8_t shl ; uint8_t shr ; }; struct qla8xxx_minidump_entry_crb { struct qla8xxx_minidump_entry_hdr h ; uint32_t addr ; struct __anonstruct_crb_strd_406 crb_strd ; uint32_t data_size ; uint32_t op_count ; struct __anonstruct_crb_ctrl_407 crb_ctrl ; uint32_t value_1 ; uint32_t value_2 ; uint32_t value_3 ; }; struct __anonstruct_addr_ctrl_408 { uint16_t tag_value_stride ; uint16_t init_tag_value ; }; struct __anonstruct_cache_ctrl_409 { uint16_t write_value ; uint8_t poll_mask ; uint8_t poll_wait ; }; struct __anonstruct_read_ctrl_410 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_1 ; }; struct qla8xxx_minidump_entry_cache { struct qla8xxx_minidump_entry_hdr h ; uint32_t tag_reg_addr ; struct __anonstruct_addr_ctrl_408 addr_ctrl ; uint32_t data_size ; uint32_t op_count ; uint32_t control_addr ; struct __anonstruct_cache_ctrl_409 cache_ctrl ; uint32_t read_addr ; struct __anonstruct_read_ctrl_410 read_ctrl ; }; struct qla8xxx_minidump_entry_rdocm { struct qla8xxx_minidump_entry_hdr h ; uint32_t rsvd_0 ; uint32_t rsvd_1 ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_2 ; uint32_t rsvd_3 ; uint32_t read_addr ; uint32_t read_addr_stride ; }; struct qla8xxx_minidump_entry_rdmem { struct qla8xxx_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8xxx_minidump_entry_rdrom { struct qla8xxx_minidump_entry_hdr h ; uint32_t rsvd[6U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct qla8xxx_minidump_entry_mux { struct qla8xxx_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t rsvd_0 ; uint32_t data_size ; uint32_t op_count ; uint32_t select_value ; uint32_t select_value_stride ; uint32_t read_addr ; uint32_t rsvd_1 ; }; struct __anonstruct_q_strd_411 { uint16_t queue_id_stride ; uint16_t rsvd_0 ; }; struct __anonstruct_rd_strd_412 { uint8_t read_addr_stride ; uint8_t read_addr_cnt ; uint16_t rsvd_3 ; }; struct qla8xxx_minidump_entry_queue { struct qla8xxx_minidump_entry_hdr h ; uint32_t select_addr ; struct __anonstruct_q_strd_411 q_strd ; uint32_t data_size ; uint32_t op_count ; uint32_t rsvd_1 ; uint32_t rsvd_2 ; uint32_t read_addr ; struct __anonstruct_rd_strd_412 rd_strd ; }; struct qla_fdt_layout { uint8_t sig[4U] ; uint16_t version ; uint16_t len ; uint16_t checksum ; uint8_t unused1[2U] ; uint8_t model[16U] ; uint16_t man_id ; uint16_t id ; uint8_t flags ; uint8_t erase_cmd ; uint8_t alt_erase_cmd ; uint8_t wrt_enable_cmd ; uint8_t wrt_enable_bits ; uint8_t wrt_sts_reg_cmd ; uint8_t unprotect_sec_cmd ; uint8_t read_man_id_cmd ; uint32_t block_size ; uint32_t alt_block_size ; uint32_t flash_size ; uint32_t wrt_enable_data ; uint8_t read_id_addr_len ; uint8_t wrt_disable_bits ; uint8_t read_dev_id_len ; uint8_t chip_erase_cmd ; uint16_t read_timeout ; uint8_t protect_sec_cmd ; uint8_t unused2[65U] ; }; struct qla_flt_header { uint16_t version ; uint16_t length ; uint16_t checksum ; uint16_t unused ; }; struct qla_flt_region { uint32_t code ; uint32_t size ; uint32_t start ; uint32_t end ; }; struct mbx_sys_info { uint8_t board_id_str[16U] ; uint16_t board_id ; uint16_t phys_port_cnt ; uint16_t port_num ; uint8_t mac_addr[6U] ; uint32_t iscsi_pci_func_cnt ; uint32_t pci_func ; unsigned char serial_number[16U] ; uint8_t reserved[12U] ; }; struct qla83xx_minidump_entry_pollrd { struct qla8xxx_minidump_entry_hdr h ; uint32_t select_addr ; uint32_t read_addr ; uint32_t select_value ; uint16_t select_value_stride ; uint16_t op_count ; uint32_t 
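/* qla83xx_minidump_entry_pollrd: judging by the field names, a poll-then-read
 * capture entry: op_count iterations over select_addr (stepping select_value
 * by select_value_stride), polling with poll_mask for up to poll_wait, then
 * reading read_addr. */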
poll_wait ; uint32_t poll_mask ; uint32_t data_size ; uint32_t rsvd_1 ; }; struct qla8044_minidump_entry_rddfe { struct qla8xxx_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t value ; uint8_t stride ; uint8_t stride2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t modify_mask ; uint32_t data_size ; uint32_t rsvd ; }; struct qla8044_minidump_entry_rdmdio { struct qla8xxx_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint8_t stride_1 ; uint8_t stride_2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t value_2 ; uint32_t data_size ; }; struct qla8044_minidump_entry_pollwr { struct qla8xxx_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t poll ; uint32_t mask ; uint32_t data_size ; uint32_t rsvd ; }; struct qla83xx_minidump_entry_rdmux2 { struct qla8xxx_minidump_entry_hdr h ; uint32_t select_addr_1 ; uint32_t select_addr_2 ; uint32_t select_value_1 ; uint32_t select_value_2 ; uint32_t op_count ; uint32_t select_value_mask ; uint32_t read_addr ; uint8_t select_value_stride ; uint8_t data_size ; uint8_t rsvd[2U] ; }; struct qla83xx_minidump_entry_pollrdmwr { struct qla8xxx_minidump_entry_hdr h ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t poll_wait ; uint32_t poll_mask ; uint32_t modify_mask ; uint32_t data_size ; }; struct qla4_83xx_minidump_entry_rdmem_pex_dma { struct qla8xxx_minidump_entry_hdr h ; uint32_t desc_card_addr ; uint16_t dma_desc_cmd ; uint8_t rsvd[2U] ; uint32_t start_dma_cmd ; uint8_t rsvd2[12U] ; uint32_t read_addr ; uint32_t read_data_size ; }; struct __anonstruct_cmd_425 { uint32_t read_data_size ; uint8_t rsvd[2U] ; uint16_t dma_desc_cmd ; }; struct qla4_83xx_pex_dma_descriptor { struct __anonstruct_cmd_425 cmd ; uint64_t src_addr ; uint64_t dma_bus_addr ; uint8_t rsvd[24U] ; }; struct crb_addr_pair___0 { long addr ; long data ; }; struct ql4_init_msix_entry { uint16_t entry ; uint16_t index ; char const *name ; irqreturn_t (*handler)(int , void * ) ; }; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; struct sysfs_entry { char *name ; struct bin_attribute *attr ; }; enum hrtimer_restart; struct iscsi_bsg_host_vendor { uint64_t vendor_id ; uint32_t vendor_cmd[0U] ; }; struct iscsi_bsg_host_vendor_reply { uint32_t vendor_rsp[0U] ; }; union __anonunion_rqst_data_400 { struct iscsi_bsg_host_vendor h_vendor ; }; struct iscsi_bsg_request { uint32_t msgcode ; union __anonunion_rqst_data_400 rqst_data ; }; union __anonunion_reply_data_401 { struct iscsi_bsg_host_vendor_reply vendor_reply ; }; struct iscsi_bsg_reply { uint32_t result ; uint32_t reply_payload_rcv_len ; union __anonunion_reply_data_401 reply_data ; }; enum hrtimer_restart; struct qla4_83xx_reset_entry_hdr { __le16 cmd ; __le16 size ; __le16 count ; __le16 delay ; }; struct qla4_83xx_poll { __le32 test_mask ; __le32 test_value ; }; struct qla4_83xx_rmw { __le32 test_mask ; __le32 xor_value ; __le32 or_value ; uint8_t shl ; uint8_t shr ; uint8_t index_a ; uint8_t rsvd ; }; struct qla4_83xx_entry { __le32 arg1 ; __le32 arg2 ; }; struct qla4_83xx_quad_entry { __le32 dr_addr ; __le32 dr_value ; __le32 ar_addr ; __le32 ar_value ; }; struct device_info { int func_num ; int device_type ; int port_num ; }; __inline static long ldv__builtin_expect(long exp , long c ) ; extern struct module __this_module ; __inline static void set_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - 
.\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(long nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static int test_and_set_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int test_and_clear_bit(long nr , unsigned long volatile *addr ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2, %0; setc %1": "+m" (*addr), "=qm" (c): "Ir" (nr): "memory"); return ((int )((signed char )c) != 0); } } __inline static int constant_test_bit(long nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr >> 6)) >> ((int )nr & 63)) & 1); } } extern unsigned long find_first_zero_bit(unsigned long const * , unsigned long ) ; __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } extern int printk(char const * , ...) ; extern void __dynamic_dev_dbg(struct _ddebug * , struct device const * , char const * , ...) ; extern int sprintf(char * , char const * , ...) ; void *ldv_err_ptr(long error ) ; void ldv_spin_lock(void) ; void ldv_spin_unlock(void) ; extern void *malloc(size_t ) ; extern void *calloc(size_t , size_t ) ; extern void *memset(void * , int , size_t ) ; extern int __VERIFIER_nondet_int(void) ; extern unsigned long __VERIFIER_nondet_ulong(void) ; extern void *__VERIFIER_nondet_pointer(void) ; extern void __VERIFIER_assume(int ) ; void *ldv_malloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = malloc(size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_zalloc(size_t size ) { void *p ; void *tmp ; int tmp___0 ; { tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { return ((void *)0); } else { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } } void *ldv_init_zalloc(size_t size ) { void *p ; void *tmp ; { tmp = calloc(1UL, size); p = tmp; __VERIFIER_assume((unsigned long )p != (unsigned long )((void *)0)); return (p); } } void *ldv_memset(void *s , int c , size_t n ) { void *tmp ; { tmp = memset(s, c, n); return (tmp); } } int ldv_undef_int(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); return (tmp); } } void *ldv_undef_ptr(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); return (tmp); } } unsigned long ldv_undef_ulong(void) { unsigned long tmp ; { tmp = __VERIFIER_nondet_ulong(); return (tmp); } } __inline static void ldv_stop(void) { { LDV_STOP: ; goto LDV_STOP; } } __inline static long ldv__builtin_expect(long exp , long c ) { { return (exp); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, 
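/* The ldv_* helpers above model the verification environment: ldv_malloc()
 * and ldv_zalloc() nondeterministically return either NULL or a valid
 * allocation (ldv_init_zalloc() never fails), and ldv_undef_int/ptr/ulong()
 * return unconstrained values via __VERIFIER_nondet_*.  list_add_tail()
 * below inserts the new entry just before head, i.e. at the tail of the
 * list. */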
head); return; } } extern void __list_del_entry(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } extern void __bad_percpu_size(void) ; extern void warn_slowpath_fmt(char const * , int const , char const * , ...) ; extern void warn_slowpath_null(char const * , int const ) ; extern unsigned long __phys_addr(unsigned long ) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_3233; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3233; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3233; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_3233; default: __bad_percpu_size(); } ldv_3233: ; return (pfo_ret__); } } extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern int memcmp(void const * , void const * , size_t ) ; extern size_t strlen(char const * ) ; extern char *strcpy(char * , char const * ) ; extern char *strcat(char * , char const * ) ; extern int strcmp(char const * , char const * ) ; extern size_t strlcpy(char * , char const * , size_t ) ; extern int strncmp(char const * , char const * , __kernel_size_t ) ; extern int strncasecmp(char const * , char const * , size_t ) ; extern void *kmemdup(void const * , size_t , gfp_t ) ; __inline static void *ERR_PTR(long error ) ; extern void __xchg_wrong_size(void) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_read(atomic_t const *v ) { int __var ; { __var = 0; return ((int )*((int const volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static int atomic_sub_and_test(int i , atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2, %0; sete %1": "+m" (v->counter), "=qm" (c): "er" (i): "memory"); return ((int )((signed char )c) != 0); } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } __inline static int atomic_dec_and_test(atomic_t *v ) { char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; 
sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((int )((signed char )c) != 0); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5763; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5763; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5763; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5763; default: __xadd_wrong_size(); } ldv_5763: ; return (__ret + i); } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern void __rwlock_init(rwlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->__annonCompField18.rlock); } } __inline static void ldv_spin_unlock_irqrestore_12(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->__annonCompField18.rlock, flags); return; } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; extern void mutex_lock_nested(struct mutex * , unsigned int ) ; extern void mutex_unlock(struct mutex * ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern unsigned long wait_for_completion_timeout(struct completion * , unsigned long ) ; extern unsigned long volatile jiffies ; extern int mod_timer(struct timer_list * , unsigned long ) ; int ldv_mod_timer_48(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; int ldv_mod_timer_49(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) ; extern void add_timer(struct timer_list * ) ; extern int del_timer_sync(struct timer_list * ) ; int ldv_del_timer_sync_46(struct timer_list *ldv_func_arg1 ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) 
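/* The numbered ldv_* prototypes in this file (ldv_queue_work_on_15,
 * ldv_flush_workqueue_18, ldv_mod_timer_48, ldv_destroy_workqueue_50, ...)
 * are LDV-generated wrappers for the corresponding kernel APIs; each suffix
 * apparently identifies one call site in the driver so the environment model
 * can track queued work items and armed timers. */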
; extern void destroy_workqueue(struct workqueue_struct * ) ; void ldv_destroy_workqueue_50(struct workqueue_struct *ldv_func_arg1 ) ; void ldv_destroy_workqueue_51(struct workqueue_struct *ldv_func_arg1 ) ; extern bool queue_work_on(int , struct workqueue_struct * , struct work_struct * ) ; bool ldv_queue_work_on_15(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_17(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; extern bool queue_delayed_work_on(int , struct workqueue_struct * , struct delayed_work * , unsigned long ) ; bool ldv_queue_delayed_work_on_16(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_19(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; extern void flush_workqueue(struct workqueue_struct * ) ; void ldv_flush_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool queue_work(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_15(8192, wq, work); return (tmp); } } __inline static unsigned short readw(void const volatile *addr ) { unsigned short ret ; { __asm__ volatile ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile *)addr)): "memory"); return (ret); } } __inline static unsigned int readl(void const volatile *addr ) { unsigned int ret ; { __asm__ volatile ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile *)addr)): "memory"); return (ret); } } __inline static void writel(unsigned int val , void volatile *addr ) { { __asm__ volatile ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile *)addr)): "memory"); return; } } extern void *ioremap_nocache(resource_size_t , unsigned long ) ; __inline static void *ioremap(resource_size_t offset , unsigned long size ) { void *tmp ; { tmp = ioremap_nocache(offset, size); return (tmp); } } extern void iounmap(void volatile * ) ; __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get(struct kref *kref ) { bool __warned ; int __ret_warn_once ; int tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = atomic_add_return(1, & kref->refcount); __ret_warn_once = tmp <= 1; tmp___2 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___2 != 0L) { __ret_warn_on = ! 
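/* WARN_ONCE path: atomic_add_return() returned a value <= 1, i.e. the
 * refcount was not positive before this kref_get(), which indicates a
 * use-after-free style reference grab. */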
__warned; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 47); } else { } tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return; } } __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 71); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } __inline static char const *kobject_name(struct kobject const *kobj ) { { return ((char const *)kobj->name); } } extern bool try_module_get(struct module * ) ; extern void module_put(struct module * ) ; extern long schedule_timeout(long ) ; extern long schedule_timeout_uninterruptible(long ) ; extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; extern void kfree(void const * ) ; void *ldv_kmem_cache_alloc_25(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_43(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; void ldv_check_alloc_flags(gfp_t flags ) ; void ldv_check_alloc_nonatomic(void) ; struct kobject *sysfs_fw_dump_attr_group0 ; int ldv_state_variable_8 ; struct pci_dev *qla4xxx_pci_driver_group1 ; int ldv_state_variable_15 ; struct iscsi_bus_flash_session *qla4xxx_iscsi_transport_group2 ; int ldv_state_variable_20 ; struct timer_list *ldv_timer_list_4 ; int pci_counter ; struct work_struct *ldv_work_struct_3_1 ; struct sockaddr *qla4xxx_iscsi_transport_group6 ; int ldv_state_variable_0 ; struct iscsi_task *qla4xxx_iscsi_transport_group5 ; int ldv_state_variable_21 ; int ldv_state_variable_5 ; int ldv_state_variable_13 ; int ldv_state_variable_12 ; int ldv_work_3_2 ; struct scsi_device *qla4xxx_driver_template_group2 ; int ldv_state_variable_22 ; int ldv_state_variable_14 ; int ldv_work_3_0 ; int ldv_state_variable_17 ; struct work_struct *ldv_work_struct_2_3 ; struct scsi_qla_host *qla4xxx_isp_ops_group0 ; struct pci_dev *qla4xxx_err_handler_group0 ; struct work_struct *ldv_work_struct_2_0 ; struct scsi_qla_host *qla4_82xx_isp_ops_group0 ; int ldv_state_variable_19 ; int ldv_state_variable_27 ; int ldv_state_variable_9 ; int ldv_state_variable_24 ; struct work_struct *ldv_work_struct_2_2 ; struct iscsi_bus_flash_conn *qla4xxx_iscsi_transport_group1 ; int ldv_timer_state_4 = 0; int ref_cnt ; int ldv_irq_line_1_1 ; int ldv_work_3_3 ; int ldv_state_variable_1 ; struct iscsi_cls_session *qla4xxx_iscsi_transport_group3 ; int ldv_state_variable_7 ; int ldv_irq_line_1_2 ; int ldv_state_variable_23 ; struct work_struct *ldv_work_struct_3_3 ; struct iscsi_cls_conn *qla4xxx_iscsi_transport_group4 ; int ldv_irq_1_3 = 0; void *ldv_irq_data_1_1 ; struct scsi_cmnd *qla4xxx_driver_template_group1 ; int ldv_state_variable_10 ; int ldv_irq_1_0 = 0; struct work_struct *ldv_work_struct_2_1 ; struct 
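/* ldv_state_variable_*, ldv_irq_*, ldv_work_* and the *_group* pointers are
 * the LDV environment-model state: they record which callback structures
 * (scsi_host_template, iscsi_transport, pci_driver, isp_operations, the
 * sysfs fw_dump attribute) are registered and which IRQ lines, timers and
 * work items are active, so the generated harness can invoke the driver's
 * handlers in admissible orders. */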
file *sysfs_fw_dump_attr_group1 ; struct work_struct *ldv_work_struct_3_2 ; void *ldv_irq_data_1_0 ; int ldv_state_variable_6 ; int ldv_state_variable_16 ; int ldv_work_3_1 ; struct Scsi_Host *qla4xxx_driver_template_group0 ; void *ldv_irq_data_1_3 ; int ldv_state_variable_2 ; int ldv_state_variable_25 ; int ldv_work_2_0 ; int ldv_state_variable_26 ; void *ldv_irq_data_1_2 ; struct iscsi_endpoint *qla4xxx_iscsi_transport_group7 ; struct work_struct *ldv_work_struct_3_0 ; int ldv_state_variable_11 ; int ldv_irq_1_2 = 0; int LDV_IN_INTERRUPT = 1; int ldv_irq_1_1 = 0; struct scsi_qla_host *qla4_83xx_isp_ops_group0 ; int ldv_state_variable_18 ; struct bin_attribute *sysfs_fw_dump_attr_group2 ; struct Scsi_Host *qla4xxx_iscsi_transport_group0 ; int ldv_irq_line_1_3 ; int ldv_work_2_2 ; int ldv_state_variable_3 ; int ldv_irq_line_1_0 ; int ldv_work_2_3 ; int ldv_state_variable_4 ; int ldv_work_2_1 ; void work_init_3(void) ; void work_init_2(void) ; void ldv_initialize_isp_operations_25(void) ; void activate_pending_timer_4(struct timer_list *timer , unsigned long data , int pending_flag ) ; void ldv_initialize_isp_operations_24(void) ; void call_and_disable_all_2(int state ) ; void activate_work_2(struct work_struct *work , int state ) ; void activate_work_3(struct work_struct *work , int state ) ; void ldv_initialize_iscsi_transport_26(void) ; void ldv_initialize_isp_operations_23(void) ; void choose_timer_4(struct timer_list *timer ) ; void call_and_disable_work_3(struct work_struct *work ) ; void disable_suitable_timer_4(struct timer_list *timer ) ; void disable_work_3(struct work_struct *work ) ; void disable_work_2(struct work_struct *work ) ; int reg_timer_4(struct timer_list *timer ) ; void invoke_work_3(void) ; void ldv_pci_driver_21(void) ; void call_and_disable_all_3(int state ) ; void ldv_initialize_scsi_host_template_27(void) ; void ldv_initialize_pci_error_handlers_22(void) ; void call_and_disable_work_2(struct work_struct *work ) ; void invoke_work_2(void) ; void ldv_initialize_bin_attribute_20(void) ; extern int ___ratelimit(struct ratelimit_state * , char const * ) ; __inline static char const *dev_name(struct device const *dev ) { char const *tmp ; { if ((unsigned long )dev->init_name != (unsigned long )((char const */* const */)0)) { return ((char const *)dev->init_name); } else { } tmp = kobject_name(& dev->kobj); return (tmp); } } __inline static void *dev_get_drvdata(struct device const *dev ) { { return ((void *)dev->driver_data); } } __inline static void dev_set_drvdata(struct device *dev , void *data ) { { dev->driver_data = data; return; } } extern int device_for_each_child(struct device * , void * , int (*)(struct device * , void * ) ) ; extern void put_device(struct device * ) ; extern void dev_printk(char const * , struct device const * , char const * , ...) 
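/* The mempool_create()/mempool_destroy() externs below presumably back
 * ha->srb_mempool (the pre-allocated pool of struct srb request blocks),
 * and the iscsi_boot_* externs populate ha->boot_kset with the boot
 * initiator/ethernet/target kobjects. */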
; extern mempool_t *mempool_create(int , mempool_alloc_t * , mempool_free_t * , void * ) ; extern void mempool_destroy(mempool_t * ) ; void *ldv_mempool_alloc_47(mempool_t *ldv_func_arg1 , gfp_t flags ) ; extern void mempool_free(void * , mempool_t * ) ; extern void *mempool_alloc_slab(gfp_t , void * ) ; extern void mempool_free_slab(void * , void * ) ; extern struct blk_queue_tag *blk_init_tags(int , int ) ; __inline static struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt , int tag ) { long tmp ; { tmp = ldv__builtin_expect((long )((unsigned long )bqt == (unsigned long )((struct blk_queue_tag *)0) || bqt->real_max_depth <= tag), 0L); if (tmp != 0L) { return ((struct request *)0); } else { } return (*(bqt->tag_index + (unsigned long )tag)); } } extern struct iscsi_boot_kobj *iscsi_boot_create_initiator(struct iscsi_boot_kset * , int , void * , ssize_t (*)(void * , int , char * ) , umode_t (*)(void * , int ) , void (*)(void * ) ) ; extern struct iscsi_boot_kobj *iscsi_boot_create_ethernet(struct iscsi_boot_kset * , int , void * , ssize_t (*)(void * , int , char * ) , umode_t (*)(void * , int ) , void (*)(void * ) ) ; extern struct iscsi_boot_kobj *iscsi_boot_create_target(struct iscsi_boot_kset * , int , void * , ssize_t (*)(void * , int , char * ) , umode_t (*)(void * , int ) , void (*)(void * ) ) ; extern struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int ) ; extern void iscsi_boot_destroy_kset(struct iscsi_boot_kset * ) ; extern int in4_pton(char const * , int , u8 * , int , char const ** ) ; extern int in6_pton(char const * , int , u8 * , int , char const ** ) ; __inline static int valid_dma_direction(int dma_direction ) { { return ((dma_direction == 0 || dma_direction == 1) || dma_direction == 2); } } __inline static void kmemcheck_mark_initialized(void *address , unsigned int n ) { { return; } } extern void debug_dma_map_page(struct device * , struct page * , size_t , size_t , int , dma_addr_t , bool ) ; extern void debug_dma_unmap_page(struct device * , dma_addr_t , size_t , int , bool ) ; extern struct dma_map_ops *dma_ops ; __inline static struct dma_map_ops *get_dma_ops(struct device *dev ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )dev == (unsigned long )((struct device *)0), 0L); if (tmp != 0L || (unsigned long )dev->archdata.dma_ops == (unsigned long )((struct dma_map_ops *)0)) { return (dma_ops); } else { return (dev->archdata.dma_ops); } } } __inline static dma_addr_t dma_map_single_attrs(struct device *dev , void *ptr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; dma_addr_t addr ; int tmp___0 ; long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; { tmp = get_dma_ops(dev); ops = tmp; kmemcheck_mark_initialized(ptr, (unsigned int )size); tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (19), "i" (12UL)); ldv_33321: ; goto ldv_33321; } else { } tmp___2 = __phys_addr((unsigned long )ptr); addr = (*(ops->map_page))(dev, (struct page *)-24189255811072L + (tmp___2 >> 12), (unsigned long )ptr & 4095UL, size, dir, attrs); tmp___3 = __phys_addr((unsigned long )ptr); debug_dma_map_page(dev, (struct page *)-24189255811072L + (tmp___3 >> 12), (unsigned long )ptr & 4095UL, size, (int )dir, addr, 1); 
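/* (struct page *)-24189255811072L is 0xffffea0000000000, the x86-64 vmemmap
 * base, so adding (__phys_addr(ptr) >> 12) converts the buffer's physical
 * address into the struct page passed to ops->map_page() above. */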
return (addr); } } __inline static void dma_unmap_single_attrs(struct device *dev , dma_addr_t addr , size_t size , enum dma_data_direction dir , struct dma_attrs *attrs ) { struct dma_map_ops *ops ; struct dma_map_ops *tmp ; int tmp___0 ; long tmp___1 ; { tmp = get_dma_ops(dev); ops = tmp; tmp___0 = valid_dma_direction((int )dir); tmp___1 = ldv__builtin_expect(tmp___0 == 0, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/asm-generic/dma-mapping-common.h"), "i" (36), "i" (12UL)); ldv_33330: ; goto ldv_33330; } else { } if ((unsigned long )ops->unmap_page != (unsigned long )((void (*)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ))0)) { (*(ops->unmap_page))(dev, addr, size, dir, attrs); } else { } debug_dma_unmap_page(dev, addr, size, (int )dir, 1); return; } } extern int dma_supported(struct device * , u64 ) ; extern int dma_set_mask(struct device * , u64 ) ; extern void *dma_alloc_attrs(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; extern void dma_free_attrs(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; __inline static int dma_set_coherent_mask(struct device *dev , u64 mask ) { int tmp ; { tmp = dma_supported(dev, mask); if (tmp == 0) { return (-5); } else { } dev->coherent_dma_mask = mask; return (0); } } __inline static struct scsi_target *scsi_target(struct scsi_device *sdev ) { struct device const *__mptr ; { __mptr = (struct device const *)sdev->sdev_gendev.parent; return ((struct scsi_target *)__mptr + 0xffffffffffffffd8UL); } } extern int scsi_change_queue_depth(struct scsi_device * , int ) ; extern void scsi_dma_unmap(struct scsi_cmnd * ) ; extern struct request *blk_mq_tag_to_rq(struct blk_mq_tags * , unsigned int ) ; __inline static u16 blk_mq_unique_tag_to_hwq(u32 unique_tag ) { { return ((u16 )(unique_tag >> 16)); } } __inline static u16 blk_mq_unique_tag_to_tag(u32 unique_tag ) { { return ((u16 )unique_tag); } } __inline static void *shost_priv(struct Scsi_Host *shost ) { { return ((void *)(& shost->hostdata)); } } extern int scsi_is_host_device(struct device const * ) ; __inline static struct Scsi_Host *dev_to_shost(struct device *dev ) { int tmp ; struct device const *__mptr ; { goto ldv_34766; ldv_34765: ; if ((unsigned long )dev->parent == (unsigned long )((struct device *)0)) { return ((struct Scsi_Host *)0); } else { } dev = dev->parent; ldv_34766: tmp = scsi_is_host_device((struct device const *)dev); if (tmp == 0) { goto ldv_34765; } else { } __mptr = (struct device const *)dev; return ((struct Scsi_Host *)__mptr + 0xfffffffffffffc48UL); } } __inline static bool shost_use_blk_mq(struct Scsi_Host *shost ) { { return ((int )shost->use_blk_mq != 0); } } extern int scsi_queue_work(struct Scsi_Host * , struct work_struct * ) ; extern int scsi_add_host_with_dma(struct Scsi_Host * , struct device * , struct device * ) ; int ldv_scsi_add_host_with_dma_29(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern void scsi_remove_host(struct Scsi_Host * ) ; void ldv_scsi_remove_host_67(struct Scsi_Host *shost ) ; void ldv_scsi_remove_host_68(struct Scsi_Host *shost ) ; extern struct Scsi_Host *scsi_host_get(struct Scsi_Host * ) ; extern void scsi_host_put(struct Scsi_Host * ) ; __inline static int scsi_add_host(struct Scsi_Host *host , struct device *dev ) { int tmp ; { tmp = ldv_scsi_add_host_with_dma_29(host, dev, dev); return 
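/* scsi_add_host() is routed through ldv_scsi_add_host_with_dma_29() so the
 * verifier can intercept host registration and model its failure paths. */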
(tmp); } } extern void scsi_unblock_requests(struct Scsi_Host * ) ; extern void scsi_block_requests(struct Scsi_Host * ) ; __inline static struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost , int unique_tag ) { u16 hwq ; u16 tmp ; struct request *req ; u16 tmp___0 ; { tmp = blk_mq_unique_tag_to_hwq((u32 )unique_tag); hwq = tmp; req = (struct request *)0; if ((unsigned int )hwq < shost->__annonCompField84.tag_set.nr_hw_queues) { tmp___0 = blk_mq_unique_tag_to_tag((u32 )unique_tag); req = blk_mq_tag_to_rq(*(shost->__annonCompField84.tag_set.tags + (unsigned long )hwq), (unsigned int )tmp___0); } else { } return ((unsigned long )req != (unsigned long )((struct request *)0) ? (struct scsi_cmnd *)req->special : (struct scsi_cmnd *)0); } } __inline static int scsi_init_shared_tag_map(struct Scsi_Host *shost , int depth ) { bool tmp ; { tmp = shost_use_blk_mq(shost); if ((int )tmp) { return (0); } else { } if ((unsigned long )shost->__annonCompField84.bqt == (unsigned long )((struct blk_queue_tag *)0)) { shost->__annonCompField84.bqt = blk_init_tags(depth, (shost->hostt)->tag_alloc_policy); if ((unsigned long )shost->__annonCompField84.bqt == (unsigned long )((struct blk_queue_tag *)0)) { return (-12); } else { } } else { } return (0); } } __inline static struct scsi_cmnd *scsi_host_find_tag(struct Scsi_Host *shost , int tag ) { struct request *req ; struct scsi_cmnd *tmp ; bool tmp___0 ; { if (tag != -1) { tmp___0 = shost_use_blk_mq(shost); if ((int )tmp___0) { tmp = scsi_mq_find_tag(shost, tag); return (tmp); } else { } req = blk_map_queue_find_tag(shost->__annonCompField84.bqt, tag); return ((unsigned long )req != (unsigned long )((struct request *)0) ? (struct scsi_cmnd *)req->special : (struct scsi_cmnd *)0); } else { } return ((struct scsi_cmnd *)0); } } __inline static int pci_channel_offline(struct pci_dev *pdev ) { { return (pdev->error_state != 1U); } } extern void pci_dev_put(struct pci_dev * ) ; extern struct pci_dev *pci_get_domain_bus_and_slot(int , unsigned int , unsigned int ) ; extern int pci_bus_read_config_word(struct pci_bus * , unsigned int , int , u16 * ) ; __inline static int pci_read_config_word(struct pci_dev const *dev , int where , u16 *val ) { int tmp ; { tmp = pci_bus_read_config_word(dev->bus, dev->devfn, where, val); return (tmp); } } extern int pci_enable_device(struct pci_dev * ) ; __inline static int pci_is_enabled(struct pci_dev *pdev ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& pdev->enable_cnt)); return (tmp > 0); } } extern void pci_disable_device(struct pci_dev * ) ; extern int pci_save_state(struct pci_dev * ) ; extern void pci_restore_state(struct pci_dev * ) ; extern int pci_request_regions(struct pci_dev * , char const * ) ; extern void pci_release_regions(struct pci_dev * ) ; extern int __pci_register_driver(struct pci_driver * , struct module * , char const * ) ; int ldv___pci_register_driver_69(struct pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) ; extern void pci_unregister_driver(struct pci_driver * ) ; void ldv_pci_unregister_driver_70(struct pci_driver *ldv_func_arg1 ) ; extern struct dma_pool *dma_pool_create(char const * , struct device * , size_t , size_t , size_t ) ; extern void dma_pool_destroy(struct dma_pool * ) ; void *ldv_dma_pool_alloc_45(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_57(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_58(struct dma_pool *ldv_func_arg1 , 
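/* As with the workqueue and timer wrappers above, each numbered
 * ldv_dma_pool_alloc_*, ldv_vmalloc_* and ldv_vzalloc_* prototype presumably
 * stands in for one allocation call site so the allocation-context checks
 * can distinguish the individual calls. */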
gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_60(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_62(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_64(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; extern void dma_pool_free(struct dma_pool * , void * , dma_addr_t ) ; __inline static int pci_domain_nr(struct pci_bus *bus ) { struct pci_sysdata *sd ; { sd = (struct pci_sysdata *)bus->sysdata; return (sd->domain); } } __inline static int pci_set_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_mask(& dev->dev, mask); return (tmp); } } __inline static int pci_set_consistent_dma_mask(struct pci_dev *dev , u64 mask ) { int tmp ; { tmp = dma_set_coherent_mask(& dev->dev, mask); return (tmp); } } __inline static void *pci_get_drvdata(struct pci_dev *pdev ) { void *tmp ; { tmp = dev_get_drvdata((struct device const *)(& pdev->dev)); return (tmp); } } __inline static void pci_set_drvdata(struct pci_dev *pdev , void *data ) { { dev_set_drvdata(& pdev->dev, data); return; } } __inline static char const *pci_name(struct pci_dev const *pdev ) { char const *tmp ; { tmp = dev_name(& pdev->dev); return (tmp); } } extern void msleep(unsigned int ) ; __inline static void ssleep(unsigned int seconds ) { { msleep(seconds * 1000U); return; } } extern int pci_enable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_disable_pcie_error_reporting(struct pci_dev * ) ; extern int pci_cleanup_aer_uncorrect_error_status(struct pci_dev * ) ; void *ldv_vmalloc_44(unsigned long ldv_func_arg1 ) ; void *ldv_vmalloc_56(unsigned long ldv_func_arg1 ) ; void *ldv_vmalloc_61(unsigned long ldv_func_arg1 ) ; void *ldv_vmalloc_63(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_52(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_53(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_54(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_55(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_59(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_65(unsigned long ldv_func_arg1 ) ; void *ldv_vzalloc_66(unsigned long ldv_func_arg1 ) ; extern void vfree(void const * ) ; struct sk_buff *ldv_skb_clone_34(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_42(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_36(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_32(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_40(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_41(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_37(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_38(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_39(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern ssize_t sysfs_format_mac(char * , unsigned char const * , int ) ; __inline static void *nla_data(struct nlattr const *nla ) { { return ((void *)nla + 4U); } } __inline static int nla_ok(struct nlattr const *nla , int remaining ) { { return ((remaining > 3 && (unsigned int )((unsigned short )nla->nla_len) > 3U) && (int )nla->nla_len <= 
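/* nla_ok(): a netlink attribute is usable when the remaining buffer holds at
 * least a 4-byte header, nla_len covers at least that header, and nla_len
 * fits within the remaining buffer; nla_next() below then advances by
 * nla_len rounded up to a 4-byte boundary. */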
remaining); } } __inline static struct nlattr *nla_next(struct nlattr const *nla , int *remaining ) { int totlen ; { totlen = ((int )nla->nla_len + 3) & -4; *remaining = *remaining - totlen; return ((struct nlattr *)nla + (unsigned long )totlen); } } extern struct scsi_transport_template *iscsi_register_transport(struct iscsi_transport * ) ; extern int iscsi_unregister_transport(struct iscsi_transport * ) ; extern void iscsi_conn_login_event(struct iscsi_cls_conn * , enum iscsi_conn_state ) ; extern void iscsi_post_host_event(uint32_t , struct iscsi_transport * , enum iscsi_host_event_code , uint32_t , uint8_t * ) ; extern void iscsi_ping_comp_event(uint32_t , struct iscsi_transport * , uint32_t , uint32_t , uint32_t , uint8_t * ) ; extern void iscsi_host_for_each_session(struct Scsi_Host * , void (*)(struct iscsi_cls_session * ) ) ; extern int iscsi_session_chkready(struct iscsi_cls_session * ) ; extern int iscsi_is_session_online(struct iscsi_cls_session * ) ; extern void iscsi_unblock_session(struct iscsi_cls_session * ) ; extern void iscsi_block_session(struct iscsi_cls_session * ) ; extern struct iscsi_endpoint *iscsi_create_endpoint(int ) ; extern void iscsi_destroy_endpoint(struct iscsi_endpoint * ) ; extern struct iscsi_endpoint *iscsi_lookup_endpoint(u64 ) ; extern int iscsi_block_scsi_eh(struct scsi_cmnd * ) ; extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host * , struct iscsi_transport * , uint32_t , uint32_t , int ) ; extern void iscsi_destroy_iface(struct iscsi_iface * ) ; extern char *iscsi_get_port_speed_name(struct Scsi_Host * ) ; extern char *iscsi_get_port_state_name(struct Scsi_Host * ) ; extern int iscsi_is_session_dev(struct device const * ) ; extern char *iscsi_get_discovery_parent_name(int ) ; extern struct iscsi_bus_flash_session *iscsi_create_flashnode_sess(struct Scsi_Host * , int , struct iscsi_transport * , int ) ; extern struct iscsi_bus_flash_conn *iscsi_create_flashnode_conn(struct Scsi_Host * , struct iscsi_bus_flash_session * , struct iscsi_transport * , int ) ; extern void iscsi_destroy_flashnode_sess(struct iscsi_bus_flash_session * ) ; extern void iscsi_destroy_all_flashnode(struct Scsi_Host * ) ; extern int iscsi_flashnode_bus_match(struct device * , struct device_driver * ) ; extern struct device *iscsi_find_flashnode_sess(struct Scsi_Host * , void * , int (*)(struct device * , void * ) ) ; extern struct device *iscsi_find_flashnode_conn(struct iscsi_bus_flash_session * ) ; extern char *iscsi_get_ipaddress_state_name(enum iscsi_ipaddress_state ) ; extern char *iscsi_get_router_state_name(enum iscsi_router_state ) ; extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template * , int , bool ) ; extern struct iscsi_cls_session *iscsi_session_setup(struct iscsi_transport * , struct Scsi_Host * , uint16_t , int , int , uint32_t , unsigned int ) ; extern void iscsi_session_teardown(struct iscsi_cls_session * ) ; extern int iscsi_set_param(struct iscsi_cls_conn * , enum iscsi_param , char * , int ) ; extern int iscsi_session_get_param(struct iscsi_cls_session * , enum iscsi_param , char * ) ; extern struct iscsi_cls_conn *iscsi_conn_setup(struct iscsi_cls_session * , int , uint32_t ) ; extern int iscsi_conn_start(struct iscsi_cls_conn * ) ; extern void iscsi_conn_stop(struct iscsi_cls_conn * , int ) ; extern int iscsi_conn_bind(struct iscsi_cls_session * , struct iscsi_cls_conn * , int ) ; extern void iscsi_session_failure(struct iscsi_session * , enum iscsi_err ) ; extern int iscsi_conn_get_param(struct iscsi_cls_conn * , enum iscsi_param 
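/* qla4_82xx_reg_tbl and qla4_83xx_reg_tbl below are 14-entry CRB register
 * address tables indexed through ha->reg_tbl by the rd/wr_direct helpers.
 * set_rmask() mirrors the low 16 bits into the high half, e.g.
 * set_rmask(0x0004) == 0x00040004, while clr_rmask() only shifts them up,
 * clr_rmask(0x0004) == 0x00040000. */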
, char * ) ; extern int iscsi_conn_get_addr_param(struct __kernel_sockaddr_storage * , enum iscsi_param , char * ) ; extern int iscsi_conn_send_pdu(struct iscsi_cls_conn * , struct iscsi_hdr * , char * , uint32_t ) ; extern int iscsi_complete_pdu(struct iscsi_conn * , struct iscsi_hdr * , char * , int ) ; extern int iscsi_switch_str_param(char ** , char * ) ; static uint32_t const qla4_82xx_reg_tbl[14U] = { 136323240U, 136323244U, 136323248U, 136323384U, 136323392U, 136323396U, 136323400U, 136323404U, 136323444U, 136323408U, 136323412U, 136323416U, 136323664U, 136324020U}; __inline static uint32_t set_rmask(uint32_t val ) { { return ((val & 65535U) | (val << 16)); } } __inline static uint32_t clr_rmask(uint32_t val ) { { return (val << 16); } } static uint32_t const qla4_83xx_reg_tbl[14U] = { 13480U, 13484U, 13488U, 14216U, 14212U, 14220U, 13640U, 14304U, 14208U, 13648U, 13652U, 13656U, 13904U, 14260U}; __inline static int is_qla4010(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 16400U); } } __inline static int is_qla4022(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 16418U); } } __inline static int is_qla4032(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 16434U); } } __inline static int is_qla40XX(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = is_qla4032(ha); if (tmp != 0) { tmp___2 = 1; } else { tmp___0 = is_qla4022(ha); if (tmp___0 != 0) { tmp___2 = 1; } else { tmp___1 = is_qla4010(ha); if (tmp___1 != 0) { tmp___2 = 1; } else { tmp___2 = 0; } } } return (tmp___2); } } __inline static int is_qla8022(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 32802U); } } __inline static int is_qla8032(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 32818U); } } __inline static int is_qla8042(struct scsi_qla_host *ha ) { { return ((unsigned int )(ha->pdev)->device == 32834U); } } __inline static int is_qla80XX(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = is_qla8022(ha); if (tmp != 0) { tmp___2 = 1; } else { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { tmp___2 = 1; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { tmp___2 = 1; } else { tmp___2 = 0; } } } return (tmp___2); } } __inline static int is_aer_supported(struct scsi_qla_host *ha ) { { return (((unsigned int )(ha->pdev)->device == 32802U || (unsigned int )(ha->pdev)->device == 32818U) || (unsigned int )(ha->pdev)->device == 32834U); } } __inline static int adapter_up(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { tmp___0 = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { tmp___1 = constant_test_bit(9L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 == 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } } else { tmp___2 = 0; } return (tmp___2); } } __inline static struct scsi_qla_host *to_qla_host(struct Scsi_Host *shost ) { void *tmp ; { tmp = shost_priv(shost); return ((struct scsi_qla_host *)tmp + 224U); } } __inline static int qla4_8xxx_rd_direct(struct scsi_qla_host *ha , uint32_t const crb_reg ) { uint32_t tmp ; { tmp = (*((ha->isp_ops)->rd_reg_direct))(ha, (ulong )*(ha->reg_tbl + (unsigned long )crb_reg)); return ((int )tmp); } } __inline static void qla4_8xxx_wr_direct(struct scsi_qla_host *ha , uint32_t const 
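/* Like qla4_8xxx_rd_direct() above, wr_direct() looks the register address
 * up in ha->reg_tbl[crb_reg] and dispatches through isp_ops->wr_reg_direct,
 * so the same logical index works for both register layouts.  The is_qla*
 * predicates above compare PCI device IDs written in decimal by CIL:
 * 16400/16418/16434 are 0x4010/0x4022/0x4032 (40XX parts) and
 * 32802/32818/32834 are 0x8022/0x8032/0x8042 (80XX parts). */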
crb_reg , uint32_t const value ) { { (*((ha->isp_ops)->wr_reg_direct))(ha, (ulong )*(ha->reg_tbl + (unsigned long )crb_reg), value); return; } } int qla4xxx_hw_reset(struct scsi_qla_host *ha ) ; int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a ) ; int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha , struct srb *srb ) ; int qla4xxx_initialize_adapter(struct scsi_qla_host *ha , int is_reset ) ; int qla4xxx_soft_reset(struct scsi_qla_host *ha ) ; irqreturn_t qla4xxx_intr_handler(int irq , void *dev_id ) ; void qla4xxx_free_ddb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) ; void qla4xxx_process_aen(struct scsi_qla_host *ha , uint8_t process_aen ) ; int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha ) ; int qla4xxx_abort_task(struct scsi_qla_host *ha , struct srb *srb ) ; int qla4xxx_reset_lun(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , uint64_t lun ) ; int qla4xxx_reset_target(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) ; int qla4xxx_get_flash(struct scsi_qla_host *ha , dma_addr_t dma_addr , uint32_t offset , uint32_t len ) ; int qla4xxx_get_firmware_state(struct scsi_qla_host *ha ) ; int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha , uint16_t fw_ddb_index , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint32_t *num_valid_ddb_entries , uint32_t *next_ddb_index , uint32_t *fw_ddb_device_state , uint32_t *conn_err_detail , uint16_t *tcp_source_port_num , uint16_t *connection_id ) ; int qla4xxx_set_ddb_entry(struct scsi_qla_host *ha , uint16_t fw_ddb_index , dma_addr_t fw_ddb_entry_dma , uint32_t *mbx_sts ) ; uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , dma_addr_t init_fw_cb_dma ) ; int qla4xxx_disable_acb(struct scsi_qla_host *ha ) ; int qla4xxx_set_acb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , dma_addr_t acb_dma ) ; int qla4xxx_get_acb(struct scsi_qla_host *ha , dma_addr_t acb_dma , uint32_t acb_type , uint32_t len ) ; int qla4xxx_get_ip_state(struct scsi_qla_host *ha , uint32_t acb_idx , uint32_t ip_idx , uint32_t *sts ) ; void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session ) ; u8 rd_nvram_byte(struct scsi_qla_host *ha , int offset ) ; void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) ; void qla4xxx_srb_compl(struct kref *ref ) ; struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha , uint32_t index ) ; int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , uint64_t lun , uint16_t mrkr_mod ) ; int qla4xxx_set_flash(struct scsi_qla_host *ha , dma_addr_t dma_addr , uint32_t offset , uint32_t length , uint32_t options ) ; int qla4xxx_get_chap_index(struct scsi_qla_host *ha , char *username , char *password , int bidi , uint16_t *chap_index ) ; int qla4xxx_set_chap(struct scsi_qla_host *ha , char *username , char *password , uint16_t idx , int bidi ) ; void qla4xxx_queue_iocb(struct scsi_qla_host *ha ) ; void qla4xxx_complete_iocb(struct scsi_qla_host *ha ) ; int qla4xxx_get_sys_info(struct scsi_qla_host *ha ) ; int qla4xxx_iospace_config(struct scsi_qla_host *ha ) ; void qla4xxx_pci_config(struct scsi_qla_host *ha ) ; int qla4xxx_start_firmware(struct scsi_qla_host *ha ) ; uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha ) ; uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha ) ; int qla4xxx_request_irqs(struct scsi_qla_host *ha ) ; void qla4xxx_free_irqs(struct scsi_qla_host *ha ) ; void qla4xxx_wake_dpc(struct scsi_qla_host *ha 
) ; void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha ) ; uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , struct addr_ctrl_blk *init_fw_cb , dma_addr_t init_fw_cb_dma ) ; void qla4_8xxx_pci_config(struct scsi_qla_host *ha ) ; int qla4_8xxx_iospace_config(struct scsi_qla_host *ha ) ; int qla4_8xxx_load_risc(struct scsi_qla_host *ha ) ; irqreturn_t qla4_82xx_intr_handler(int irq , void *dev_id ) ; void qla4_82xx_queue_iocb(struct scsi_qla_host *ha ) ; void qla4_82xx_complete_iocb(struct scsi_qla_host *ha ) ; void qla4_82xx_wr_32(struct scsi_qla_host *ha , ulong off , u32 data ) ; uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha , ulong off ) ; int qla4_82xx_isp_reset(struct scsi_qla_host *ha ) ; void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) ; uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha ) ; uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha ) ; int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha ) ; void qla4_8xxx_watchdog(struct scsi_qla_host *ha ) ; int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha ) ; int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha ) ; void qla4_82xx_enable_intrs(struct scsi_qla_host *ha ) ; void qla4_82xx_disable_intrs(struct scsi_qla_host *ha ) ; void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha ) ; void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha ) ; int qla4_82xx_idc_lock(struct scsi_qla_host *ha ) ; void qla4_82xx_idc_unlock(struct scsi_qla_host *ha ) ; int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha ) ; void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha ) ; void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha ) ; void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha ) ; int qla4xxx_conn_open(struct scsi_qla_host *ha , uint16_t fw_ddb_index ) ; int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , struct iscsi_cls_conn *cls_conn , uint32_t *mbx_sts ) ; int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , int options ) ; int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha , uint32_t ddb_index , uint32_t *mbx_sts ) ; int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha , uint32_t ddb_index ) ; int qla4xxx_send_passthru0(struct iscsi_task *task ) ; void qla4xxx_free_ddb_index(struct scsi_qla_host *ha ) ; int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha , uint16_t fw_ddb_index , uint16_t stats_size , dma_addr_t stats_dma ) ; void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) ; void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) ; int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint16_t ddb_index ) ; int qla4xxx_get_chap(struct scsi_qla_host *ha , char *username , char *password , uint16_t idx ) ; int qla4xxx_get_ddb_index(struct scsi_qla_host *ha , uint16_t *ddb_index ) ; void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session ) ; int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session ) ; int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session ) ; int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha , uint32_t fw_ddb_index , struct ddb_entry *ddb_entry , uint32_t state ) ; int qla4xxx_ddb_change(struct scsi_qla_host *ha , uint32_t fw_ddb_index , struct ddb_entry *ddb_entry , uint32_t state ) ; void 
qla4xxx_build_ddb_list(struct scsi_qla_host *ha , int is_reset ) ; int qla4xxx_post_aen_work(struct scsi_qla_host *ha , enum iscsi_host_event_code aen_code , uint32_t data_size , uint8_t *data ) ; int qla4xxx_ping_iocb(struct scsi_qla_host *ha , uint32_t options , uint32_t payload_size , uint32_t pid , uint8_t *ipaddr ) ; int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha , uint32_t status , uint32_t pid , uint32_t data_size , uint8_t *data ) ; int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint16_t ddb_index ) ; int qla4xxx_bsg_request(struct bsg_job *bsg_job ) ; void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry ) ; void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha ) ; void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha ) ; int qla4_82xx_try_start_fw(struct scsi_qla_host *ha ) ; extern int qla4_8xxx_need_reset(struct scsi_qla_host * ) ; int qla4_82xx_md_rd_32(struct scsi_qla_host *ha , uint32_t off , uint32_t *data ) ; int qla4_82xx_md_wr_32(struct scsi_qla_host *ha , uint32_t off , uint32_t data ) ; void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha ) ; void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int in_count ) ; void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha , int out_count ) ; void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int in_count ) ; void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha , int out_count ) ; void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha ) ; void qla4_83xx_disable_intrs(struct scsi_qla_host *ha ) ; void qla4_83xx_enable_intrs(struct scsi_qla_host *ha ) ; int qla4_83xx_start_firmware(struct scsi_qla_host *ha ) ; irqreturn_t qla4_83xx_intr_handler(int irq , void *dev_id ) ; void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) ; int qla4_83xx_isp_reset(struct scsi_qla_host *ha ) ; void qla4_83xx_queue_iocb(struct scsi_qla_host *ha ) ; void qla4_83xx_complete_iocb(struct scsi_qla_host *ha ) ; uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha , ulong addr ) ; void qla4_83xx_wr_reg(struct scsi_qla_host *ha , ulong addr , uint32_t val ) ; int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha , uint32_t addr , uint32_t *data ) ; int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha , uint32_t addr , uint32_t data ) ; int qla4_83xx_drv_lock(struct scsi_qla_host *ha ) ; void qla4_83xx_drv_unlock(struct scsi_qla_host *ha ) ; void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha ) ; void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int incount ) ; void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha , int outcount ) ; void qla4_83xx_read_reset_template(struct scsi_qla_host *ha ) ; void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha ) ; int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha ) ; int qla4_8xxx_set_param(struct scsi_qla_host *ha , int param ) ; int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha ) ; int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha ) ; void qla4_83xx_disable_pause(struct scsi_qla_host *ha ) ; int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha ) ; int qla4xxx_get_default_ddb(struct scsi_qla_host *ha , uint32_t options , dma_addr_t dma_addr ) ; int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha , char *username , char *password , uint16_t chap_index ) ; int qla4_84xx_config_acb(struct scsi_qla_host *ha , int acb_config ) ; int qla4_8xxx_check_init_adapter_retry(struct 
scsi_qla_host *ha ) ; int qla4_83xx_is_detached(struct scsi_qla_host *ha ) ; int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha ) ; int ql4xextended_error_logging ; int ql4xdontresethba ; int ql4xenablemsix ; int ql4xmdcapmask ; int ql4xenablemd ; struct device_attribute *qla4xxx_host_attrs[16U] ; __inline static struct ddb_entry *qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha , uint32_t fw_ddb_index ) { struct ddb_entry *ddb_entry ; { ddb_entry = (struct ddb_entry *)0; if (fw_ddb_index <= 511U && (unsigned long )ha->fw_ddb_index_map[fw_ddb_index] != (unsigned long )((struct ddb_entry *)65535)) { ddb_entry = ha->fw_ddb_index_map[fw_ddb_index]; } else { } return (ddb_entry); } } __inline static void __qla4xxx_enable_intrs(struct scsi_qla_host *ha ) { uint32_t tmp ; uint32_t tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___1 = is_qla4022(ha); tmp___2 = is_qla4032(ha); if ((tmp___1 | tmp___2) != 0) { tmp = set_rmask(4U); writel(tmp, (void volatile *)(& (ha->reg)->u1.isp4022.intr_mask)); readl((void const volatile *)(& (ha->reg)->u1.isp4022.intr_mask)); } else { tmp___0 = set_rmask(4U); writel(tmp___0, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } set_bit(6L, (unsigned long volatile *)(& ha->flags)); return; } } __inline static void __qla4xxx_disable_intrs(struct scsi_qla_host *ha ) { uint32_t tmp ; uint32_t tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___1 = is_qla4022(ha); tmp___2 = is_qla4032(ha); if ((tmp___1 | tmp___2) != 0) { tmp = clr_rmask(4U); writel(tmp, (void volatile *)(& (ha->reg)->u1.isp4022.intr_mask)); readl((void const volatile *)(& (ha->reg)->u1.isp4022.intr_mask)); } else { tmp___0 = clr_rmask(4U); writel(tmp___0, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } clear_bit(6L, (unsigned long volatile *)(& ha->flags)); return; } } __inline static void qla4xxx_enable_intrs(struct scsi_qla_host *ha ) { unsigned long flags ; { ldv_spin_lock(); __qla4xxx_enable_intrs(ha); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static void qla4xxx_disable_intrs(struct scsi_qla_host *ha ) { unsigned long flags ; { ldv_spin_lock(); __qla4xxx_disable_intrs(ha); spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } __inline static int qla4xxx_get_chap_type(struct ql4_chap_table *chap_entry ) { int type ; { if ((int )((signed char )chap_entry->flags) < 0) { type = 0; } else { type = 1; } return (type); } } static char qla4xxx_version_str[40U] ; static struct kmem_cache *srb_cachep ; static int ql4xdisablesysfsboot = 1; int ql4xenablemsix = 1; static int ql4xmaxqdepth = 32; static int ql4xqfulltracking = 1; static int ql4xsess_recovery_tmo = 120; int ql4xmdcapmask = 0; int ql4xenablemd = 1; static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha ) ; static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha ) ; static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess , enum iscsi_param param , char *buf ) ; static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn , enum iscsi_param param , char *buf ) ; static int qla4xxx_host_get_param(struct Scsi_Host *shost , enum iscsi_host_param param , char *buf ) ; static int qla4xxx_iface_set_param(struct Scsi_Host *shost , void *data , uint32_t len ) ; static int qla4xxx_get_iface_param(struct iscsi_iface *iface , enum iscsi_param_type param_type , int param , char *buf ) ; static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc ) ; 
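/*
 * Most of the static prototypes above and below declare the driver-private
 * callbacks that are wired into the two method tables defined later in this
 * file: qla4xxx_driver_template (the SCSI host template) and
 * qla4xxx_iscsi_transport (the iSCSI transport template).  Also note that
 * qla4xxx_enable_intrs() and qla4xxx_disable_intrs() above call
 * ldv_spin_lock() and then spin_unlock_irqrestore(&ha->hardware_lock, flags)
 * with 'flags' never initialized; the unpaired ldv_spin_lock() appears to be
 * the LDV verification harness's model of spin_lock_irqsave() rather than
 * original driver logic.
 */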
static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost , struct sockaddr *dst_addr , int non_blocking ) ; static int qla4xxx_ep_poll(struct iscsi_endpoint *ep , int timeout_ms ) ; static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep ) ; static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep , enum iscsi_param param , char *buf ) ; static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn ) ; static struct iscsi_cls_conn *qla4xxx_conn_create(struct iscsi_cls_session *cls_sess , uint32_t conn_idx ) ; static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session , struct iscsi_cls_conn *cls_conn , uint64_t transport_fd , int is_leading ) ; static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn ) ; static struct iscsi_cls_session *qla4xxx_session_create(struct iscsi_endpoint *ep , uint16_t cmds_max , uint16_t qdepth , uint32_t initial_cmdsn ) ; static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess ) ; static void qla4xxx_task_work(struct work_struct *wdata ) ; static int qla4xxx_alloc_pdu(struct iscsi_task *task , uint8_t opcode ) ; static int qla4xxx_task_xmit(struct iscsi_task *task ) ; static void qla4xxx_task_cleanup(struct iscsi_task *task ) ; static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session ) ; static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn , struct iscsi_stats *stats ) ; static int qla4xxx_send_ping(struct Scsi_Host *shost , uint32_t iface_num , uint32_t iface_type , uint32_t payload_size , uint32_t pid , struct sockaddr *dst_addr ) ; static int qla4xxx_get_chap_list(struct Scsi_Host *shost , uint16_t chap_tbl_idx , uint32_t *num_entries , char *buf ) ; static int qla4xxx_delete_chap(struct Scsi_Host *shost , uint16_t chap_tbl_idx ) ; static int qla4xxx_set_chap_entry(struct Scsi_Host *shost , void *data , int len ) ; static int qla4xxx_get_host_stats(struct Scsi_Host *shost , char *buf , int len ) ; static int qla4xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) ; static int qla4xxx_eh_abort(struct scsi_cmnd *cmd ) ; static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd ) ; static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd ) ; static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd ) ; static int qla4xxx_slave_alloc(struct scsi_device *sdev ) ; static umode_t qla4_attr_is_visible(int param_type , int param ) ; static int qla4xxx_host_reset(struct Scsi_Host *shost , int reset_type ) ; static int qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn , void *data , int len ) ; static int qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess , int param , char *buf ) ; static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost , char const *buf , int len ) ; static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess ) ; static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn ) ; static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn ) ; static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess ) ; static struct qla4_8xxx_legacy_intr_set legacy_intr[8U] = { {128U, 101777688U, 101777704U, 101789696U}, {256U, 101777760U, 101777776U, 101789700U}, {512U, 101777764U, 101777780U, 101789704U}, {1024U, 101777768U, 101777784U, 101789708U}, {2048U, 101778272U, 101778288U, 101789712U}, {4096U, 101778276U, 101778292U, 101789716U}, 
{8192U, 101778280U, 101778296U, 101789720U}, {16384U, 101778284U, 101778300U, 101789724U}}; static struct scsi_host_template qla4xxx_driver_template = {& __this_module, "qla4xxx", 0, 0, 0, 0, 0, & qla4xxx_queuecommand, & qla4xxx_eh_abort, & qla4xxx_eh_device_reset, & qla4xxx_eh_target_reset, 0, & qla4xxx_eh_host_reset, & qla4xxx_slave_alloc, 0, 0, 0, 0, 0, 0, & scsi_change_queue_depth, 0, 0, 0, 0, & qla4xxx_eh_cmd_timed_out, & qla4xxx_host_reset, "qla4xxx", 0, 0, -1, 128U, (unsigned short)0, 65535U, 0UL, 3, (unsigned char)0, 0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, 1U, (unsigned char)0, (unsigned char)0, (unsigned char)0, (unsigned char)0, 0U, (struct device_attribute **)(& qla4xxx_host_attrs), 0, {0, 0}, 72057594037932151ULL, 0U, 0, (_Bool)0}; static struct iscsi_transport qla4xxx_iscsi_transport = {& __this_module, (char *)"qla4xxx", 18616U, & qla4xxx_session_create, & qla4xxx_session_destroy, & qla4xxx_conn_create, & qla4xxx_conn_bind, & qla4xxx_conn_start, & iscsi_conn_stop, & qla4xxx_conn_destroy, & iscsi_set_param, & qla4xxx_get_ep_param, & qla4xxx_conn_get_param, & qla4xxx_session_get_param, & qla4xxx_host_get_param, 0, & iscsi_conn_send_pdu, & qla4xxx_conn_get_stats, 0, & qla4xxx_task_xmit, & qla4xxx_task_cleanup, & qla4xxx_alloc_pdu, 0, 0, 0, 0, & qla4xxx_ep_connect, & qla4xxx_ep_poll, & qla4xxx_ep_disconnect, 0, 0, & qla4xxx_iface_set_param, & qla4xxx_get_iface_param, & qla4_attr_is_visible, & qla4xxx_bsg_request, & qla4xxx_send_ping, & qla4xxx_get_chap_list, & qla4xxx_delete_chap, & qla4xxx_set_chap_entry, & qla4xxx_sysfs_ddb_get_param, & qla4xxx_sysfs_ddb_set_param, & qla4xxx_sysfs_ddb_add, & qla4xxx_sysfs_ddb_delete, & qla4xxx_sysfs_ddb_login, & qla4xxx_sysfs_ddb_logout, & qla4xxx_sysfs_ddb_logout_sid, & qla4xxx_get_host_stats, 0}; static struct scsi_transport_template *qla4xxx_scsi_transport ; static int qla4xxx_send_ping(struct Scsi_Host *shost , uint32_t iface_num , uint32_t iface_type , uint32_t payload_size , uint32_t pid , struct sockaddr *dst_addr ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct sockaddr_in *addr ; struct sockaddr_in6 *addr6 ; uint32_t options ; uint8_t ipaddr[16U] ; int rval ; { tmp = to_qla_host(shost); ha = tmp; options = 0U; memset((void *)(& ipaddr), 0, 16UL); if (iface_type == 1U && (unsigned int )dst_addr->sa_family == 2U) { addr = (struct sockaddr_in *)dst_addr; memcpy((void *)(& ipaddr), (void const *)(& addr->sin_addr.s_addr), 4UL); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IPv4 Ping src: %pI4 dest: %pI4\n", "qla4xxx_send_ping", & ha->ip_config.ip_address, (uint8_t *)(& ipaddr)); } else { } rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, (uint8_t *)(& ipaddr)); if (rval != 0) { rval = -22; } else { } } else if (iface_type == 2U && (unsigned int )dst_addr->sa_family == 10U) { addr6 = (struct sockaddr_in6 *)dst_addr; memcpy((void *)(& ipaddr), (void const *)(& addr6->sin6_addr.in6_u.u6_addr8), 16UL); options = options | 1U; if (iface_num == 0U || iface_num == 1U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: LinkLocal Ping src: %pI6 dest: %pI6\n", "qla4xxx_send_ping", & ha->ip_config.ipv6_link_local_addr, (uint8_t *)(& ipaddr)); } else { } options = options | 4U; rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, (uint8_t *)(& ipaddr)); } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: iface num = %d not supported\n", "qla4xxx_send_ping", 
iface_num); rval = -38; goto exit_send_ping; } if (rval != 0) { options = options & 4294967291U; if (iface_num == 0U) { options = options | 8U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IPv6 Ping src: %pI6 dest: %pI6\n", "qla4xxx_send_ping", & ha->ip_config.ipv6_addr0, (uint8_t *)(& ipaddr)); } else { } } else if (iface_num == 1U) { options = options | 12U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IPv6 Ping src: %pI6 dest: %pI6\n", "qla4xxx_send_ping", & ha->ip_config.ipv6_addr1, (uint8_t *)(& ipaddr)); } else { } } else { } rval = qla4xxx_ping_iocb(ha, options, payload_size, pid, (uint8_t *)(& ipaddr)); if (rval != 0) { rval = -22; } else { } } else { } } else { rval = -38; } exit_send_ping: ; return (rval); } } static umode_t qla4_attr_is_visible(int param_type , int param ) { { switch (param_type) { case 1: ; switch (param) { case 0: ; case 3: ; case 1: ; case 4: ; case 5: ; return (292U); default: ; return (0U); } case 0: ; switch (param) { case 17: ; case 18: ; case 21: ; case 20: ; case 15: ; case 16: ; case 36: ; case 8: ; case 5: ; case 7: ; case 0: ; case 1: ; case 32: ; case 38: ; case 37: ; case 22: ; case 24: ; case 23: ; case 25: ; case 42: ; case 43: ; case 44: ; case 45: ; case 46: ; case 47: ; case 48: ; case 49: ; case 50: ; case 2: ; case 3: ; case 4: ; case 6: ; case 9: ; case 10: ; case 51: ; case 57: ; case 58: ; case 59: ; case 60: ; case 61: ; case 62: ; case 63: ; case 64: ; case 65: ; case 66: ; case 67: ; case 68: ; case 53: ; case 54: ; case 33: ; case 55: ; case 56: ; case 11: ; case 52: ; case 14: ; case 69: ; case 70: ; case 71: ; return (292U); default: ; return (0U); } case 2: ; switch (param) { case 1: ; case 2: ; case 3: ; case 4: ; case 12: ; case 6: ; case 7: ; case 8: ; case 9: ; case 10: ; case 13: ; case 14: ; case 15: ; case 19: ; case 20: ; case 21: ; case 22: ; case 23: ; case 24: ; case 25: ; case 26: ; case 27: ; case 28: ; case 29: ; case 30: ; case 31: ; case 32: ; case 33: ; case 34: ; case 35: ; case 36: ; case 37: ; case 38: ; case 39: ; case 40: ; case 41: ; case 42: ; case 43: ; case 55: ; case 44: ; case 45: ; case 46: ; case 47: ; case 48: ; case 49: ; case 50: ; case 51: ; case 52: ; case 53: ; case 54: ; return (292U); default: ; return (0U); } case 5: ; switch (param) { case 0: ; case 1: ; case 2: ; case 3: ; case 4: ; case 5: ; case 6: ; case 7: ; case 8: ; case 9: ; case 10: ; case 11: ; case 12: ; case 13: ; case 14: ; case 15: ; case 16: ; case 17: ; return (292U); default: ; return (0U); } case 3: ; switch (param) { case 0: ; case 1: ; case 2: ; case 3: ; case 4: ; case 5: ; case 6: ; case 7: ; case 8: ; case 9: ; case 10: ; case 11: ; case 12: ; case 13: ; case 14: ; case 15: ; case 16: ; case 17: ; case 18: ; case 19: ; case 20: ; case 21: ; case 22: ; case 23: ; case 24: ; case 25: ; case 26: ; case 27: ; case 28: ; case 29: ; case 30: ; case 31: ; case 32: ; case 33: ; case 34: ; case 35: ; case 36: ; case 37: ; case 38: ; case 39: ; case 40: ; case 41: ; case 42: ; case 43: ; case 44: ; case 45: ; case 46: ; case 47: ; case 48: ; case 49: ; case 51: ; case 52: ; case 54: ; case 56: ; case 57: ; case 58: ; return (292U); default: ; return (0U); } } return (0U); } } static void qla4xxx_create_chap_list(struct scsi_qla_host *ha ) { int rval ; uint8_t *chap_flash_data ; uint32_t offset ; dma_addr_t chap_dma ; uint32_t chap_size ; int tmp ; void *tmp___0 ; int tmp___1 ; void *tmp___2 ; { rval = 0; 
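/*
 * qla4xxx_create_chap_list() rebuilds the in-memory CHAP cache, ha->chap_list:
 * it chooses the CHAP region size per ISP family (46592 bytes, 0xB600, on
 * ISP40xx; half of hw.flt_chap_size on ISP80xx), allocates a DMA-coherent
 * bounce buffer, reads the region from flash through qla4xxx_get_flash(), and
 * copies it into a vmalloc'ed cache.  The decimal offsets used below,
 * 100663296U (0x06000000) and 2382364672U (0x8E000000) plus
 * (flt_region_chap << 2), appear to be the flash-layout base addresses of the
 * CHAP table for the two chip families.
 */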
chap_flash_data = (uint8_t *)0U; chap_size = 0U; tmp = is_qla40XX(ha); if (tmp != 0) { chap_size = 46592U; } else { chap_size = ha->hw.flt_chap_size / 2U; } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )chap_size, & chap_dma, 208U, (struct dma_attrs *)0); chap_flash_data = (uint8_t *)tmp___0; if ((unsigned long )chap_flash_data == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No memory for chap_flash_data\n"); return; } else { } tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { offset = 100663296U; } else { offset = (ha->hw.flt_region_chap << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { offset = offset + chap_size; } else { } } rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); if (rval != 0) { goto exit_chap_list; } else { } if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { tmp___2 = ldv_vmalloc_44((unsigned long )chap_size); ha->chap_list = (uint8_t *)tmp___2; } else { } if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No memory for ha->chap_list\n"); goto exit_chap_list; } else { } memset((void *)ha->chap_list, 0, (size_t )chap_size); memcpy((void *)ha->chap_list, (void const *)chap_flash_data, (size_t )chap_size); exit_chap_list: dma_free_attrs(& (ha->pdev)->dev, (size_t )chap_size, (void *)chap_flash_data, chap_dma, (struct dma_attrs *)0); return; } } static int qla4xxx_get_chap_by_index(struct scsi_qla_host *ha , int16_t chap_index , struct ql4_chap_table **chap_entry ) { int rval ; int max_chap_entries ; int tmp ; { rval = 1; if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "CHAP table cache is empty!\n"); rval = 1; goto exit_get_chap; } else { } tmp = is_qla80XX(ha); if (tmp != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((int )chap_index > max_chap_entries) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid Chap index\n"); rval = 1; goto exit_get_chap; } else { } *chap_entry = (struct ql4_chap_table *)ha->chap_list + (unsigned long )chap_index; if ((unsigned int )(*chap_entry)->cookie != 16530U) { rval = 1; *chap_entry = (struct ql4_chap_table *)0; } else { rval = 0; } exit_get_chap: ; return (rval); } } static int qla4xxx_find_free_chap_index(struct scsi_qla_host *ha , uint16_t *chap_index ) { int i ; int rval ; int free_index ; int max_chap_entries ; struct ql4_chap_table *chap_table ; int tmp ; { free_index = -1; max_chap_entries = 0; tmp = is_qla80XX(ha); if (tmp != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "CHAP table cache is empty!\n"); rval = 1; goto exit_find_chap; } else { } i = 0; goto ldv_64132; ldv_64131: chap_table = (struct ql4_chap_table *)ha->chap_list + (unsigned long )i; if ((unsigned int )chap_table->cookie != 16530U && i > 3) { free_index = i; goto ldv_64130; } else { } i = i + 1; ldv_64132: ; if (i < max_chap_entries) { goto ldv_64131; } else { } ldv_64130: ; if (free_index != -1) { *chap_index = (uint16_t )free_index; rval = 0; } else { rval = 1; } exit_find_chap: ; return (rval); } } static int qla4xxx_get_chap_list(struct Scsi_Host *shost , uint16_t chap_tbl_idx , uint32_t *num_entries , char *buf ) { struct scsi_qla_host *ha ; struct 
scsi_qla_host *tmp ; struct ql4_chap_table *chap_table ; struct iscsi_chap_rec *chap_rec ; int max_chap_entries ; int valid_chap_entries ; int ret ; int i ; int tmp___0 ; { tmp = to_qla_host(shost); ha = tmp; max_chap_entries = 0; valid_chap_entries = 0; ret = 0; tmp___0 = is_qla80XX(ha); if (tmp___0 != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: num_entries = %d, CHAP idx = %d\n", "qla4xxx_get_chap_list", *num_entries, (int )chap_tbl_idx); if ((unsigned long )buf == (unsigned long )((char *)0)) { ret = -12; goto exit_get_chap_list; } else { } qla4xxx_create_chap_list(ha); chap_rec = (struct iscsi_chap_rec *)buf; mutex_lock_nested(& ha->chap_sem, 0U); i = (int )chap_tbl_idx; goto ldv_64151; ldv_64150: chap_table = (struct ql4_chap_table *)ha->chap_list + (unsigned long )i; if ((unsigned int )chap_table->cookie != 16530U) { goto ldv_64148; } else { } chap_rec->chap_tbl_idx = (uint16_t )i; strlcpy((char *)(& chap_rec->username), (char const *)(& chap_table->name), 256UL); strlcpy((char *)(& chap_rec->password), (char const *)(& chap_table->secret), 100UL); chap_rec->password_length = chap_table->secret_len; if ((int )((signed char )chap_table->flags) < 0) { chap_rec->chap_type = 0; } else { } if (((int )chap_table->flags & 64) != 0) { chap_rec->chap_type = 1; } else { } chap_rec = chap_rec + 1; valid_chap_entries = valid_chap_entries + 1; if ((uint32_t )valid_chap_entries == *num_entries) { goto ldv_64149; } else { } ldv_64148: i = i + 1; ldv_64151: ; if (i < max_chap_entries) { goto ldv_64150; } else { } ldv_64149: mutex_unlock(& ha->chap_sem); exit_get_chap_list: dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Valid CHAP Entries = %d\n", "qla4xxx_get_chap_list", valid_chap_entries); *num_entries = (uint32_t )valid_chap_entries; return (ret); } } static int __qla4xxx_is_chap_active(struct device *dev , void *data ) { int ret ; uint16_t *chap_tbl_idx ; struct iscsi_cls_session *cls_session ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; int tmp ; struct device const *__mptr ; int tmp___0 ; { ret = 0; chap_tbl_idx = (uint16_t *)data; tmp = iscsi_is_session_dev((struct device const *)dev); if (tmp == 0) { goto exit_is_chap_active; } else { } __mptr = (struct device const *)dev; cls_session = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; tmp___0 = iscsi_session_chkready(cls_session); if (tmp___0 != 0) { goto exit_is_chap_active; } else { } if ((int )ddb_entry->chap_tbl_idx == (int )*chap_tbl_idx) { ret = 1; } else { } exit_is_chap_active: ; return (ret); } } static int qla4xxx_is_chap_active(struct Scsi_Host *shost , uint16_t chap_tbl_idx ) { int ret ; { ret = 0; ret = device_for_each_child(& shost->shost_gendev, (void *)(& chap_tbl_idx), & __qla4xxx_is_chap_active); return (ret); } } static int qla4xxx_delete_chap(struct Scsi_Host *shost , uint16_t chap_tbl_idx ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct ql4_chap_table *chap_table ; dma_addr_t chap_dma ; int max_chap_entries ; uint32_t offset ; uint32_t chap_size ; int ret ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = to_qla_host(shost); ha = tmp; max_chap_entries = 0; offset = 0U; ret = 0; tmp___0 = ldv_dma_pool_alloc_45(ha->chap_dma_pool, 208U, & chap_dma); chap_table = (struct ql4_chap_table *)tmp___0; if ((unsigned long )chap_table == (unsigned long 
)((struct ql4_chap_table *)0)) { return (-12); } else { } memset((void *)chap_table, 0, 364UL); tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((int )chap_tbl_idx > max_chap_entries) { ret = -22; goto exit_delete_chap; } else { } ret = qla4xxx_is_chap_active(shost, (int )chap_tbl_idx); if (ret != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "CHAP entry %d is in use, cannot delete from flash\n", (int )chap_tbl_idx); ret = -16; goto exit_delete_chap; } else { } chap_size = 364U; tmp___2 = is_qla40XX(ha); if (tmp___2 != 0) { offset = (uint32_t )chap_tbl_idx * chap_size | 100663296U; } else { offset = (ha->hw.flt_region_chap << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { offset = ha->hw.flt_chap_size / 2U + offset; } else { } offset = (uint32_t )chap_tbl_idx * chap_size + offset; } ret = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); if (ret != 0) { ret = -22; goto exit_delete_chap; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Chap Cookie: x%x\n", (int )chap_table->cookie); } else { } if ((unsigned int )chap_table->cookie != 16530U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No valid chap entry found\n"); goto exit_delete_chap; } else { } chap_table->cookie = 65535U; offset = (uint32_t )chap_tbl_idx * 364U | 100663296U; ret = qla4xxx_set_flash(ha, chap_dma, offset, chap_size, 3U); if (ret == 0 && (unsigned long )ha->chap_list != (unsigned long )((uint8_t *)0U)) { mutex_lock_nested(& ha->chap_sem, 0U); memcpy((void *)ha->chap_list + (unsigned long )chap_tbl_idx, (void const *)chap_table, 364UL); mutex_unlock(& ha->chap_sem); } else { } if (ret != 0) { ret = -22; } else { } exit_delete_chap: dma_pool_free(ha->chap_dma_pool, (void *)chap_table, chap_dma); return (ret); } } static int qla4xxx_set_chap_entry(struct Scsi_Host *shost , void *data , int len ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct iscsi_chap_rec chap_rec ; struct ql4_chap_table *chap_entry ; struct iscsi_param_info *param_info ; struct nlattr *attr ; int max_chap_entries ; int type ; int rem ; int rc ; int size ; void *tmp___0 ; size_t __min1 ; size_t __min2 ; size_t __min1___0 ; size_t __min2___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = to_qla_host(shost); ha = tmp; chap_entry = (struct ql4_chap_table *)0; max_chap_entries = 0; rem = len; rc = 0; memset((void *)(& chap_rec), 0, 524UL); attr = (struct nlattr *)data; rem = len; goto ldv_64212; ldv_64211: tmp___0 = nla_data((struct nlattr const *)attr); param_info = (struct iscsi_param_info *)tmp___0; switch ((int )param_info->param) { case 0: chap_rec.chap_tbl_idx = *((uint16_t *)(& param_info->value)); goto ldv_64197; case 1: chap_rec.chap_type = (enum chap_type_e )param_info->value[0]; goto ldv_64197; case 2: __min1 = 256UL; __min2 = (size_t )param_info->len; size = (int )(__min1 < __min2 ? __min1 : __min2); memcpy((void *)(& chap_rec.username), (void const *)(& param_info->value), (size_t )size); goto ldv_64197; case 3: __min1___0 = 256UL; __min2___0 = (size_t )param_info->len; size = (int )(__min1___0 < __min2___0 ? 
__min1___0 : __min2___0); memcpy((void *)(& chap_rec.password), (void const *)(& param_info->value), (size_t )size); goto ldv_64197; case 4: chap_rec.password_length = param_info->value[0]; goto ldv_64197; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: No such sysfs attribute\n", "qla4xxx_set_chap_entry"); rc = -38; goto exit_set_chap; } ldv_64197: attr = nla_next((struct nlattr const *)attr, & rem); ldv_64212: tmp___1 = nla_ok((struct nlattr const *)attr, rem); if (tmp___1 != 0) { goto ldv_64211; } else { } if ((unsigned int )chap_rec.chap_type == 1U) { type = 1; } else { type = 0; } tmp___2 = is_qla80XX(ha); if (tmp___2 != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } mutex_lock_nested(& ha->chap_sem, 0U); if ((int )chap_rec.chap_tbl_idx < max_chap_entries) { rc = qla4xxx_get_chap_by_index(ha, (int )((int16_t )chap_rec.chap_tbl_idx), & chap_entry); if (rc == 0) { tmp___3 = qla4xxx_get_chap_type(chap_entry); if (tmp___3 != type) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Type mismatch for CHAP entry %d\n", (int )chap_rec.chap_tbl_idx); rc = -22; goto exit_unlock_chap; } else { } rc = qla4xxx_is_chap_active(shost, (int )chap_rec.chap_tbl_idx); if (rc != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "CHAP entry %d is in use\n", (int )chap_rec.chap_tbl_idx); rc = -16; goto exit_unlock_chap; } else { } } else { } } else { rc = qla4xxx_find_free_chap_index(ha, & chap_rec.chap_tbl_idx); if (rc != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "CHAP entry not available\n"); rc = -16; goto exit_unlock_chap; } else { } } rc = qla4xxx_set_chap(ha, (char *)(& chap_rec.username), (char *)(& chap_rec.password), (int )chap_rec.chap_tbl_idx, type); exit_unlock_chap: mutex_unlock(& ha->chap_sem); exit_set_chap: ; return (rc); } } static int qla4xxx_get_host_stats(struct Scsi_Host *shost , char *buf , int len ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct iscsi_offload_host_stats *host_stats ; int host_stats_size ; int ret ; int ddb_idx ; struct ql_iscsi_stats *ql_iscsi_stats ; int stats_size ; dma_addr_t iscsi_stats_dma ; void *tmp___0 ; { tmp = to_qla_host(shost); ha = tmp; host_stats = (struct iscsi_offload_host_stats *)0; ret = 0; ddb_idx = 0; ql_iscsi_stats = (struct ql_iscsi_stats *)0; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Func: %s\n", "qla4xxx_get_host_stats"); } else { } host_stats_size = 664; if (host_stats_size != len) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host_stats size mismatch expected = %d, is = %d\n", "qla4xxx_get_host_stats", len, host_stats_size); ret = -22; goto exit_host_stats; } else { } host_stats = (struct iscsi_offload_host_stats *)buf; if ((unsigned long )buf == (unsigned long )((char *)0)) { ret = -12; goto exit_host_stats; } else { } stats_size = 4096; tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )stats_size, & iscsi_stats_dma, 208U, (struct dma_attrs *)0); ql_iscsi_stats = (struct ql_iscsi_stats *)tmp___0; if ((unsigned long )ql_iscsi_stats == (unsigned long )((struct ql_iscsi_stats *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to allocate memory for iscsi stats\n"); ret = -12; goto exit_host_stats; } else { } ret = qla4xxx_get_mgmt_data(ha, (int )((uint16_t )ddb_idx), (int )((uint16_t )stats_size), iscsi_stats_dma); if (ret != 0) { dev_printk("\v", (struct device const *)(& 
(ha->pdev)->dev), "Unable to retrieve iscsi stats\n"); ret = -5; goto exit_host_stats; } else { } host_stats->mactx_frames = ql_iscsi_stats->mac_tx_frames; host_stats->mactx_bytes = ql_iscsi_stats->mac_tx_bytes; host_stats->mactx_multicast_frames = ql_iscsi_stats->mac_tx_multicast_frames; host_stats->mactx_broadcast_frames = ql_iscsi_stats->mac_tx_broadcast_frames; host_stats->mactx_pause_frames = ql_iscsi_stats->mac_tx_pause_frames; host_stats->mactx_control_frames = ql_iscsi_stats->mac_tx_control_frames; host_stats->mactx_deferral = ql_iscsi_stats->mac_tx_deferral; host_stats->mactx_excess_deferral = ql_iscsi_stats->mac_tx_excess_deferral; host_stats->mactx_late_collision = ql_iscsi_stats->mac_tx_late_collision; host_stats->mactx_abort = ql_iscsi_stats->mac_tx_abort; host_stats->mactx_single_collision = ql_iscsi_stats->mac_tx_single_collision; host_stats->mactx_multiple_collision = ql_iscsi_stats->mac_tx_multiple_collision; host_stats->mactx_collision = ql_iscsi_stats->mac_tx_collision; host_stats->mactx_frames_dropped = ql_iscsi_stats->mac_tx_frames_dropped; host_stats->mactx_jumbo_frames = ql_iscsi_stats->mac_tx_jumbo_frames; host_stats->macrx_frames = ql_iscsi_stats->mac_rx_frames; host_stats->macrx_bytes = ql_iscsi_stats->mac_rx_bytes; host_stats->macrx_unknown_control_frames = ql_iscsi_stats->mac_rx_unknown_control_frames; host_stats->macrx_pause_frames = ql_iscsi_stats->mac_rx_pause_frames; host_stats->macrx_control_frames = ql_iscsi_stats->mac_rx_control_frames; host_stats->macrx_dribble = ql_iscsi_stats->mac_rx_dribble; host_stats->macrx_frame_length_error = ql_iscsi_stats->mac_rx_frame_length_error; host_stats->macrx_jabber = ql_iscsi_stats->mac_rx_jabber; host_stats->macrx_carrier_sense_error = ql_iscsi_stats->mac_rx_carrier_sense_error; host_stats->macrx_frame_discarded = ql_iscsi_stats->mac_rx_frame_discarded; host_stats->macrx_frames_dropped = ql_iscsi_stats->mac_rx_frames_dropped; host_stats->mac_crc_error = ql_iscsi_stats->mac_crc_error; host_stats->mac_encoding_error = ql_iscsi_stats->mac_encoding_error; host_stats->macrx_length_error_large = ql_iscsi_stats->mac_rx_length_error_large; host_stats->macrx_length_error_small = ql_iscsi_stats->mac_rx_length_error_small; host_stats->macrx_multicast_frames = ql_iscsi_stats->mac_rx_multicast_frames; host_stats->macrx_broadcast_frames = ql_iscsi_stats->mac_rx_broadcast_frames; host_stats->iptx_packets = ql_iscsi_stats->ip_tx_packets; host_stats->iptx_bytes = ql_iscsi_stats->ip_tx_bytes; host_stats->iptx_fragments = ql_iscsi_stats->ip_tx_fragments; host_stats->iprx_packets = ql_iscsi_stats->ip_rx_packets; host_stats->iprx_bytes = ql_iscsi_stats->ip_rx_bytes; host_stats->iprx_fragments = ql_iscsi_stats->ip_rx_fragments; host_stats->ip_datagram_reassembly = ql_iscsi_stats->ip_datagram_reassembly; host_stats->ip_invalid_address_error = ql_iscsi_stats->ip_invalid_address_error; host_stats->ip_error_packets = ql_iscsi_stats->ip_error_packets; host_stats->ip_fragrx_overlap = ql_iscsi_stats->ip_fragrx_overlap; host_stats->ip_fragrx_outoforder = ql_iscsi_stats->ip_fragrx_outoforder; host_stats->ip_datagram_reassembly_timeout = ql_iscsi_stats->ip_datagram_reassembly_timeout; host_stats->ipv6tx_packets = ql_iscsi_stats->ipv6_tx_packets; host_stats->ipv6tx_bytes = ql_iscsi_stats->ipv6_tx_bytes; host_stats->ipv6tx_fragments = ql_iscsi_stats->ipv6_tx_fragments; host_stats->ipv6rx_packets = ql_iscsi_stats->ipv6_rx_packets; host_stats->ipv6rx_bytes = ql_iscsi_stats->ipv6_rx_bytes; host_stats->ipv6rx_fragments = ql_iscsi_stats->ipv6_rx_fragments; 
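/*
 * The long run of assignments above and below is a one-to-one translation of
 * the firmware statistics block (struct ql_iscsi_stats, fetched into a 4096
 * byte DMA buffer by qla4xxx_get_mgmt_data()) into the transport-visible
 * struct iscsi_offload_host_stats supplied by the caller in 'buf'.  Note that
 * the dma_free_attrs() at exit_host_stats below is passed host_stats_size
 * (664) rather than the stats_size (4096) that was used for the matching
 * dma_alloc_attrs() call.
 */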
host_stats->ipv6_datagram_reassembly = ql_iscsi_stats->ipv6_datagram_reassembly; host_stats->ipv6_invalid_address_error = ql_iscsi_stats->ipv6_invalid_address_error; host_stats->ipv6_error_packets = ql_iscsi_stats->ipv6_error_packets; host_stats->ipv6_fragrx_overlap = ql_iscsi_stats->ipv6_fragrx_overlap; host_stats->ipv6_fragrx_outoforder = ql_iscsi_stats->ipv6_fragrx_outoforder; host_stats->ipv6_datagram_reassembly_timeout = ql_iscsi_stats->ipv6_datagram_reassembly_timeout; host_stats->tcptx_segments = ql_iscsi_stats->tcp_tx_segments; host_stats->tcptx_bytes = ql_iscsi_stats->tcp_tx_bytes; host_stats->tcprx_segments = ql_iscsi_stats->tcp_rx_segments; host_stats->tcprx_byte = ql_iscsi_stats->tcp_rx_byte; host_stats->tcp_duplicate_ack_retx = ql_iscsi_stats->tcp_duplicate_ack_retx; host_stats->tcp_retx_timer_expired = ql_iscsi_stats->tcp_retx_timer_expired; host_stats->tcprx_duplicate_ack = ql_iscsi_stats->tcp_rx_duplicate_ack; host_stats->tcprx_pure_ackr = ql_iscsi_stats->tcp_rx_pure_ackr; host_stats->tcptx_delayed_ack = ql_iscsi_stats->tcp_tx_delayed_ack; host_stats->tcptx_pure_ack = ql_iscsi_stats->tcp_tx_pure_ack; host_stats->tcprx_segment_error = ql_iscsi_stats->tcp_rx_segment_error; host_stats->tcprx_segment_outoforder = ql_iscsi_stats->tcp_rx_segment_outoforder; host_stats->tcprx_window_probe = ql_iscsi_stats->tcp_rx_window_probe; host_stats->tcprx_window_update = ql_iscsi_stats->tcp_rx_window_update; host_stats->tcptx_window_probe_persist = ql_iscsi_stats->tcp_tx_window_probe_persist; host_stats->ecc_error_correction = ql_iscsi_stats->ecc_error_correction; host_stats->iscsi_pdu_tx = ql_iscsi_stats->iscsi_pdu_tx; host_stats->iscsi_data_bytes_tx = ql_iscsi_stats->iscsi_data_bytes_tx; host_stats->iscsi_pdu_rx = ql_iscsi_stats->iscsi_pdu_rx; host_stats->iscsi_data_bytes_rx = ql_iscsi_stats->iscsi_data_bytes_rx; host_stats->iscsi_io_completed = ql_iscsi_stats->iscsi_io_completed; host_stats->iscsi_unexpected_io_rx = ql_iscsi_stats->iscsi_unexpected_io_rx; host_stats->iscsi_format_error = ql_iscsi_stats->iscsi_format_error; host_stats->iscsi_hdr_digest_error = ql_iscsi_stats->iscsi_hdr_digest_error; host_stats->iscsi_data_digest_error = ql_iscsi_stats->iscsi_data_digest_error; host_stats->iscsi_sequence_error = ql_iscsi_stats->iscsi_sequence_error; exit_host_stats: ; if ((unsigned long )ql_iscsi_stats != (unsigned long )((struct ql_iscsi_stats *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )host_stats_size, (void *)ql_iscsi_stats, iscsi_stats_dma, (struct dma_attrs *)0); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Get host stats done\n", "qla4xxx_get_host_stats"); return (ret); } } static int qla4xxx_get_iface_param(struct iscsi_iface *iface , enum iscsi_param_type param_type , int param , char *buf ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; int ival ; char *pval ; int len ; { tmp = dev_to_shost(iface->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; pval = (char *)0; len = -38; if ((unsigned int )param_type == 2U) { switch (param) { case 1: len = sprintf(buf, "%pI4\n", & ha->ip_config.ip_address); goto ldv_64242; case 2: len = sprintf(buf, "%pI4\n", & ha->ip_config.subnet_mask); goto ldv_64242; case 3: len = sprintf(buf, "%pI4\n", & ha->ip_config.gateway); goto ldv_64242; case 12: ; if (iface->iface_type == 1U) { pval = (int )((short )ha->ip_config.ipv4_options) < 0 ? 
(char *)"enable" : (char *)"disable"; } else { pval = (ha->ip_config.ipv6_options & 32768U) != 0U ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 4: len = sprintf(buf, "%s\n", ((int )ha->ip_config.tcp_options & 512) != 0 ? (char *)"dhcp" : (char *)"static"); goto ldv_64242; case 7: ; if (iface->iface_num == 0U) { len = sprintf(buf, "%pI6\n", & ha->ip_config.ipv6_addr0); } else { } if (iface->iface_num == 1U) { len = sprintf(buf, "%pI6\n", & ha->ip_config.ipv6_addr1); } else { } goto ldv_64242; case 6: len = sprintf(buf, "%pI6\n", & ha->ip_config.ipv6_link_local_addr); goto ldv_64242; case 8: len = sprintf(buf, "%pI6\n", & ha->ip_config.ipv6_default_router_addr); goto ldv_64242; case 9: pval = (ha->ip_config.ipv6_addl_options & 2U) != 0U ? (char *)"nd" : (char *)"static"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 10: pval = (int )ha->ip_config.ipv6_addl_options & 1 ? (char *)"auto" : (char *)"static"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 13: ; if (iface->iface_type == 1U) { ival = (int )ha->ip_config.ipv4_vlan_tag & 4095; } else { ival = (int )ha->ip_config.ipv6_vlan_tag & 4095; } len = sprintf(buf, "%d\n", ival); goto ldv_64242; case 14: ; if (iface->iface_type == 1U) { ival = ((int )ha->ip_config.ipv4_vlan_tag >> 13) & 7; } else { ival = ((int )ha->ip_config.ipv6_vlan_tag >> 13) & 7; } len = sprintf(buf, "%d\n", ival); goto ldv_64242; case 15: ; if (iface->iface_type == 1U) { pval = ((int )ha->ip_config.ipv4_options & 8192) != 0 ? (char *)"enable" : (char *)"disable"; } else { pval = (ha->ip_config.ipv6_options & 8192U) != 0U ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 19: len = sprintf(buf, "%d\n", (int )ha->ip_config.eth_mtu_size); goto ldv_64242; case 20: ; if (iface->iface_type == 1U) { len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv4_port); } else { len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_port); } goto ldv_64242; case 21: ; if (iface->iface_type == 1U) { pval = iscsi_get_ipaddress_state_name((enum iscsi_ipaddress_state )ha->ip_config.ipv4_addr_state); } else if (iface->iface_num == 0U) { pval = iscsi_get_ipaddress_state_name((enum iscsi_ipaddress_state )ha->ip_config.ipv6_addr0_state); } else if (iface->iface_num == 1U) { pval = iscsi_get_ipaddress_state_name((enum iscsi_ipaddress_state )ha->ip_config.ipv6_addr1_state); } else { } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 22: pval = iscsi_get_ipaddress_state_name((enum iscsi_ipaddress_state )ha->ip_config.ipv6_link_local_state); len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 23: pval = iscsi_get_router_state_name((enum iscsi_router_state )ha->ip_config.ipv6_default_router_state); len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 24: ; if (iface->iface_type == 1U) { pval = (int )((short )ha->ip_config.tcp_options) >= 0 ? (char *)"enable" : (char *)"disable"; } else { pval = (int )((short )ha->ip_config.ipv6_tcp_options) >= 0 ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 25: ; if (iface->iface_type == 1U) { pval = ((int )ha->ip_config.tcp_options & 32) == 0 ? (char *)"enable" : (char *)"disable"; } else { pval = ((int )ha->ip_config.ipv6_tcp_options & 32) == 0 ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 26: ; if (iface->iface_type == 1U) { pval = ((int )ha->ip_config.tcp_options & 16) == 0 ? 
(char *)"enable" : (char *)"disable"; } else { pval = ((int )ha->ip_config.ipv6_tcp_options & 16) == 0 ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 27: ; if (iface->iface_type == 1U) { len = sprintf(buf, "%d\n", (int )ha->ip_config.tcp_wsf); } else { len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_tcp_wsf); } goto ldv_64242; case 28: ; if (iface->iface_type == 1U) { ival = ((int )ha->ip_config.tcp_options & 14) >> 1; } else { ival = ((int )ha->ip_config.ipv6_tcp_options & 14) >> 1; } len = sprintf(buf, "%d\n", ival); goto ldv_64242; case 29: ; if (iface->iface_type == 1U) { pval = (int )ha->ip_config.tcp_options & 1 ? (char *)"enable" : (char *)"disable"; } else { pval = (int )ha->ip_config.ipv6_tcp_options & 1 ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 30: ; if (iface->iface_type == 1U) { len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv4_cache_id); } else { len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_cache_id); } goto ldv_64242; case 31: pval = ((int )ha->ip_config.tcp_options & 256) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 32: pval = ((int )ha->ip_config.tcp_options & 128) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 33: pval = ((int )ha->ip_config.ipv4_options & 16384) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 34: len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv4_tos); goto ldv_64242; case 35: pval = ((int )ha->ip_config.ipv4_options & 4096) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 36: pval = ((int )ha->ip_config.ipv4_options & 2048) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 37: pval = (unsigned int )ha->ip_config.ipv4_alt_cid_len != 0U ? (char *)(& ha->ip_config.ipv4_alt_cid) : (char *)""; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 38: pval = ((int )ha->ip_config.ipv4_options & 1024) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 39: pval = ((int )ha->ip_config.ipv4_options & 512) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 40: pval = (unsigned int )ha->ip_config.ipv4_vid_len != 0U ? (char *)(& ha->ip_config.ipv4_vid) : (char *)""; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 41: pval = ((int )ha->ip_config.ipv4_options & 256) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 42: pval = ((int )ha->ip_config.ipv4_options & 16) == 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 43: pval = ((int )ha->ip_config.ipv4_options & 8) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 55: ; if (iface->iface_type == 1U) { pval = ((int )ha->ip_config.ipv4_options & 4) != 0 ? (char *)"enable" : (char *)"disable"; } else { pval = (ha->ip_config.ipv6_options & 4U) != 0U ? (char *)"enable" : (char *)"disable"; } len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 44: len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv4_ttl); goto ldv_64242; case 45: pval = (ha->ip_config.ipv6_options & 4096U) != 0U ? 
(char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 46: pval = (ha->ip_config.ipv6_addl_options & 4U) != 0U ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64242; case 47: len = sprintf(buf, "%u\n", (int )ha->ip_config.ipv6_flow_lbl); goto ldv_64242; case 48: len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_traffic_class); goto ldv_64242; case 49: len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_hop_limit); goto ldv_64242; case 50: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_reach_time); goto ldv_64242; case 51: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_rexmit_timer); goto ldv_64242; case 52: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_nd_stale_timeout); goto ldv_64242; case 53: len = sprintf(buf, "%d\n", (int )ha->ip_config.ipv6_dup_addr_detect_count); goto ldv_64242; case 54: len = sprintf(buf, "%d\n", ha->ip_config.ipv6_gw_advrt_mtu); goto ldv_64242; default: len = -38; } ldv_64242: ; } else if ((unsigned int )param_type == 5U) { switch (param) { case 0: len = sprintf(buf, "%d\n", (int )ha->ip_config.def_timeout); goto ldv_64294; case 1: pval = ((int )ha->ip_config.iscsi_options & 8192) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 2: pval = ((int )ha->ip_config.iscsi_options & 4096) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 3: pval = ((int )ha->ip_config.iscsi_options & 2048) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 4: pval = ((int )ha->ip_config.iscsi_options & 1024) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 5: pval = ((int )ha->ip_config.iscsi_options & 512) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 6: pval = ((int )ha->ip_config.iscsi_options & 256) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 7: len = sprintf(buf, "%d\n", (int )ha->ip_config.iscsi_options & 3); goto ldv_64294; case 8: len = sprintf(buf, "%u\n", (int )ha->ip_config.iscsi_max_pdu_size * 512); goto ldv_64294; case 9: len = sprintf(buf, "%u\n", (int )ha->ip_config.iscsi_first_burst_len * 512); goto ldv_64294; case 10: len = sprintf(buf, "%d\n", (int )ha->ip_config.iscsi_max_outstnd_r2t); goto ldv_64294; case 11: len = sprintf(buf, "%u\n", (int )ha->ip_config.iscsi_max_burst_len * 512); goto ldv_64294; case 12: pval = ((int )ha->ip_config.iscsi_options & 128) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 13: pval = ((int )ha->ip_config.iscsi_options & 16) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 14: pval = ((int )ha->ip_config.iscsi_options & 8) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 15: pval = ((int )ha->ip_config.iscsi_options & 32) != 0 ? (char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 16: pval = ((int )ha->ip_config.iscsi_options & 4) != 0 ? 
(char *)"enable" : (char *)"disable"; len = sprintf(buf, "%s\n", pval); goto ldv_64294; case 17: len = sprintf(buf, "%s\n", (uint8_t *)(& ha->ip_config.iscsi_name)); goto ldv_64294; default: len = -38; } ldv_64294: ; } else { } return (len); } } static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost , struct sockaddr *dst_addr , int non_blocking ) { int ret ; struct iscsi_endpoint *ep ; struct qla_endpoint *qla_ep ; struct scsi_qla_host *ha ; struct sockaddr_in *addr ; struct sockaddr_in6 *addr6 ; void *tmp ; void *tmp___0 ; void *tmp___1 ; { if ((unsigned long )shost == (unsigned long )((struct Scsi_Host *)0)) { ret = -6; printk("\v%s: shost is NULL\n", "qla4xxx_ep_connect"); tmp = ERR_PTR((long )ret); return ((struct iscsi_endpoint *)tmp); } else { } tmp___0 = shost_priv(shost); ha = (struct scsi_qla_host *)tmp___0 + 224U; ep = iscsi_create_endpoint(136); if ((unsigned long )ep == (unsigned long )((struct iscsi_endpoint *)0)) { ret = -12; tmp___1 = ERR_PTR((long )ret); return ((struct iscsi_endpoint *)tmp___1); } else { } qla_ep = (struct qla_endpoint *)ep->dd_data; memset((void *)qla_ep, 0, 136UL); if ((unsigned int )dst_addr->sa_family == 2U) { memcpy((void *)(& qla_ep->dst_addr), (void const *)dst_addr, 16UL); addr = (struct sockaddr_in *)(& qla_ep->dst_addr); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %pI4\n", "qla4xxx_ep_connect", (char *)(& addr->sin_addr)); } else { } } else if ((unsigned int )dst_addr->sa_family == 10U) { memcpy((void *)(& qla_ep->dst_addr), (void const *)dst_addr, 28UL); addr6 = (struct sockaddr_in6 *)(& qla_ep->dst_addr); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %pI6\n", "qla4xxx_ep_connect", (char *)(& addr6->sin6_addr)); } else { } } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid endpoint\n", "qla4xxx_ep_connect"); } qla_ep->host = shost; return (ep); } } static int qla4xxx_ep_poll(struct iscsi_endpoint *ep , int timeout_ms ) { struct qla_endpoint *qla_ep ; struct scsi_qla_host *ha ; int ret ; struct ratelimit_state _rs ; int tmp ; int tmp___0 ; int tmp___1 ; { ret = 0; qla_ep = (struct qla_endpoint *)ep->dd_data; ha = to_qla_host(qla_ep->host); if (ql4xextended_error_logging == 2) { _rs.lock.raw_lock.val.counter = 0; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp = ___ratelimit(& _rs, "qla4xxx_ep_poll"); if (tmp != 0) { printk("\016%s: host: %ld\n", "qla4xxx_ep_poll", ha->host_no); } else { } } else { } tmp___0 = adapter_up(ha); if (tmp___0 != 0) { tmp___1 = constant_test_bit(22L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 == 0) { ret = 1; } else { } } else { } return (ret); } } static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep ) { struct qla_endpoint *qla_ep ; struct scsi_qla_host *ha ; { qla_ep = (struct qla_endpoint *)ep->dd_data; ha = to_qla_host(qla_ep->host); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host: %ld\n", "qla4xxx_ep_disconnect", ha->host_no); } else { } iscsi_destroy_endpoint(ep); return; } } static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep , 
enum iscsi_param param , char *buf ) { struct qla_endpoint *qla_ep ; struct sockaddr *dst_addr ; struct scsi_qla_host *ha ; int tmp ; { qla_ep = (struct qla_endpoint *)ep->dd_data; if ((unsigned long )qla_ep == (unsigned long )((struct qla_endpoint *)0)) { return (-107); } else { } ha = to_qla_host(qla_ep->host); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host: %ld\n", "qla4xxx_get_ep_param", ha->host_no); } else { } switch ((unsigned int )param) { case 20U: ; case 21U: dst_addr = (struct sockaddr *)(& qla_ep->dst_addr); if ((unsigned long )dst_addr == (unsigned long )((struct sockaddr *)0)) { return (-107); } else { } tmp = iscsi_conn_get_addr_param(& qla_ep->dst_addr, param, buf); return (tmp); default: ; return (-38); } } } static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn , struct iscsi_stats *stats ) { struct iscsi_session *sess ; struct iscsi_cls_session *cls_sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct ql_iscsi_stats *ql_iscsi_stats ; int stats_size ; int ret ; dma_addr_t iscsi_stats_dma ; struct device const *__mptr ; void *tmp ; { __mptr = (struct device const *)cls_conn->dev.parent; cls_sess = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host: %ld\n", "qla4xxx_conn_get_stats", ha->host_no); } else { } stats_size = 4096; tmp = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )stats_size, & iscsi_stats_dma, 208U, (struct dma_attrs *)0); ql_iscsi_stats = (struct ql_iscsi_stats *)tmp; if ((unsigned long )ql_iscsi_stats == (unsigned long )((struct ql_iscsi_stats *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to allocate memory for iscsi stats\n"); goto exit_get_stats; } else { } ret = qla4xxx_get_mgmt_data(ha, (int )ddb_entry->fw_ddb_index, (int )((uint16_t )stats_size), iscsi_stats_dma); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to retrieve iscsi stats\n"); goto free_stats; } else { } stats->txdata_octets = ql_iscsi_stats->tx_data_octets; stats->rxdata_octets = ql_iscsi_stats->rx_data_octets; stats->noptx_pdus = ql_iscsi_stats->tx_nopout_pdus; stats->scsicmd_pdus = ql_iscsi_stats->tx_scsi_cmd_pdus; stats->tmfcmd_pdus = ql_iscsi_stats->tx_tmf_cmd_pdus; stats->login_pdus = ql_iscsi_stats->tx_login_cmd_pdus; stats->text_pdus = ql_iscsi_stats->tx_text_cmd_pdus; stats->dataout_pdus = ql_iscsi_stats->tx_scsi_write_pdus; stats->logout_pdus = ql_iscsi_stats->tx_logout_cmd_pdus; stats->snack_pdus = ql_iscsi_stats->tx_snack_req_pdus; stats->noprx_pdus = ql_iscsi_stats->rx_nopin_pdus; stats->scsirsp_pdus = ql_iscsi_stats->rx_scsi_resp_pdus; stats->tmfrsp_pdus = ql_iscsi_stats->rx_tmf_resp_pdus; stats->textrsp_pdus = ql_iscsi_stats->rx_text_resp_pdus; stats->datain_pdus = ql_iscsi_stats->rx_scsi_read_pdus; stats->logoutrsp_pdus = ql_iscsi_stats->rx_logout_resp_pdus; stats->r2t_pdus = ql_iscsi_stats->rx_r2t_pdus; stats->async_pdus = ql_iscsi_stats->rx_async_pdus; stats->rjt_pdus = ql_iscsi_stats->rx_reject_pdus; free_stats: dma_free_attrs(& (ha->pdev)->dev, (size_t )stats_size, (void *)ql_iscsi_stats, iscsi_stats_dma, (struct dma_attrs *)0); exit_get_stats: ; return; } } static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc ) { struct iscsi_cls_session *session ; 
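/* qla4xxx_eh_cmd_timed_out() below is the SCSI error-handler timeout callback:
 * it recovers the iSCSI class session from the timed-out command's target
 * (scsi_target()->dev.parent), takes the session lock through what appears to
 * be the LDV lock model (ldv_spin_lock paired with spin_unlock_irqrestore),
 * and returns 2 when session->state == 1 so the timer is re-armed instead of
 * escalating the error.  In the upstream qla4xxx driver those literals
 * correspond to BLK_EH_RESET_TIMER and ISCSI_SESSION_FAILED; the mapping is
 * noted here only as a reading aid for the verifier-generated constants. */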
struct iscsi_session *sess ; unsigned long flags ; enum blk_eh_timer_return ret ; struct device const *__mptr ; struct scsi_target *tmp ; { ret = 0; tmp = scsi_target(sc->device); __mptr = (struct device const *)tmp->dev.parent; session = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)session->dd_data; ldv_spin_lock(); if (session->state == 1) { ret = 2; } else { } spin_unlock_irqrestore(& session->lock, flags); return (ret); } } static void qla4xxx_set_port_speed(struct Scsi_Host *shost ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct iscsi_cls_host *ihost ; uint32_t speed ; { tmp = to_qla_host(shost); ha = tmp; ihost = (struct iscsi_cls_host *)shost->shost_data; speed = 1U; qla4xxx_get_firmware_state(ha); switch (ha->addl_fw_state & 3840U) { case 256U: speed = 2U; goto ldv_64386; case 512U: speed = 4U; goto ldv_64386; case 1024U: speed = 8U; goto ldv_64386; case 2048U: speed = 16U; goto ldv_64386; } ldv_64386: ihost->port_speed = speed; return; } } static void qla4xxx_set_port_state(struct Scsi_Host *shost ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct iscsi_cls_host *ihost ; uint32_t state ; int tmp___0 ; { tmp = to_qla_host(shost); ha = tmp; ihost = (struct iscsi_cls_host *)shost->shost_data; state = 1U; tmp___0 = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { state = 2U; } else { } ihost->port_state = state; return; } } static int qla4xxx_host_get_param(struct Scsi_Host *shost , enum iscsi_host_param param , char *buf ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; int len ; ssize_t tmp___0 ; char *tmp___1 ; char *tmp___2 ; { tmp = to_qla_host(shost); ha = tmp; switch ((unsigned int )param) { case 0U: tmp___0 = sysfs_format_mac(buf, (unsigned char const *)(& ha->my_mac), 6); len = (int )tmp___0; goto ldv_64404; case 3U: len = sprintf(buf, "%pI4\n", & ha->ip_config.ip_address); goto ldv_64404; case 1U: len = sprintf(buf, "%s\n", (uint8_t *)(& ha->name_string)); goto ldv_64404; case 4U: qla4xxx_set_port_state(shost); tmp___1 = iscsi_get_port_state_name(shost); len = sprintf(buf, "%s\n", tmp___1); goto ldv_64404; case 5U: qla4xxx_set_port_speed(shost); tmp___2 = iscsi_get_port_speed_name(shost); len = sprintf(buf, "%s\n", tmp___2); goto ldv_64404; default: ; return (-38); } ldv_64404: ; return (len); } } static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha ) { { if ((unsigned long )ha->iface_ipv4 != (unsigned long )((struct iscsi_iface *)0)) { return; } else { } ha->iface_ipv4 = iscsi_create_iface(ha->host, & qla4xxx_iscsi_transport, 1U, 0U, 0); if ((unsigned long )ha->iface_ipv4 == (unsigned long )((struct iscsi_iface *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Could not create IPv4 iSCSI iface0.\n"); } else { } return; } } static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha ) { { if ((unsigned long )ha->iface_ipv6_0 == (unsigned long )((struct iscsi_iface *)0)) { ha->iface_ipv6_0 = iscsi_create_iface(ha->host, & qla4xxx_iscsi_transport, 2U, 0U, 0); } else { } if ((unsigned long )ha->iface_ipv6_0 == (unsigned long )((struct iscsi_iface *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Could not create IPv6 iSCSI iface0.\n"); } else { } if ((unsigned long )ha->iface_ipv6_1 == (unsigned long )((struct iscsi_iface *)0)) { ha->iface_ipv6_1 = iscsi_create_iface(ha->host, & qla4xxx_iscsi_transport, 2U, 1U, 0); } else { } if ((unsigned long )ha->iface_ipv6_1 == (unsigned long )((struct iscsi_iface 
*)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Could not create IPv6 iSCSI iface1.\n"); } else { } return; } } static void qla4xxx_create_ifaces(struct scsi_qla_host *ha ) { { if ((int )((short )ha->ip_config.ipv4_options) < 0) { qla4xxx_create_ipv4_iface(ha); } else { } if ((ha->ip_config.ipv6_options & 32768U) != 0U) { qla4xxx_create_ipv6_iface(ha); } else { } return; } } static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha ) { { if ((unsigned long )ha->iface_ipv4 != (unsigned long )((struct iscsi_iface *)0)) { iscsi_destroy_iface(ha->iface_ipv4); ha->iface_ipv4 = (struct iscsi_iface *)0; } else { } return; } } static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha ) { { if ((unsigned long )ha->iface_ipv6_0 != (unsigned long )((struct iscsi_iface *)0)) { iscsi_destroy_iface(ha->iface_ipv6_0); ha->iface_ipv6_0 = (struct iscsi_iface *)0; } else { } if ((unsigned long )ha->iface_ipv6_1 != (unsigned long )((struct iscsi_iface *)0)) { iscsi_destroy_iface(ha->iface_ipv6_1); ha->iface_ipv6_1 = (struct iscsi_iface *)0; } else { } return; } } static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha ) { { qla4xxx_destroy_ipv4_iface(ha); qla4xxx_destroy_ipv6_iface(ha); return; } } static void qla4xxx_set_ipv6(struct scsi_qla_host *ha , struct iscsi_iface_param_info *iface_param , struct addr_ctrl_blk *init_fw_cb ) { __u16 tmp ; { switch ((int )iface_param->param) { case 7: ; if ((int )iface_param->iface_num & 1) { memcpy((void *)(& init_fw_cb->ipv6_addr1), (void const *)(& iface_param->value), 16UL); } else { memcpy((void *)(& init_fw_cb->ipv6_addr0), (void const *)(& iface_param->value), 16UL); } goto ldv_64434; case 6: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } memcpy((void *)(& init_fw_cb->ipv6_if_id), (void const *)(& iface_param->value) + 8U, 8UL); goto ldv_64434; case 8: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } memcpy((void *)(& init_fw_cb->ipv6_dflt_rtr_addr), (void const *)(& iface_param->value), 16UL); goto ldv_64434; case 9: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv6_addtl_opts = (unsigned int )init_fw_cb->ipv6_addtl_opts & 65533U; } else if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_addtl_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_addtl_opts | 2U); } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid autocfg setting for IPv6 addr\n"); } goto ldv_64434; case 10: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv6_addtl_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_addtl_opts | 1U); } else if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_addtl_opts = (unsigned int )init_fw_cb->ipv6_addtl_opts & 65534U; } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid autocfg setting for IPv6 linklocal addr\n"); } goto ldv_64434; case 11: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { memset((void *)(& init_fw_cb->ipv6_dflt_rtr_addr), 0, 16UL); } else { } goto ldv_64434; case 12: ; if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_opts | 32768U); qla4xxx_create_ipv6_iface(ha); } else { init_fw_cb->ipv6_opts = (unsigned int )init_fw_cb->ipv6_opts & 32767U; qla4xxx_destroy_ipv6_iface(ha); } 
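/* Reading aid for the surrounding switch (qla4xxx_set_ipv6): each case masks
 * bits into or out of the firmware address control block fields
 * (init_fw_cb->ipv6_opts, ipv6_addtl_opts, ipv6_tcp_opts, ...) based on
 * iface_param->value, and most cases bail out early when iface_num is odd,
 * presumably because those settings apply to the port rather than to the
 * second IPv6 iface.  The case ending here (interface enable/disable)
 * additionally creates or tears down the IPv6 iSCSI iface objects via
 * qla4xxx_create_ipv6_iface()/qla4xxx_destroy_ipv6_iface(). */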
goto ldv_64434; case 16: ; if (iface_param->len != 2U) { goto ldv_64434; } else { } tmp = __fswab16((int )*((uint16_t *)(& iface_param->value))); init_fw_cb->ipv6_vlan_tag = tmp; goto ldv_64434; case 15: ; if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_opts | 8192U); } else { init_fw_cb->ipv6_opts = (unsigned int )init_fw_cb->ipv6_opts & 57343U; } goto ldv_64434; case 19: init_fw_cb->eth_mtu_size = *((uint16_t *)(& iface_param->value)); goto ldv_64434; case 20: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_port = *((uint16_t *)(& iface_param->value)); goto ldv_64434; case 24: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv6_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_tcp_opts | 32768U); } else { init_fw_cb->ipv6_tcp_opts = (unsigned int )init_fw_cb->ipv6_tcp_opts & 32767U; } goto ldv_64434; case 25: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv6_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_tcp_opts | 32U); } else { init_fw_cb->ipv6_tcp_opts = (unsigned int )init_fw_cb->ipv6_tcp_opts & 65503U; } goto ldv_64434; case 26: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv6_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_tcp_opts | 16U); } else { init_fw_cb->ipv6_tcp_opts = (unsigned int )init_fw_cb->ipv6_tcp_opts & 65519U; } goto ldv_64434; case 27: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_tcp_wsf = iface_param->value[0]; goto ldv_64434; case 28: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_tcp_opts = (unsigned int )init_fw_cb->ipv6_tcp_opts & 65521U; init_fw_cb->ipv6_tcp_opts = (uint16_t )((int )((short )init_fw_cb->ipv6_tcp_opts) | ((int )((short )((int )iface_param->value[0] << 1)) & 14)); goto ldv_64434; case 29: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_tcp_opts | 1U); } else { init_fw_cb->ipv6_tcp_opts = (unsigned int )init_fw_cb->ipv6_tcp_opts & 65534U; } goto ldv_64434; case 45: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_opts | 4096U); } else { init_fw_cb->ipv6_opts = (unsigned int )init_fw_cb->ipv6_opts & 61439U; } goto ldv_64434; case 55: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_opts | 4U); } else { init_fw_cb->ipv6_opts = (unsigned int )init_fw_cb->ipv6_opts & 65531U; } goto ldv_64434; case 46: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv6_addtl_opts = (uint16_t )((unsigned int )init_fw_cb->ipv6_addtl_opts | 4U); } else { init_fw_cb->ipv6_addtl_opts = (unsigned int )init_fw_cb->ipv6_addtl_opts & 65531U; } goto ldv_64434; case 47: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_flow_lbl = *((uint16_t *)(& iface_param->value)); goto ldv_64434; case 48: 
; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_traffic_class = iface_param->value[0]; goto ldv_64434; case 49: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_hop_limit = iface_param->value[0]; goto ldv_64434; case 50: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_nd_reach_time = *((uint32_t *)(& iface_param->value)); goto ldv_64434; case 51: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_nd_rexmit_timer = *((uint32_t *)(& iface_param->value)); goto ldv_64434; case 52: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_nd_stale_timeout = *((uint32_t *)(& iface_param->value)); goto ldv_64434; case 53: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_dup_addr_detect_count = iface_param->value[0]; goto ldv_64434; case 54: ; if ((int )iface_param->iface_num & 1) { goto ldv_64434; } else { } init_fw_cb->ipv6_gw_advrt_mtu = *((uint32_t *)(& iface_param->value)); goto ldv_64434; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unknown IPv6 param = %d\n", (int )iface_param->param); goto ldv_64434; } ldv_64434: ; return; } } static void qla4xxx_set_ipv4(struct scsi_qla_host *ha , struct iscsi_iface_param_info *iface_param , struct addr_ctrl_blk *init_fw_cb ) { __u16 tmp ; size_t tmp___0 ; size_t tmp___1 ; { switch ((int )iface_param->param) { case 1: memcpy((void *)(& init_fw_cb->ipv4_addr), (void const *)(& iface_param->value), 4UL); goto ldv_64469; case 2: memcpy((void *)(& init_fw_cb->ipv4_subnet), (void const *)(& iface_param->value), 4UL); goto ldv_64469; case 3: memcpy((void *)(& init_fw_cb->ipv4_gw_addr), (void const *)(& iface_param->value), 4UL); goto ldv_64469; case 4: ; if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 512U); } else if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65023U; } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid IPv4 bootproto\n"); } goto ldv_64469; case 12: ; if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 32768U); qla4xxx_create_ipv4_iface(ha); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 32767U; qla4xxx_destroy_ipv4_iface(ha); } goto ldv_64469; case 16: ; if (iface_param->len != 2U) { goto ldv_64469; } else { } tmp = __fswab16((int )*((uint16_t *)(& iface_param->value))); init_fw_cb->ipv4_vlan_tag = tmp; goto ldv_64469; case 15: ; if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 8192U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 57343U; } goto ldv_64469; case 19: init_fw_cb->eth_mtu_size = *((uint16_t *)(& iface_param->value)); goto ldv_64469; case 20: init_fw_cb->ipv4_port = *((uint16_t *)(& iface_param->value)); goto ldv_64469; case 24: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 32768U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 32767U; } goto ldv_64469; case 25: ; if ((int )iface_param->iface_num & 1) { goto 
ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 32U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65503U; } goto ldv_64469; case 26: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 16U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65519U; } goto ldv_64469; case 27: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } init_fw_cb->ipv4_tcp_wsf = iface_param->value[0]; goto ldv_64469; case 28: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65521U; init_fw_cb->ipv4_tcp_opts = (uint16_t )((int )((short )init_fw_cb->ipv4_tcp_opts) | ((int )((short )((int )iface_param->value[0] << 1)) & 14)); goto ldv_64469; case 29: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 1U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65534U; } goto ldv_64469; case 31: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 256U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65279U; } goto ldv_64469; case 32: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_tcp_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_tcp_opts | 128U); } else { init_fw_cb->ipv4_tcp_opts = (unsigned int )init_fw_cb->ipv4_tcp_opts & 65407U; } goto ldv_64469; case 33: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 16384U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 49151U; } goto ldv_64469; case 34: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } init_fw_cb->ipv4_tos = iface_param->value[0]; goto ldv_64469; case 35: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 4096U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 61439U; } goto ldv_64469; case 36: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 2048U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 63487U; } goto ldv_64469; case 37: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } memcpy((void *)(& init_fw_cb->ipv4_dhcp_alt_cid), (void const *)(& iface_param->value), 10UL); tmp___0 = strlen((char const *)(& init_fw_cb->ipv4_dhcp_alt_cid)); init_fw_cb->ipv4_dhcp_alt_cid_len = (uint8_t )tmp___0; goto ldv_64469; case 38: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] 
== 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 1024U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 64511U; } goto ldv_64469; case 39: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 512U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 65023U; } goto ldv_64469; case 40: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } memcpy((void *)(& init_fw_cb->ipv4_dhcp_vid), (void const *)(& iface_param->value), 10UL); tmp___1 = strlen((char const *)(& init_fw_cb->ipv4_dhcp_vid)); init_fw_cb->ipv4_dhcp_vid_len = (uint8_t )tmp___1; goto ldv_64469; case 41: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 256U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 65279U; } goto ldv_64469; case 42: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 1U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 16U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 65519U; } goto ldv_64469; case 43: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 8U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 65527U; } goto ldv_64469; case 55: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->ipv4_ip_opts = (uint16_t )((unsigned int )init_fw_cb->ipv4_ip_opts | 4U); } else { init_fw_cb->ipv4_ip_opts = (unsigned int )init_fw_cb->ipv4_ip_opts & 65531U; } goto ldv_64469; case 44: ; if ((int )iface_param->iface_num & 1) { goto ldv_64469; } else { } init_fw_cb->ipv4_ttl = iface_param->value[0]; goto ldv_64469; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unknown IPv4 param = %d\n", (int )iface_param->param); goto ldv_64469; } ldv_64469: ; return; } } static void qla4xxx_set_iscsi_param(struct scsi_qla_host *ha , struct iscsi_iface_param_info *iface_param , struct addr_ctrl_blk *init_fw_cb ) { { switch ((int )iface_param->param) { case 0: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->def_timeout = *((uint16_t *)(& iface_param->value)); goto ldv_64506; case 1: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 8192U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 57343U; } goto ldv_64506; case 2: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 4096U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 61439U; } goto ldv_64506; case 3: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int 
)init_fw_cb->iscsi_opts | 2048U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 63487U; } goto ldv_64506; case 4: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 1024U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 64511U; } goto ldv_64506; case 5: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 512U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65023U; } goto ldv_64506; case 6: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 256U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65279U; } goto ldv_64506; case 7: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65532U; init_fw_cb->iscsi_opts = (uint16_t )((int )((short )init_fw_cb->iscsi_opts) | ((int )((short )iface_param->value[0]) & 3)); goto ldv_64506; case 8: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->iscsi_max_pdu_size = (uint16_t )(*((uint32_t *)(& iface_param->value)) / 512U); goto ldv_64506; case 9: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->iscsi_fburst_len = (uint16_t )(*((uint32_t *)(& iface_param->value)) / 512U); goto ldv_64506; case 10: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->iscsi_max_outstnd_r2t = *((uint16_t *)(& iface_param->value)); goto ldv_64506; case 11: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } init_fw_cb->iscsi_max_burst_len = (uint16_t )(*((uint32_t *)(& iface_param->value)) / 512U); goto ldv_64506; case 12: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 128U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65407U; } goto ldv_64506; case 13: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 16U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65519U; } goto ldv_64506; case 14: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 8U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65527U; } goto ldv_64506; case 15: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 32U); } else { init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65503U; } goto ldv_64506; case 16: ; if ((int )iface_param->iface_num & 1) { goto ldv_64506; } else { } if ((unsigned int )iface_param->value[0] == 2U) { init_fw_cb->iscsi_opts = (uint16_t )((unsigned int )init_fw_cb->iscsi_opts | 4U); } else { 
init_fw_cb->iscsi_opts = (unsigned int )init_fw_cb->iscsi_opts & 65531U; } goto ldv_64506; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unknown iscsi param = %d\n", (int )iface_param->param); goto ldv_64506; } ldv_64506: ; return; } } static void qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb ) { struct addr_ctrl_blk_def *acb ; { acb = (struct addr_ctrl_blk_def *)init_fw_cb; memset((void *)(& acb->reserved1), 0, 1UL); memset((void *)(& acb->reserved2), 0, 11UL); memset((void *)(& acb->reserved3), 0, 34UL); memset((void *)(& acb->reserved4), 0, 2UL); memset((void *)(& acb->reserved5), 0, 4UL); memset((void *)(& acb->reserved6), 0, 2UL); memset((void *)(& acb->reserved7), 0, 4UL); memset((void *)(& acb->reserved8), 0, 8UL); memset((void *)(& acb->reserved9), 0, 12UL); memset((void *)(& acb->reserved10), 0, 84UL); memset((void *)(& acb->reserved11), 0, 10UL); memset((void *)(& acb->reserved12), 0, 20UL); memset((void *)(& acb->reserved13), 0, 32UL); memset((void *)(& acb->reserved14), 0, 18UL); memset((void *)(& acb->reserved15), 0, 140UL); return; } } static int qla4xxx_iface_set_param(struct Scsi_Host *shost , void *data , uint32_t len ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; int rval ; struct iscsi_iface_param_info *iface_param ; struct addr_ctrl_blk *init_fw_cb ; dma_addr_t init_fw_cb_dma ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; uint32_t rem ; struct nlattr *attr ; void *tmp___0 ; uint8_t tmp___1 ; void *tmp___2 ; int tmp___3 ; { tmp = to_qla_host(shost); ha = tmp; rval = 0; iface_param = (struct iscsi_iface_param_info *)0; init_fw_cb = (struct addr_ctrl_blk *)0; rem = len; tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, 768UL, & init_fw_cb_dma, 208U, (struct dma_attrs *)0); init_fw_cb = (struct addr_ctrl_blk *)tmp___0; if ((unsigned long )init_fw_cb == (unsigned long )((struct addr_ctrl_blk *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to alloc init_cb\n", "qla4xxx_iface_set_param"); return (-12); } else { } memset((void *)init_fw_cb, 0, 768UL); memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); tmp___1 = qla4xxx_get_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb_dma); if ((unsigned int )tmp___1 != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: get ifcb failed\n", "qla4xxx_iface_set_param"); rval = -5; goto exit_init_fw_cb; } else { } attr = (struct nlattr *)data; rem = len; goto ldv_64557; ldv_64556: tmp___2 = nla_data((struct nlattr const *)attr); iface_param = (struct iscsi_iface_param_info *)tmp___2; if ((unsigned int )iface_param->param_type == 2U) { switch ((int )iface_param->iface_type) { case 1: ; switch (iface_param->iface_num) { case 0U: qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); goto ldv_64546; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid IPv4 iface number = %d\n", iface_param->iface_num); goto ldv_64546; } ldv_64546: ; goto ldv_64548; case 2: ; switch (iface_param->iface_num) { case 0U: ; case 1U: qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); goto ldv_64552; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid IPv6 iface number = %d\n", iface_param->iface_num); goto ldv_64552; } ldv_64552: ; goto ldv_64548; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid iface type\n"); goto ldv_64548; } ldv_64548: ; } else if ((unsigned int )iface_param->param_type == 5U) { qla4xxx_set_iscsi_param(ha, iface_param, init_fw_cb); } 
else { } attr = nla_next((struct nlattr const *)attr, (int *)(& rem)); ldv_64557: tmp___3 = nla_ok((struct nlattr const *)attr, (int )rem); if (tmp___3 != 0) { goto ldv_64556; } else { } init_fw_cb->cookie = 297708890U; rval = qla4xxx_set_flash(ha, init_fw_cb_dma, 67108864U, 768U, 3U); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: set flash mbx failed\n", "qla4xxx_iface_set_param"); rval = -5; goto exit_init_fw_cb; } else { } rval = qla4xxx_disable_acb(ha); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: disable acb mbx failed\n", "qla4xxx_iface_set_param"); rval = -5; goto exit_init_fw_cb; } else { } wait_for_completion_timeout(& ha->disable_acb_comp, 7500UL); qla4xxx_initcb_to_acb(init_fw_cb); rval = qla4xxx_set_acb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb_dma); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: set acb mbx failed\n", "qla4xxx_iface_set_param"); rval = -5; goto exit_init_fw_cb; } else { } memset((void *)init_fw_cb, 0, 768UL); qla4xxx_update_local_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb, init_fw_cb_dma); exit_init_fw_cb: dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)init_fw_cb, init_fw_cb_dma, (struct dma_attrs *)0); return (rval); } } static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess , enum iscsi_param param , char *buf ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct iscsi_cls_conn *cls_conn ; struct ql4_chap_table chap_tbl ; int rval ; int len ; uint16_t idx ; size_t tmp ; int tmp___0 ; { sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; cls_conn = ddb_entry->conn; memset((void *)(& chap_tbl), 0, 364UL); switch ((unsigned int )param) { case 37U: rval = qla4xxx_get_chap_index(ha, sess->username_in, sess->password_in, 1, & idx); if (rval != 0) { len = sprintf(buf, "\n"); } else { len = sprintf(buf, "%hu\n", (int )idx); } goto ldv_64573; case 38U: ; if ((unsigned int )ddb_entry->ddb_type == 1U) { if ((unsigned int )ddb_entry->chap_tbl_idx != 65535U) { idx = ddb_entry->chap_tbl_idx; rval = 0; } else { rval = 1; } } else { rval = qla4xxx_get_chap_index(ha, sess->username, sess->password, 0, & idx); } if (rval != 0) { len = sprintf(buf, "\n"); } else { len = sprintf(buf, "%hu\n", (int )idx); } goto ldv_64573; case 22U: ; case 24U: ; if ((((unsigned int )ddb_entry->ddb_type == 1U && (unsigned int )ddb_entry->chap_tbl_idx != 65535U) && (unsigned long )sess->username == (unsigned long )((char *)0)) && (unsigned long )sess->password == (unsigned long )((char *)0)) { idx = ddb_entry->chap_tbl_idx; rval = qla4xxx_get_uni_chap_at_index(ha, (char *)(& chap_tbl.name), (char *)(& chap_tbl.secret), (int )idx); if (rval == 0) { tmp = strlen((char const *)(& chap_tbl.name)); iscsi_set_param(cls_conn, 22, (char *)(& chap_tbl.name), (int )tmp); iscsi_set_param(cls_conn, 24, (char *)(& chap_tbl.secret), (int )chap_tbl.secret_len); } else { } } else { } default: tmp___0 = iscsi_session_get_param(cls_sess, param, buf); return (tmp___0); } ldv_64573: ; return (len); } } static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn , enum iscsi_param param , char *buf ) { struct iscsi_conn *conn ; struct qla_conn *qla_conn ; struct sockaddr *dst_addr ; int tmp ; int tmp___0 ; { conn = (struct iscsi_conn *)cls_conn->dd_data; qla_conn = (struct qla_conn *)conn->dd_data; dst_addr = (struct 
sockaddr *)(& (qla_conn->qla_ep)->dst_addr); switch ((unsigned int )param) { case 20U: ; case 21U: tmp = iscsi_conn_get_addr_param((struct __kernel_sockaddr_storage *)dst_addr, param, buf); return (tmp); default: tmp___0 = iscsi_conn_get_param(cls_conn, param, buf); return (tmp___0); } } } int qla4xxx_get_ddb_index(struct scsi_qla_host *ha , uint16_t *ddb_index ) { uint32_t mbx_sts ; uint16_t tmp_ddb_index ; int ret ; unsigned long tmp ; int tmp___0 ; { mbx_sts = 0U; get_ddb_index: tmp = find_first_zero_bit((unsigned long const *)(& ha->ddb_idx_map), 512UL); tmp_ddb_index = (uint16_t )tmp; if ((unsigned int )tmp_ddb_index > 511U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Free DDB index not available\n"); } else { } ret = 1; goto exit_get_ddb_index; } else { } tmp___0 = test_and_set_bit((long )tmp_ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); if (tmp___0 != 0) { goto get_ddb_index; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Found a free DDB index at %d\n", (int )tmp_ddb_index); } else { } ret = qla4xxx_req_ddb_entry(ha, (uint32_t )tmp_ddb_index, & mbx_sts); if (ret == 1) { if (mbx_sts == 16389U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "DDB index = %d not available trying next\n", (int )tmp_ddb_index); goto get_ddb_index; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Free FW DDB not available\n"); } else { } } else { } *ddb_index = tmp_ddb_index; exit_get_ddb_index: ; return (ret); } } static int qla4xxx_match_ipaddress(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , char *existing_ipaddr , char *user_ipaddr ) { uint8_t dst_ipaddr[16U] ; char formatted_ipaddr[64U] ; int status ; int ret ; size_t tmp ; size_t tmp___0 ; int tmp___1 ; { status = 0; ret = 0; if (((int )ddb_entry->fw_ddb_entry.options & 256) != 0) { tmp = strlen((char const *)user_ipaddr); ret = in6_pton((char const *)user_ipaddr, (int )tmp, (u8 *)(& dst_ipaddr), 0, (char const **)0); if (ret == 0) { status = 1; goto out_match; } else { } ret = sprintf((char *)(& formatted_ipaddr), "%pI6", (uint8_t *)(& dst_ipaddr)); } else { tmp___0 = strlen((char const *)user_ipaddr); ret = in4_pton((char const *)user_ipaddr, (int )tmp___0, (u8 *)(& dst_ipaddr), 0, (char const **)0); if (ret == 0) { status = 1; goto out_match; } else { } ret = sprintf((char *)(& formatted_ipaddr), "%pI4", (uint8_t *)(& dst_ipaddr)); } tmp___1 = strcmp((char const *)existing_ipaddr, (char const *)(& formatted_ipaddr)); if (tmp___1 != 0) { status = 1; } else { } out_match: ; return (status); } } static int qla4xxx_match_fwdb_session(struct scsi_qla_host *ha , struct iscsi_cls_conn *cls_conn ) { int idx ; int max_ddbs ; int rval ; struct iscsi_cls_session *cls_sess ; struct device const *__mptr ; struct iscsi_session *sess ; struct iscsi_session *existing_sess ; struct iscsi_conn *conn ; struct iscsi_conn *existing_conn ; struct ddb_entry *ddb_entry ; int tmp ; int tmp___0 ; { idx = 0; __mptr = (struct device const *)cls_conn->dev.parent; cls_sess = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)cls_sess->dd_data; conn = (struct iscsi_conn *)cls_conn->dd_data; if (((unsigned long )sess->targetname == (unsigned long )((char *)0) || (unsigned long )conn->persistent_address == (unsigned long )((char *)0)) || conn->persistent_port == 0) { return (1); } else { } tmp = 
is_qla40XX(ha); max_ddbs = tmp != 0 ? 256 : 512; idx = 0; goto ldv_64627; ldv_64626: ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, (uint32_t )idx); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { goto ldv_64624; } else { } if ((unsigned int )ddb_entry->ddb_type != 1U) { goto ldv_64624; } else { } existing_sess = (struct iscsi_session *)(ddb_entry->sess)->dd_data; existing_conn = (struct iscsi_conn *)(ddb_entry->conn)->dd_data; if (((unsigned long )existing_sess->targetname == (unsigned long )((char *)0) || (unsigned long )existing_conn->persistent_address == (unsigned long )((char *)0)) || existing_conn->persistent_port == 0) { goto ldv_64624; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "IQN = %s User IQN = %s\n", existing_sess->targetname, sess->targetname); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "IP = %s User IP = %s\n", existing_conn->persistent_address, conn->persistent_address); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port = %d User Port = %d\n", existing_conn->persistent_port, conn->persistent_port); } else { } tmp___0 = strcmp((char const *)existing_sess->targetname, (char const *)sess->targetname); if (tmp___0 != 0) { goto ldv_64624; } else { } rval = qla4xxx_match_ipaddress(ha, ddb_entry, existing_conn->persistent_address, conn->persistent_address); if (rval == 1) { goto ldv_64624; } else { } if (existing_conn->persistent_port != conn->persistent_port) { goto ldv_64624; } else { } goto ldv_64625; ldv_64624: idx = idx + 1; ldv_64627: ; if (idx < max_ddbs) { goto ldv_64626; } else { } ldv_64625: ; if (idx == max_ddbs) { return (1); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Match found in fwdb sessions\n"); } else { } return (0); } } static struct iscsi_cls_session *qla4xxx_session_create(struct iscsi_endpoint *ep , uint16_t cmds_max , uint16_t qdepth , uint32_t initial_cmdsn ) { struct iscsi_cls_session *cls_sess ; struct scsi_qla_host *ha ; struct qla_endpoint *qla_ep ; struct ddb_entry *ddb_entry ; uint16_t ddb_index ; struct iscsi_session *sess ; struct sockaddr *dst_addr ; int ret ; { if ((unsigned long )ep == (unsigned long )((struct iscsi_endpoint *)0)) { printk("\vqla4xxx: missing ep.\n"); return ((struct iscsi_cls_session *)0); } else { } qla_ep = (struct qla_endpoint *)ep->dd_data; dst_addr = (struct sockaddr *)(& qla_ep->dst_addr); ha = to_qla_host(qla_ep->host); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host: %ld\n", "qla4xxx_session_create", ha->host_no); } else { } ret = qla4xxx_get_ddb_index(ha, & ddb_index); if (ret == 1) { return ((struct iscsi_cls_session *)0); } else { } cls_sess = iscsi_session_setup(& qla4xxx_iscsi_transport, qla_ep->host, (int )cmds_max, 600, 224, initial_cmdsn, (unsigned int )ddb_index); if ((unsigned long )cls_sess == (unsigned long )((struct iscsi_cls_session *)0)) { return ((struct iscsi_cls_session *)0); } else { } sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ddb_entry->fw_ddb_index = ddb_index; ddb_entry->fw_ddb_device_state = 1U; ddb_entry->ha = ha; ddb_entry->sess = cls_sess; ddb_entry->unblock_sess = & qla4xxx_unblock_ddb; ddb_entry->ddb_change = & qla4xxx_ddb_change; clear_bit(0L, (unsigned long volatile *)(& 
ddb_entry->flags)); cls_sess->recovery_tmo = ql4xsess_recovery_tmo; ha->fw_ddb_index_map[(int )ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs = ha->tot_ddbs + 1U; return (cls_sess); } } static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; unsigned long flags ; unsigned long wtime ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint32_t ddb_state ; int ret ; void *tmp ; int tmp___0 ; { fw_ddb_entry = (struct dev_db_entry *)0; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: host: %ld\n", "qla4xxx_session_destroy", ha->host_no); } else { } tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_session_destroy"); goto destroy_session; } else { } wtime = (unsigned long )jiffies + 2500UL; ldv_64663: ret = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto destroy_session; } else { } if (ddb_state == 1U || ddb_state == 6U) { goto destroy_session; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_64663; } else { } destroy_session: qla4xxx_clear_ddb_entry(ha, (uint32_t )ddb_entry->fw_ddb_index); tmp___0 = test_and_clear_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); if (tmp___0 != 0) { clear_bit((long )ddb_entry->fw_ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); } else { } ldv_spin_lock(); qla4xxx_free_ddb(ha, ddb_entry); spin_unlock_irqrestore(& ha->hardware_lock, flags); iscsi_session_teardown(cls_sess); if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return; } } static struct iscsi_cls_conn *qla4xxx_conn_create(struct iscsi_cls_session *cls_sess , uint32_t conn_idx ) { struct iscsi_cls_conn *cls_conn ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; { cls_conn = iscsi_conn_setup(cls_sess, 8, conn_idx); if ((unsigned long )cls_conn == (unsigned long )((struct iscsi_cls_conn *)0)) { printk("\016%s: Can not create connection for conn_idx = %u\n", "qla4xxx_conn_create", conn_idx); return ((struct iscsi_cls_conn *)0); } else { } sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ddb_entry->conn = cls_conn; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: conn_idx = %u\n", "qla4xxx_conn_create", conn_idx); } else { } return (cls_conn); } } static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session , struct iscsi_cls_conn *cls_conn , uint64_t transport_fd , int is_leading ) { struct iscsi_conn *conn ; struct qla_conn *qla_conn ; struct iscsi_endpoint *ep ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct iscsi_session *sess ; int tmp ; { sess = (struct 
iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: sid = %d, cid = %d\n", "qla4xxx_conn_bind", cls_session->sid, cls_conn->cid); } else { } tmp = iscsi_conn_bind(cls_session, cls_conn, is_leading); if (tmp != 0) { return (-22); } else { } ep = iscsi_lookup_endpoint(transport_fd); conn = (struct iscsi_conn *)cls_conn->dd_data; qla_conn = (struct qla_conn *)conn->dd_data; qla_conn->qla_ep = (struct qla_endpoint *)ep->dd_data; return (0); } } static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn ) { struct iscsi_cls_session *cls_sess ; struct device const *__mptr ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint32_t mbx_sts ; int ret ; int status ; void *tmp ; { __mptr = (struct device const *)cls_conn->dev.parent; cls_sess = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; fw_ddb_entry = (struct dev_db_entry *)0; mbx_sts = 0U; ret = 0; status = 0; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: sid = %d, cid = %d\n", "qla4xxx_conn_start", cls_sess->sid, cls_conn->cid); } else { } ret = qla4xxx_match_fwdb_session(ha, cls_conn); if (ret == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Session already exist in FW.\n"); ret = -17; goto exit_conn_start; } else { } tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_conn_start"); ret = -12; goto exit_conn_start; } else { } ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, & mbx_sts); if (ret != 0) { if (mbx_sts != 0U) { if (ddb_entry->fw_ddb_device_state == 4U) { (*(ddb_entry->unblock_sess))(ddb_entry->sess); goto exit_set_param; } else { } } else { } dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed set param for index[%d]\n", "qla4xxx_conn_start", (int )ddb_entry->fw_ddb_index); goto exit_conn_start; } else { } status = qla4xxx_conn_open(ha, (int )ddb_entry->fw_ddb_index); if (status == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Login failed: %s\n", "qla4xxx_conn_start", sess->targetname); ret = -22; goto exit_conn_start; } else { } if (ddb_entry->fw_ddb_device_state == 1U) { ddb_entry->fw_ddb_device_state = 7U; } else { } if (ql4xextended_error_logging == 2) { printk("\016%s: DDB state [%d]\n", "qla4xxx_conn_start", ddb_entry->fw_ddb_device_state); } else { } exit_set_param: ret = 0; exit_conn_start: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return (ret); } } static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn ) { struct iscsi_cls_session *cls_sess ; struct device const *__mptr ; struct iscsi_session *sess ; struct scsi_qla_host *ha ; struct ddb_entry *ddb_entry ; int options ; int tmp ; { __mptr = (struct device const *)cls_conn->dev.parent; cls_sess = (struct 
iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: cid = %d\n", "qla4xxx_conn_destroy", cls_conn->cid); } else { } options = 2; tmp = qla4xxx_session_logout_ddb(ha, ddb_entry, options); if (tmp == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Logout failed\n", "qla4xxx_conn_destroy"); } else { } return; } } static void qla4xxx_task_work(struct work_struct *wdata ) { struct ql4_task_data *task_data ; struct scsi_qla_host *ha ; struct passthru_status *sts ; struct iscsi_task *task ; struct iscsi_hdr *hdr ; uint8_t *data ; uint32_t data_len ; struct iscsi_conn *conn ; int hdr_len ; itt_t itt ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)wdata; task_data = (struct ql4_task_data *)__mptr + 0xffffffffffffff70UL; ha = task_data->ha; task = task_data->task; sts = & task_data->sts; hdr_len = 48; conn = task->conn; switch ((int )sts->completionStatus) { case 1: hdr = (struct iscsi_hdr *)task_data->resp_buffer; itt = sts->handle; hdr->itt = itt; data = (uint8_t *)task_data->resp_buffer + (unsigned long )hdr_len; data_len = task_data->resp_len - (uint32_t )hdr_len; iscsi_complete_pdu(conn, hdr, (char *)data, (int )data_len); goto ldv_64731; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Passthru failed status = 0x%x\n", (int )sts->completionStatus); goto ldv_64731; } ldv_64731: ; return; } } static int qla4xxx_alloc_pdu(struct iscsi_task *task , uint8_t opcode ) { struct ql4_task_data *task_data ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int hdr_len ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; { sess = (task->conn)->session; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; task_data = (struct ql4_task_data *)task->dd_data; memset((void *)task_data, 0, 224UL); if ((unsigned long )task->sc != (unsigned long )((struct scsi_cmnd *)0)) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: SCSI Commands not implemented\n", "qla4xxx_alloc_pdu"); return (-22); } else { } hdr_len = 48; task_data->ha = ha; task_data->task = task; if (task->data_count != 0U) { task_data->data_dma = dma_map_single_attrs(& (ha->pdev)->dev, (void *)task->data, (size_t )task->data_count, 1, (struct dma_attrs *)0); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MaxRecvLen %u, iscsi hrd %d\n", "qla4xxx_alloc_pdu", (task->conn)->max_recv_dlength, hdr_len); } else { } task_data->resp_len = (task->conn)->max_recv_dlength + (unsigned int )hdr_len; task_data->resp_buffer = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )task_data->resp_len, & task_data->resp_dma, 32U, (struct dma_attrs *)0); if ((unsigned long )task_data->resp_buffer == (unsigned long )((void *)0)) { goto exit_alloc_pdu; } else { } task_data->req_len = task->data_count + (unsigned int )hdr_len; task_data->req_buffer = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )task_data->req_len, & task_data->req_dma, 32U, (struct dma_attrs *)0); if ((unsigned long )task_data->req_buffer == (unsigned long )((void *)0)) { goto exit_alloc_pdu; } else { } task->hdr = (struct iscsi_hdr *)task_data->req_buffer; __init_work(& task_data->task_work, 0); __constr_expr_0.counter = 137438953408L; task_data->task_work.data = __constr_expr_0; 
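/* The three statements above together with the calls that follow are the CIL
 * expansion of INIT_WORK(&task_data->task_work, qla4xxx_task_work): seed the
 * work_struct's atomic data word with the macro's initializer constant, set up
 * the lockdep map, reset the entry list head, and install qla4xxx_task_work()
 * as the callback that completes the passthrough PDU once the firmware
 * response arrives. */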
lockdep_init_map(& task_data->task_work.lockdep_map, "(&task_data->task_work)", & __key, 0); INIT_LIST_HEAD(& task_data->task_work.entry); task_data->task_work.func = & qla4xxx_task_work; return (0); exit_alloc_pdu: ; if ((unsigned long )task_data->resp_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )task_data->resp_len, task_data->resp_buffer, task_data->resp_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )task_data->req_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )task_data->req_len, task_data->req_buffer, task_data->req_dma, (struct dma_attrs *)0); } else { } return (-12); } } static void qla4xxx_task_cleanup(struct iscsi_task *task ) { struct ql4_task_data *task_data ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int hdr_len ; { hdr_len = 48; sess = (task->conn)->session; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; task_data = (struct ql4_task_data *)task->dd_data; if (task->data_count != 0U) { dma_unmap_single_attrs(& (ha->pdev)->dev, task_data->data_dma, (size_t )task->data_count, 1, (struct dma_attrs *)0); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MaxRecvLen %u, iscsi hrd %d\n", "qla4xxx_task_cleanup", (task->conn)->max_recv_dlength, hdr_len); } else { } dma_free_attrs(& (ha->pdev)->dev, (size_t )task_data->resp_len, task_data->resp_buffer, task_data->resp_dma, (struct dma_attrs *)0); dma_free_attrs(& (ha->pdev)->dev, (size_t )task_data->req_len, task_data->req_buffer, task_data->req_dma, (struct dma_attrs *)0); return; } } static int qla4xxx_task_xmit(struct iscsi_task *task ) { struct scsi_cmnd *sc ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int tmp ; { sc = task->sc; sess = (task->conn)->session; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if ((unsigned long )sc == (unsigned long )((struct scsi_cmnd *)0)) { tmp = qla4xxx_send_passthru0(task); return (tmp); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: scsi cmd xmit not implemented\n", "qla4xxx_task_xmit"); return (-38); } } static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess , struct iscsi_bus_flash_conn *conn , struct dev_db_entry *fw_ddb_entry ) { unsigned long options ; int rc ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; void *tmp___19 ; void *tmp___20 ; void *tmp___21 ; int tmp___22 ; int i ; int j ; int tmp___23 ; int tmp___24 ; { options = 0UL; rc = 0; options = (unsigned long )fw_ddb_entry->options; tmp = constant_test_bit(11L, (unsigned long const volatile *)(& options)); conn->is_fw_assigned_ipv6 = (uint8_t )tmp; tmp___0 = constant_test_bit(8L, (unsigned long const volatile *)(& options)); if (tmp___0 != 0) { rc = iscsi_switch_str_param(& sess->portal_type, (char *)"ipv6"); if (rc != 0) { goto exit_copy; } else { } } else { rc = iscsi_switch_str_param(& sess->portal_type, (char *)"ipv4"); if (rc != 0) { goto exit_copy; } else { } } tmp___1 = constant_test_bit(6L, (unsigned long const volatile *)(& options)); sess->auto_snd_tgt_disable = (uint8_t )tmp___1; tmp___2 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); 
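/* qla4xxx_copy_from_fwddb_param() unpacks a firmware DDB entry into the flash
 * session/connection objects: individual feature bits are extracted from
 * fw_ddb_entry->options, ->iscsi_options, ->tcp_options and ->ip_options with
 * constant_test_bit(), and the burst/segment length fields that follow are
 * multiplied by 512 because the firmware stores them in 512-byte units (the
 * setters in qla4xxx_set_iscsi_param() divide by the same factor). */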
sess->discovery_sess = (uint8_t )tmp___2; tmp___3 = constant_test_bit(3L, (unsigned long const volatile *)(& options)); sess->entry_state = (uint8_t )tmp___3; options = (unsigned long )fw_ddb_entry->iscsi_options; conn->hdrdgst_en = constant_test_bit(13L, (unsigned long const volatile *)(& options)); conn->datadgst_en = constant_test_bit(12L, (unsigned long const volatile *)(& options)); sess->imm_data_en = constant_test_bit(11L, (unsigned long const volatile *)(& options)); sess->initial_r2t_en = constant_test_bit(10L, (unsigned long const volatile *)(& options)); sess->dataseq_inorder_en = constant_test_bit(9L, (unsigned long const volatile *)(& options)); sess->pdu_inorder_en = constant_test_bit(8L, (unsigned long const volatile *)(& options)); tmp___4 = constant_test_bit(7L, (unsigned long const volatile *)(& options)); sess->chap_auth_en = (uint8_t )tmp___4; tmp___5 = constant_test_bit(6L, (unsigned long const volatile *)(& options)); conn->snack_req_en = (uint8_t )tmp___5; tmp___6 = constant_test_bit(5L, (unsigned long const volatile *)(& options)); sess->discovery_logout_en = (uint8_t )tmp___6; tmp___7 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); sess->bidi_chap_en = (uint8_t )tmp___7; tmp___8 = constant_test_bit(3L, (unsigned long const volatile *)(& options)); sess->discovery_auth_optional = (uint8_t )tmp___8; tmp___9 = constant_test_bit(1L, (unsigned long const volatile *)(& options)); if (tmp___9 != 0) { sess->erl = sess->erl | 2; } else { } tmp___10 = constant_test_bit(0L, (unsigned long const volatile *)(& options)); if (tmp___10 != 0) { sess->erl = sess->erl | 1; } else { } options = (unsigned long )fw_ddb_entry->tcp_options; tmp___11 = constant_test_bit(6L, (unsigned long const volatile *)(& options)); conn->tcp_timestamp_stat = (uint8_t )tmp___11; tmp___12 = constant_test_bit(5L, (unsigned long const volatile *)(& options)); conn->tcp_nagle_disable = (uint8_t )tmp___12; tmp___13 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); conn->tcp_wsf_disable = (uint8_t )tmp___13; tmp___14 = constant_test_bit(3L, (unsigned long const volatile *)(& options)); if (tmp___14 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 8U); } else { } tmp___15 = constant_test_bit(2L, (unsigned long const volatile *)(& options)); if (tmp___15 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 4U); } else { } tmp___16 = constant_test_bit(1L, (unsigned long const volatile *)(& options)); if (tmp___16 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 2U); } else { } conn->tcp_timer_scale = (uint8_t )((int )conn->tcp_timer_scale >> 1); tmp___17 = constant_test_bit(0L, (unsigned long const volatile *)(& options)); conn->tcp_timestamp_en = (uint8_t )tmp___17; options = (unsigned long )fw_ddb_entry->ip_options; tmp___18 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); conn->fragment_disable = (uint8_t )tmp___18; conn->max_recv_dlength = (unsigned int )((int )fw_ddb_entry->iscsi_max_rcv_data_seg_len * 512); conn->max_xmit_dlength = (unsigned int )((int )fw_ddb_entry->iscsi_max_snd_data_seg_len * 512); sess->first_burst = (unsigned int )((int )fw_ddb_entry->iscsi_first_burst_len * 512); sess->max_burst = (unsigned int )((int )fw_ddb_entry->iscsi_max_burst_len * 512); sess->max_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t; sess->time2wait = (int )fw_ddb_entry->iscsi_def_time2wait; sess->time2retain = (int )fw_ddb_entry->iscsi_def_time2retain; sess->tpgt = 
(int )fw_ddb_entry->tgt_portal_grp; conn->max_segment_size = (unsigned int )fw_ddb_entry->mss; conn->tcp_xmit_wsf = (unsigned int )fw_ddb_entry->tcp_xmt_wsf; conn->tcp_recv_wsf = (unsigned int )fw_ddb_entry->tcp_rcv_wsf; conn->ipv6_flow_label = (uint8_t )fw_ddb_entry->ipv6_flow_lbl; conn->keepalive_timeout = fw_ddb_entry->ka_timeout; conn->local_port = fw_ddb_entry->lcl_port; conn->statsn = fw_ddb_entry->stat_sn; conn->exp_statsn = fw_ddb_entry->exp_stat_sn; sess->discovery_parent_idx = fw_ddb_entry->ddb_link; sess->discovery_parent_type = fw_ddb_entry->ddb_link; sess->chap_out_idx = fw_ddb_entry->chap_tbl_idx; sess->tsid = fw_ddb_entry->tsid; sess->default_taskmgmt_timeout = (int )fw_ddb_entry->def_timeout; conn->port = (int )fw_ddb_entry->port; options = (unsigned long )fw_ddb_entry->options; tmp___19 = kzalloc(16UL, 208U); conn->ipaddress = (char *)tmp___19; if ((unsigned long )conn->ipaddress == (unsigned long )((char *)0)) { rc = -12; goto exit_copy; } else { } tmp___20 = kzalloc(16UL, 208U); conn->redirect_ipaddr = (char *)tmp___20; if ((unsigned long )conn->redirect_ipaddr == (unsigned long )((char *)0)) { rc = -12; goto exit_copy; } else { } memcpy((void *)conn->ipaddress, (void const *)(& fw_ddb_entry->ip_addr), 16UL); memcpy((void *)conn->redirect_ipaddr, (void const *)(& fw_ddb_entry->tgt_addr), 16UL); tmp___22 = constant_test_bit(8L, (unsigned long const volatile *)(& options)); if (tmp___22 != 0) { conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos; tmp___21 = kmemdup((void const *)(& fw_ddb_entry->link_local_ipv6_addr), 16UL, 208U); conn->link_local_ipv6_addr = (char *)tmp___21; if ((unsigned long )conn->link_local_ipv6_addr == (unsigned long )((char *)0)) { rc = -12; goto exit_copy; } else { } } else { conn->ipv4_tos = fw_ddb_entry->ipv4_tos; } if ((unsigned int )fw_ddb_entry->iscsi_name[0] != 0U) { rc = iscsi_switch_str_param(& sess->targetname, (char *)(& fw_ddb_entry->iscsi_name)); if (rc != 0) { goto exit_copy; } else { } } else { } if ((unsigned int )fw_ddb_entry->iscsi_alias[0] != 0U) { rc = iscsi_switch_str_param(& sess->targetalias, (char *)(& fw_ddb_entry->iscsi_alias)); if (rc != 0) { goto exit_copy; } else { } } else { } i = 0; j = 5; goto ldv_64774; ldv_64773: tmp___23 = i; i = i + 1; tmp___24 = j; j = j - 1; sess->isid[tmp___23] = fw_ddb_entry->isid[tmp___24]; ldv_64774: ; if (i <= 5) { goto ldv_64773; } else { } exit_copy: ; return (rc); } } static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess , struct iscsi_bus_flash_conn *conn , struct dev_db_entry *fw_ddb_entry ) { uint16_t options ; int rc ; int tmp ; int tmp___0 ; int i ; int j ; int tmp___1 ; int tmp___2 ; { rc = 0; options = fw_ddb_entry->options; if ((unsigned int )conn->is_fw_assigned_ipv6 != 0U) { options = (uint16_t )((unsigned int )options | 2048U); } else { options = (unsigned int )options & 63487U; } tmp = strncmp((char const *)sess->portal_type, "ipv6", 4UL); if (tmp == 0) { options = (uint16_t )((unsigned int )options | 256U); } else { options = (unsigned int )options & 65279U; } if ((unsigned int )sess->auto_snd_tgt_disable != 0U) { options = (uint16_t )((unsigned int )options | 64U); } else { options = (unsigned int )options & 65471U; } if ((unsigned int )sess->discovery_sess != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { options = (unsigned int )options & 65519U; } if ((unsigned int )sess->entry_state != 0U) { options = (uint16_t )((unsigned int )options | 8U); } else { options = (unsigned int )options & 65527U; } fw_ddb_entry->options = options; 
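/* Reader annotation (not part of the CIL output): the block below, inside
 * qla4xxx_copy_to_fwddb_param(), re-encodes the session/connection iSCSI
 * negotiation flags into the firmware DDB entry's iscsi_options bitmask,
 * one bit per flag -- bit 13 header digest, bit 12 data digest, bit 11
 * immediate data, bit 10 initial R2T, bits 9/8 data-/PDU-ordering, bit 7
 * CHAP auth, bit 6 SNACK, bit 5 discovery logout, bit 4 bidi CHAP, bit 3
 * discovery auth optional, bits 1/0 the two ERL bits. The bit positions
 * mirror the constant_test_bit() decode performed by the copy-from routine
 * that precedes this function. */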
options = fw_ddb_entry->iscsi_options; if (conn->hdrdgst_en != 0) { options = (uint16_t )((unsigned int )options | 8192U); } else { options = (unsigned int )options & 57343U; } if (conn->datadgst_en != 0) { options = (uint16_t )((unsigned int )options | 4096U); } else { options = (unsigned int )options & 61439U; } if (sess->imm_data_en != 0) { options = (uint16_t )((unsigned int )options | 2048U); } else { options = (unsigned int )options & 63487U; } if (sess->initial_r2t_en != 0) { options = (uint16_t )((unsigned int )options | 1024U); } else { options = (unsigned int )options & 64511U; } if (sess->dataseq_inorder_en != 0) { options = (uint16_t )((unsigned int )options | 512U); } else { options = (unsigned int )options & 65023U; } if (sess->pdu_inorder_en != 0) { options = (uint16_t )((unsigned int )options | 256U); } else { options = (unsigned int )options & 65279U; } if ((unsigned int )sess->chap_auth_en != 0U) { options = (uint16_t )((unsigned int )options | 128U); } else { options = (unsigned int )options & 65407U; } if ((unsigned int )conn->snack_req_en != 0U) { options = (uint16_t )((unsigned int )options | 64U); } else { options = (unsigned int )options & 65471U; } if ((unsigned int )sess->discovery_logout_en != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { options = (unsigned int )options & 65503U; } if ((unsigned int )sess->bidi_chap_en != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { options = (unsigned int )options & 65519U; } if ((unsigned int )sess->discovery_auth_optional != 0U) { options = (uint16_t )((unsigned int )options | 8U); } else { options = (unsigned int )options & 65527U; } if ((sess->erl & 2) != 0) { options = (uint16_t )((unsigned int )options | 2U); } else { options = (unsigned int )options & 65533U; } if (sess->erl & 1) { options = (uint16_t )((unsigned int )options | 1U); } else { options = (unsigned int )options & 65534U; } fw_ddb_entry->iscsi_options = options; options = fw_ddb_entry->tcp_options; if ((unsigned int )conn->tcp_timestamp_stat != 0U) { options = (uint16_t )((unsigned int )options | 64U); } else { options = (unsigned int )options & 65471U; } if ((unsigned int )conn->tcp_nagle_disable != 0U) { options = (uint16_t )((unsigned int )options | 32U); } else { options = (unsigned int )options & 65503U; } if ((unsigned int )conn->tcp_wsf_disable != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { options = (unsigned int )options & 65519U; } if (((int )conn->tcp_timer_scale & 4) != 0) { options = (uint16_t )((unsigned int )options | 8U); } else { options = (unsigned int )options & 65527U; } if (((int )conn->tcp_timer_scale & 2) != 0) { options = (uint16_t )((unsigned int )options | 4U); } else { options = (unsigned int )options & 65531U; } if ((int )conn->tcp_timer_scale & 1) { options = (uint16_t )((unsigned int )options | 2U); } else { options = (unsigned int )options & 65533U; } if ((unsigned int )conn->tcp_timestamp_en != 0U) { options = (uint16_t )((unsigned int )options | 1U); } else { options = (unsigned int )options & 65534U; } fw_ddb_entry->tcp_options = options; options = fw_ddb_entry->ip_options; if ((unsigned int )conn->fragment_disable != 0U) { options = (uint16_t )((unsigned int )options | 16U); } else { options = (unsigned int )options & 65519U; } fw_ddb_entry->ip_options = options; fw_ddb_entry->iscsi_max_outsnd_r2t = sess->max_r2t; fw_ddb_entry->iscsi_max_rcv_data_seg_len = (unsigned short )(conn->max_recv_dlength / 512U); fw_ddb_entry->iscsi_max_snd_data_seg_len = (unsigned 
short )(conn->max_xmit_dlength / 512U); fw_ddb_entry->iscsi_first_burst_len = (unsigned short )(sess->first_burst / 512U); fw_ddb_entry->iscsi_max_burst_len = (unsigned short )(sess->max_burst / 512U); fw_ddb_entry->iscsi_def_time2wait = (unsigned short )sess->time2wait; fw_ddb_entry->iscsi_def_time2retain = (unsigned short )sess->time2retain; fw_ddb_entry->tgt_portal_grp = (unsigned short )sess->tpgt; fw_ddb_entry->mss = (unsigned short )conn->max_segment_size; fw_ddb_entry->tcp_xmt_wsf = (unsigned char )conn->tcp_xmit_wsf; fw_ddb_entry->tcp_rcv_wsf = (unsigned char )conn->tcp_recv_wsf; fw_ddb_entry->ipv6_flow_lbl = (unsigned short )conn->ipv6_flow_label; fw_ddb_entry->ka_timeout = conn->keepalive_timeout; fw_ddb_entry->lcl_port = conn->local_port; fw_ddb_entry->stat_sn = conn->statsn; fw_ddb_entry->exp_stat_sn = conn->exp_statsn; fw_ddb_entry->ddb_link = sess->discovery_parent_idx; fw_ddb_entry->chap_tbl_idx = sess->chap_out_idx; fw_ddb_entry->tsid = sess->tsid; fw_ddb_entry->port = (unsigned short )conn->port; fw_ddb_entry->def_timeout = (unsigned short )sess->default_taskmgmt_timeout; tmp___0 = strncmp((char const *)sess->portal_type, "ipv6", 4UL); if (tmp___0 == 0) { fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class; } else { fw_ddb_entry->ipv4_tos = conn->ipv4_tos; } if ((unsigned long )conn->ipaddress != (unsigned long )((char *)0)) { memcpy((void *)(& fw_ddb_entry->ip_addr), (void const *)conn->ipaddress, 16UL); } else { } if ((unsigned long )conn->redirect_ipaddr != (unsigned long )((char *)0)) { memcpy((void *)(& fw_ddb_entry->tgt_addr), (void const *)conn->redirect_ipaddr, 32UL); } else { } if ((unsigned long )conn->link_local_ipv6_addr != (unsigned long )((char *)0)) { memcpy((void *)(& fw_ddb_entry->link_local_ipv6_addr), (void const *)conn->link_local_ipv6_addr, 16UL); } else { } if ((unsigned long )sess->targetname != (unsigned long )((char *)0)) { memcpy((void *)(& fw_ddb_entry->iscsi_name), (void const *)sess->targetname, 224UL); } else { } if ((unsigned long )sess->targetalias != (unsigned long )((char *)0)) { memcpy((void *)(& fw_ddb_entry->iscsi_alias), (void const *)sess->targetalias, 32UL); } else { } i = 0; j = 5; goto ldv_64786; ldv_64785: tmp___1 = i; i = i + 1; tmp___2 = j; j = j - 1; fw_ddb_entry->isid[tmp___1] = sess->isid[tmp___2]; ldv_64786: ; if (i <= 5) { goto ldv_64785; } else { } return (rc); } } static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn , struct iscsi_session *sess , struct dev_db_entry *fw_ddb_entry ) { unsigned long options ; uint16_t ddb_link ; uint16_t disc_parent ; char ip_addr[64U] ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int i ; int j ; int tmp___16 ; int tmp___17 ; char *tmp___18 ; { options = 0UL; options = (unsigned long )fw_ddb_entry->options; tmp = constant_test_bit(11L, (unsigned long const volatile *)(& options)); conn->is_fw_assigned_ipv6 = (uint8_t )tmp; tmp___0 = constant_test_bit(6L, (unsigned long const volatile *)(& options)); sess->auto_snd_tgt_disable = (uint8_t )tmp___0; tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); sess->discovery_sess = (uint8_t )tmp___1; options = (unsigned long )fw_ddb_entry->iscsi_options; conn->hdrdgst_en = constant_test_bit(13L, (unsigned long const volatile *)(& options)); conn->datadgst_en = constant_test_bit(12L, (unsigned long const volatile *)(& 
options)); sess->imm_data_en = constant_test_bit(11L, (unsigned long const volatile *)(& options)); sess->initial_r2t_en = constant_test_bit(10L, (unsigned long const volatile *)(& options)); sess->dataseq_inorder_en = constant_test_bit(9L, (unsigned long const volatile *)(& options)); sess->pdu_inorder_en = constant_test_bit(8L, (unsigned long const volatile *)(& options)); tmp___2 = constant_test_bit(7L, (unsigned long const volatile *)(& options)); sess->chap_auth_en = (uint8_t )tmp___2; tmp___3 = constant_test_bit(5L, (unsigned long const volatile *)(& options)); sess->discovery_logout_en = (uint8_t )tmp___3; tmp___4 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); sess->bidi_chap_en = (uint8_t )tmp___4; tmp___5 = constant_test_bit(3L, (unsigned long const volatile *)(& options)); sess->discovery_auth_optional = (uint8_t )tmp___5; tmp___6 = constant_test_bit(1L, (unsigned long const volatile *)(& options)); if (tmp___6 != 0) { sess->erl = sess->erl | 2; } else { } tmp___7 = constant_test_bit(0L, (unsigned long const volatile *)(& options)); if (tmp___7 != 0) { sess->erl = sess->erl | 1; } else { } options = (unsigned long )fw_ddb_entry->tcp_options; tmp___8 = constant_test_bit(6L, (unsigned long const volatile *)(& options)); conn->tcp_timestamp_stat = (uint8_t )tmp___8; tmp___9 = constant_test_bit(5L, (unsigned long const volatile *)(& options)); conn->tcp_nagle_disable = (uint8_t )tmp___9; tmp___10 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); conn->tcp_wsf_disable = (uint8_t )tmp___10; tmp___11 = constant_test_bit(3L, (unsigned long const volatile *)(& options)); if (tmp___11 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 8U); } else { } tmp___12 = constant_test_bit(2L, (unsigned long const volatile *)(& options)); if (tmp___12 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 4U); } else { } tmp___13 = constant_test_bit(1L, (unsigned long const volatile *)(& options)); if (tmp___13 != 0) { conn->tcp_timer_scale = (uint8_t )((unsigned int )conn->tcp_timer_scale | 2U); } else { } conn->tcp_timer_scale = (uint8_t )((int )conn->tcp_timer_scale >> 1); tmp___14 = constant_test_bit(0L, (unsigned long const volatile *)(& options)); conn->tcp_timestamp_en = (uint8_t )tmp___14; options = (unsigned long )fw_ddb_entry->ip_options; tmp___15 = constant_test_bit(4L, (unsigned long const volatile *)(& options)); conn->fragment_disable = (uint8_t )tmp___15; conn->max_recv_dlength = (unsigned int )((int )fw_ddb_entry->iscsi_max_rcv_data_seg_len * 512); conn->max_xmit_dlength = (unsigned int )((int )fw_ddb_entry->iscsi_max_snd_data_seg_len * 512); sess->max_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t; sess->first_burst = (unsigned int )((int )fw_ddb_entry->iscsi_first_burst_len * 512); sess->max_burst = (unsigned int )((int )fw_ddb_entry->iscsi_max_burst_len * 512); sess->time2wait = (int )fw_ddb_entry->iscsi_def_time2wait; sess->time2retain = (int )fw_ddb_entry->iscsi_def_time2retain; sess->tpgt = (int )fw_ddb_entry->tgt_portal_grp; conn->max_segment_size = (unsigned int )fw_ddb_entry->mss; conn->tcp_xmit_wsf = (unsigned int )fw_ddb_entry->tcp_xmt_wsf; conn->tcp_recv_wsf = (unsigned int )fw_ddb_entry->tcp_rcv_wsf; conn->ipv4_tos = fw_ddb_entry->ipv4_tos; conn->keepalive_tmo = fw_ddb_entry->ka_timeout; conn->local_port = fw_ddb_entry->lcl_port; conn->statsn = fw_ddb_entry->stat_sn; conn->exp_statsn = fw_ddb_entry->exp_stat_sn; sess->tsid = fw_ddb_entry->tsid; i = 0; j = 5; goto ldv_64800; 
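/* Reader annotation (not part of the CIL output): the ldv_64799/ldv_64800
 * label-and-goto construct below is CIL's lowering of a plain for-loop that
 * copies the 6-byte ISID from fw_ddb_entry->isid[] into sess->isid[] in
 * reverse byte order (i counts 0..5 upward while j counts 5..0 downward),
 * matching the equivalent reversed-copy loops in the copy routines above. */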
ldv_64799: tmp___16 = i; i = i + 1; tmp___17 = j; j = j - 1; sess->isid[tmp___16] = fw_ddb_entry->isid[tmp___17]; ldv_64800: ; if (i <= 5) { goto ldv_64799; } else { } ddb_link = fw_ddb_entry->ddb_link; if ((unsigned int )ddb_link == 65533U) { disc_parent = 3U; } else if ((unsigned int )ddb_link == 65535U) { disc_parent = 1U; } else if ((unsigned int )ddb_link <= 511U) { disc_parent = 2U; } else { disc_parent = 1U; } tmp___18 = iscsi_get_discovery_parent_name((int )disc_parent); iscsi_set_param(conn->cls_conn, 70, tmp___18, 0); iscsi_set_param(conn->cls_conn, 36, (char *)(& fw_ddb_entry->iscsi_alias), 0); options = (unsigned long )fw_ddb_entry->options; if ((options & 256UL) != 0UL) { memset((void *)(& ip_addr), 0, 64UL); sprintf((char *)(& ip_addr), "%pI6", (uint8_t *)(& fw_ddb_entry->link_local_ipv6_addr)); iscsi_set_param(conn->cls_conn, 71, (char *)(& ip_addr), 0); } else { } return; } } static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , struct iscsi_cls_session *cls_sess , struct iscsi_cls_conn *cls_conn ) { int buflen ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct ql4_chap_table chap_tbl ; struct iscsi_conn *conn ; char ip_addr[64U] ; uint16_t options ; size_t tmp ; int tmp___0 ; { buflen = 0; options = 0U; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; conn = (struct iscsi_conn *)cls_conn->dd_data; memset((void *)(& chap_tbl), 0, 364UL); ddb_entry->chap_tbl_idx = fw_ddb_entry->chap_tbl_idx; qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); sess->def_taskmgmt_tmo = fw_ddb_entry->def_timeout; conn->persistent_port = (int )fw_ddb_entry->port; memset((void *)(& ip_addr), 0, 64UL); options = fw_ddb_entry->options; if (((int )options & 256) != 0) { iscsi_set_param(cls_conn, 44, (char *)"ipv6", 4); memset((void *)(& ip_addr), 0, 64UL); sprintf((char *)(& ip_addr), "%pI6", (uint8_t *)(& fw_ddb_entry->ip_addr)); } else { iscsi_set_param(cls_conn, 44, (char *)"ipv4", 4); sprintf((char *)(& ip_addr), "%pI4", (uint8_t *)(& fw_ddb_entry->ip_addr)); } iscsi_set_param(cls_conn, 17, (char *)(& ip_addr), buflen); iscsi_set_param(cls_conn, 15, (char *)(& fw_ddb_entry->iscsi_name), buflen); iscsi_set_param(cls_conn, 34, (char *)(& ha->name_string), buflen); if ((unsigned int )ddb_entry->chap_tbl_idx != 65535U) { tmp___0 = qla4xxx_get_uni_chap_at_index(ha, (char *)(& chap_tbl.name), (char *)(& chap_tbl.secret), (int )ddb_entry->chap_tbl_idx); if (tmp___0 == 0) { tmp = strlen((char const *)(& chap_tbl.name)); iscsi_set_param(cls_conn, 22, (char *)(& chap_tbl.name), (int )tmp); iscsi_set_param(cls_conn, 24, (char *)(& chap_tbl.secret), (int )chap_tbl.secret_len); } else { } } else { } return; } } void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) { struct iscsi_cls_session *cls_sess ; struct iscsi_cls_conn *cls_conn ; uint32_t ddb_state ; dma_addr_t fw_ddb_entry_dma ; struct dev_db_entry *fw_ddb_entry ; void *tmp ; int tmp___0 ; { tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_update_session_conn_fwddb_param"); goto exit_session_conn_fwddb_param; } else { } tmp___0 = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, 
fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (tmp___0 == 1) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed get_ddb_entry for fw_ddb_index %d\n", ha->host_no, "qla4xxx_update_session_conn_fwddb_param", (int )ddb_entry->fw_ddb_index); } else { } goto exit_session_conn_fwddb_param; } else { } cls_sess = ddb_entry->sess; cls_conn = ddb_entry->conn; qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); exit_session_conn_fwddb_param: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return; } } void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) { struct iscsi_cls_session *cls_sess ; struct iscsi_cls_conn *cls_conn ; struct iscsi_session *sess ; struct iscsi_conn *conn ; uint32_t ddb_state ; dma_addr_t fw_ddb_entry_dma ; struct dev_db_entry *fw_ddb_entry ; void *tmp ; int tmp___0 ; unsigned long _min1 ; unsigned long _min2 ; { tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_update_session_conn_param"); goto exit_session_conn_param; } else { } tmp___0 = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (tmp___0 == 1) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed get_ddb_entry for fw_ddb_index %d\n", ha->host_no, "qla4xxx_update_session_conn_param", (int )ddb_entry->fw_ddb_index); } else { } goto exit_session_conn_param; } else { } cls_sess = ddb_entry->sess; sess = (struct iscsi_session *)cls_sess->dd_data; cls_conn = ddb_entry->conn; conn = (struct iscsi_conn *)cls_conn->dd_data; ddb_entry->default_relogin_timeout = (unsigned int )fw_ddb_entry->def_timeout > 12U && (unsigned int )fw_ddb_entry->def_timeout <= 119U ? fw_ddb_entry->def_timeout : 12U; ddb_entry->default_time2wait = (uint32_t )fw_ddb_entry->iscsi_def_time2wait; ddb_entry->chap_tbl_idx = fw_ddb_entry->chap_tbl_idx; qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry); _min1 = 256UL; _min2 = 8UL; memcpy((void *)sess->initiatorname, (void const *)(& ha->name_string), _min1 < _min2 ? 
_min1 : _min2); exit_session_conn_param: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return; } } static void qla4xxx_start_timer(struct scsi_qla_host *ha , void *func , unsigned long interval ) { { reg_timer_4(& ha->timer); ha->timer.expires = interval * 250UL + (unsigned long )jiffies; ha->timer.data = (unsigned long )ha; ha->timer.function = (void (*)(unsigned long ))func; add_timer(& ha->timer); ha->timer_active = 1U; return; } } static void qla4xxx_stop_timer(struct scsi_qla_host *ha ) { { ldv_del_timer_sync_46(& ha->timer); ha->timer_active = 0U; return; } } void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session ) { { iscsi_block_session(cls_session); return; } } void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha ) { { iscsi_host_for_each_session(ha->host, & qla4xxx_mark_device_missing); return; } } static struct srb *qla4xxx_get_new_srb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , struct scsi_cmnd *cmd ) { struct srb *srb ; void *tmp ; { tmp = ldv_mempool_alloc_47(ha->srb_mempool, 32U); srb = (struct srb *)tmp; if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { return (srb); } else { } kref_init(& srb->srb_ref); srb->ha = ha; srb->ddb = ddb_entry; srb->cmd = cmd; srb->flags = 0U; cmd->SCp.ptr = (char *)srb; return (srb); } } static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha , struct srb *srb ) { struct scsi_cmnd *cmd ; { cmd = srb->cmd; if (((int )srb->flags & 8) != 0) { scsi_dma_unmap(cmd); srb->flags = (unsigned int )srb->flags & 65527U; } else { } cmd->SCp.ptr = (char *)0; return; } } void qla4xxx_srb_compl(struct kref *ref ) { struct srb *srb ; struct kref const *__mptr ; struct scsi_cmnd *cmd ; struct scsi_qla_host *ha ; { __mptr = (struct kref const *)ref; srb = (struct srb *)__mptr + 0xffffffffffffffc8UL; cmd = srb->cmd; ha = srb->ha; qla4xxx_srb_free_dma(ha, srb); mempool_free((void *)srb, ha->srb_mempool); (*(cmd->scsi_done))(cmd); return; } } static int qla4xxx_queuecommand(struct Scsi_Host *host , struct scsi_cmnd *cmd ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct ddb_entry *ddb_entry ; struct iscsi_cls_session *sess ; struct srb *srb ; int rval ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; { tmp = to_qla_host(host); ha = tmp; ddb_entry = (struct ddb_entry *)(cmd->device)->hostdata; sess = ddb_entry->sess; tmp___1 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { tmp___0 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { cmd->result = 65536; } else { cmd->result = 851968; } goto qc_fail_command; } else { } if ((unsigned long )sess == (unsigned long )((struct iscsi_cls_session *)0)) { cmd->result = 786432; goto qc_fail_command; } else { } rval = iscsi_session_chkready(sess); if (rval != 0) { cmd->result = rval; goto qc_fail_command; } else { } tmp___2 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___2 != 0) { goto qc_host_busy; } else { tmp___3 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___3 != 0) { goto qc_host_busy; } else { tmp___4 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___4 != 0) { goto 
qc_host_busy; } else { tmp___5 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___5 != 0) { goto qc_host_busy; } else { tmp___6 = constant_test_bit(22L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___6 != 0) { goto qc_host_busy; } else { tmp___7 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___7 == 0) { goto qc_host_busy; } else { tmp___8 = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp___8 == 0) { goto qc_host_busy; } else { tmp___9 = constant_test_bit(9L, (unsigned long const volatile *)(& ha->flags)); if (tmp___9 != 0) { goto qc_host_busy; } else { tmp___10 = constant_test_bit(23L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___10 != 0) { goto qc_host_busy; } else { tmp___11 = constant_test_bit(24L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___11 != 0) { goto qc_host_busy; } else { tmp___12 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___12 != 0) { goto qc_host_busy; } else { } } } } } } } } } } } srb = qla4xxx_get_new_srb(ha, ddb_entry, cmd); if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { goto qc_host_busy; } else { } rval = qla4xxx_send_command_to_isp(ha, srb); if (rval != 0) { goto qc_host_busy_free_sp; } else { } return (0); qc_host_busy_free_sp: qla4xxx_srb_free_dma(ha, srb); mempool_free((void *)srb, ha->srb_mempool); qc_host_busy: ; return (4181); qc_fail_command: (*(cmd->scsi_done))(cmd); return (0); } } static void qla4xxx_mem_free(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; { if ((unsigned long )ha->queues != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, ha->queues_len, ha->queues, ha->queues_dma, (struct dma_attrs *)0); } else { } if ((unsigned long )ha->fw_dump != (unsigned long )((void *)0)) { vfree((void const *)ha->fw_dump); } else { } ha->queues_len = 0UL; ha->queues = (void *)0; ha->queues_dma = 0ULL; ha->request_ring = (struct queue_entry *)0; ha->request_dma = 0ULL; ha->response_ring = (struct queue_entry *)0; ha->response_dma = 0ULL; ha->shadow_regs = (struct shadow_regs *)0; ha->shadow_regs_dma = 0ULL; ha->fw_dump = (void *)0; ha->fw_dump_size = 0U; if ((unsigned long )ha->srb_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(ha->srb_mempool); } else { } ha->srb_mempool = (mempool_t *)0; if ((unsigned long )ha->chap_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->chap_dma_pool); } else { } if ((unsigned long )ha->chap_list != (unsigned long )((uint8_t *)0U)) { vfree((void const *)ha->chap_list); } else { } ha->chap_list = (uint8_t *)0U; if ((unsigned long )ha->fw_ddb_dma_pool != (unsigned long )((struct dma_pool *)0)) { dma_pool_destroy(ha->fw_ddb_dma_pool); } else { } tmp___1 = is_qla8022(ha); if (tmp___1 != 0) { if (ha->nx_pcibase != 0UL) { iounmap((void volatile *)ha->nx_pcibase); } else { } } else { tmp = is_qla8032(ha); if (tmp != 0) { goto _L; } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { _L: /* CIL Label */ if (ha->nx_pcibase != 0UL) { iounmap((void volatile *)ha->nx_pcibase); } else { } } else if ((unsigned long )ha->reg != (unsigned long )((struct isp_reg *)0)) { iounmap((void volatile *)ha->reg); } else { } } } if ((unsigned long )ha->reset_tmplt.buff != (unsigned long )((uint8_t *)0U)) { vfree((void const *)ha->reset_tmplt.buff); } else { } pci_release_regions(ha->pdev); return; } } static int qla4xxx_mem_alloc(struct scsi_qla_host *ha ) { unsigned long align ; int 
_max1 ; int _max2 ; int _max1___0 ; int _max2___0 ; int _max1___1 ; int _max2___1 ; int _max1___2 ; int _max2___2 ; { _max1 = 1024; _max2 = 64; ha->queues_len = ((unsigned long )(_max1 > _max2 ? _max1 : _max2) * 64UL + 73735UL) & 0xfffffffffffff000UL; ha->queues = dma_alloc_attrs(& (ha->pdev)->dev, ha->queues_len, & ha->queues_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )ha->queues == (unsigned long )((void *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed - queues.\n"); goto mem_alloc_error_exit; } else { } memset(ha->queues, 0, ha->queues_len); align = 0UL; _max1___2 = 1024; _max2___2 = 64; if (((unsigned long )ha->queues_dma & ((unsigned long )(_max1___2 > _max2___2 ? _max1___2 : _max2___2) * 64UL - 1UL)) != 0UL) { _max1___0 = 1024; _max2___0 = 64; _max1___1 = 1024; _max2___1 = 64; align = (unsigned long )(_max1___0 > _max2___0 ? _max1___0 : _max2___0) * 64UL - ((unsigned long )ha->queues_dma & ((unsigned long )(_max1___1 > _max2___1 ? _max1___1 : _max2___1) * 64UL - 1UL)); } else { } ha->request_dma = ha->queues_dma + (unsigned long long )align; ha->request_ring = (struct queue_entry *)(ha->queues + align); ha->response_dma = (ha->queues_dma + (unsigned long long )align) + 65536ULL; ha->response_ring = (struct queue_entry *)(ha->queues + (align + 65536UL)); ha->shadow_regs_dma = (ha->queues_dma + (unsigned long long )align) + 69632ULL; ha->shadow_regs = (struct shadow_regs *)(ha->queues + (align + 69632UL)); ha->srb_mempool = mempool_create(128, & mempool_alloc_slab, & mempool_free_slab, (void *)srb_cachep); if ((unsigned long )ha->srb_mempool == (unsigned long )((mempool_t *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed - SRB Pool.\n"); goto mem_alloc_error_exit; } else { } ha->chap_dma_pool = dma_pool_create("ql4_chap", & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->chap_dma_pool == (unsigned long )((struct dma_pool *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: chap_dma_pool allocation failed..\n", "qla4xxx_mem_alloc"); goto mem_alloc_error_exit; } else { } ha->fw_ddb_dma_pool = dma_pool_create("ql4_fw_ddb", & (ha->pdev)->dev, 512UL, 8UL, 0UL); if ((unsigned long )ha->fw_ddb_dma_pool == (unsigned long )((struct dma_pool *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: fw_ddb_dma_pool allocation failed..\n", "qla4xxx_mem_alloc"); goto mem_alloc_error_exit; } else { } return (0); mem_alloc_error_exit: qla4xxx_mem_free(ha); return (1); } } static int qla4_8xxx_check_temp(struct scsi_qla_host *ha ) { uint32_t temp ; uint32_t temp_state ; uint32_t temp_val ; int status ; int tmp ; { status = 0; tmp = qla4_8xxx_rd_direct(ha, 13U); temp = (uint32_t )tmp; temp_state = temp & 65535U; temp_val = temp >> 16; if (temp_state == 3U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Device temperature %d degrees C exceeds maximum allowed. Hardware has been shut down.\n", temp_val); status = 1; } else if (temp_state == 2U) { if ((unsigned int )ha->temperature == 1U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Device temperature %d degrees C exceeds operating range. 
Immediate action needed.\n", temp_val); } else { } } else if ((unsigned int )ha->temperature == 2U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Device temperature is now %d degrees C in normal range.\n", temp_val); } else { } ha->temperature = (uint16_t )temp_state; return (status); } } static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha ) { uint32_t fw_heartbeat_counter ; int status ; int tmp ; { status = 0; tmp = qla4_8xxx_rd_direct(ha, 2U); fw_heartbeat_counter = (uint32_t )tmp; if (fw_heartbeat_counter == 4294967295U) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: Device in frozen state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n", ha->host_no, "qla4_8xxx_check_fw_alive"); } else { } return (status); } else { } if (ha->fw_heartbeat_counter == fw_heartbeat_counter) { ha->seconds_since_last_heartbeat = ha->seconds_since_last_heartbeat + 1U; if (ha->seconds_since_last_heartbeat == 2U) { ha->seconds_since_last_heartbeat = 0U; qla4_8xxx_dump_peg_reg(ha); status = 1; } else { } } else { ha->seconds_since_last_heartbeat = 0U; } ha->fw_heartbeat_counter = fw_heartbeat_counter; return (status); } } static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha ) { uint32_t halt_status ; int halt_status_unrecoverable ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { halt_status_unrecoverable = 0; tmp = qla4_8xxx_rd_direct(ha, 0U); halt_status = (uint32_t )tmp; tmp___2 = is_qla8022(ha); if (tmp___2 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: disabling pause transmit on port 0 & 1.\n", "qla4_8xxx_process_fw_error"); qla4_82xx_wr_32(ha, 106954904UL, 9U); if (((halt_status >> 8) & 2097151U) == 103U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Firmware aborted with error code 0x00006700. Device is being reset\n", "qla4_8xxx_process_fw_error"); } else { } if ((int )halt_status < 0) { halt_status_unrecoverable = 1; } else { } } else { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { goto _L; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { _L: /* CIL Label */ if ((halt_status & 1073741824U) != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Firmware error detected device is being reset\n", "qla4_8xxx_process_fw_error"); } else if ((int )halt_status < 0) { halt_status_unrecoverable = 1; } else { } } else { } } } if (halt_status_unrecoverable != 0) { set_bit(21L, (unsigned long volatile *)(& ha->dpc_flags)); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: detect abort needed!\n", "qla4_8xxx_process_fw_error"); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } qla4xxx_mailbox_premature_completion(ha); qla4xxx_wake_dpc(ha); return; } } void qla4_8xxx_watchdog(struct scsi_qla_host *ha ) { uint32_t dev_state ; uint32_t idc_ctrl ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; { tmp___2 = is_qla8032(ha); if (tmp___2 != 0) { tmp___3 = qla4_83xx_is_detached(ha); if (tmp___3 == 0) { __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_fmt("/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_os.c", 4401, "%s: iSCSI function %d marked invisible\n", "qla4_8xxx_watchdog", (int )ha->func_num); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); } else { } } else { } tmp___15 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___15 == 0) { tmp___16 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___16 == 0) { tmp___17 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___17 == 0) { tmp___4 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___4; tmp___14 = qla4_8xxx_check_temp(ha); if (tmp___14 != 0) { tmp___5 = is_qla8022(ha); if (tmp___5 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "disabling pause transmit on port 0 & 1.\n"); qla4_82xx_wr_32(ha, 106954904UL, 9U); } else { } set_bit(21L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else if (dev_state == 4U) { tmp___13 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___13 == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: HW State: NEED RESET!\n", "qla4_8xxx_watchdog"); tmp___6 = is_qla8032(ha); if (tmp___6 != 0) { goto _L; } else { tmp___7 = is_qla8042(ha); if (tmp___7 != 0) { _L: /* CIL Label */ idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); if ((idc_ctrl & 2U) == 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Graceful reset bit is not set\n", "qla4_8xxx_watchdog"); qla4xxx_mailbox_premature_completion(ha); } else { } } else { } } tmp___8 = is_qla8032(ha); if (tmp___8 != 0) { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else { tmp___9 = is_qla8042(ha); if (tmp___9 != 0) { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else { tmp___10 = is_qla8022(ha); if (tmp___10 != 0 && ql4xdontresethba == 0) { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else { } } } } else { goto _L___1; } } else _L___1: /* CIL Label */ if (dev_state == 5U) { tmp___12 = constant_test_bit(22L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___12 == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: HW State: NEED QUIES!\n", "qla4_8xxx_watchdog"); set_bit(22L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else { goto _L___0; } } else { _L___0: /* CIL Label */ tmp___11 = qla4_8xxx_check_fw_alive(ha); if (tmp___11 != 0) { qla4_8xxx_process_fw_error(ha); } else { } } } else { } } else { } } else { } return; } } static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if ((unsigned int )ddb_entry->ddb_type != 1U) { return; } else { } tmp___1 = adapter_up(ha); if (tmp___1 != 0) { tmp___2 = constant_test_bit(0L, 
(unsigned long const volatile *)(& ddb_entry->flags)); if (tmp___2 == 0) { tmp___3 = iscsi_is_session_online(cls_sess); if (tmp___3 == 0) { tmp___0 = atomic_read((atomic_t const *)(& ddb_entry->retry_relogin_timer)); if (tmp___0 != 65535) { tmp = atomic_read((atomic_t const *)(& ddb_entry->retry_relogin_timer)); if (tmp == 0) { atomic_set(& ddb_entry->retry_relogin_timer, 65535); set_bit(3L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: index [%d] login device\n", "qla4xxx_check_relogin_flash_ddb", (int )ddb_entry->fw_ddb_index); } else { } } else { atomic_dec(& ddb_entry->retry_relogin_timer); } } else { } } else { } } else { } } else { } tmp___6 = atomic_read((atomic_t const *)(& ddb_entry->relogin_timer)); if (tmp___6 != 0) { tmp___7 = atomic_dec_and_test(& ddb_entry->relogin_timer); if (tmp___7 != 0) { tmp___5 = iscsi_is_session_online(cls_sess); if (tmp___5 == 0) { atomic_inc(& ddb_entry->relogin_retry_count); if (ql4xextended_error_logging == 2) { tmp___4 = atomic_read((atomic_t const *)(& ddb_entry->relogin_retry_count)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: index[%d] relogin timed out-retrying relogin (%d), retry (%d)\n", "qla4xxx_check_relogin_flash_ddb", (int )ddb_entry->fw_ddb_index, tmp___4, ddb_entry->default_time2wait + 4U); } else { } set_bit(3L, (unsigned long volatile *)(& ha->dpc_flags)); atomic_set(& ddb_entry->retry_relogin_timer, (int )(ddb_entry->default_time2wait + 4U)); } else { } } else { } } else { } return; } } static void qla4xxx_timer(struct scsi_qla_host *ha ) { int start_dpc ; uint16_t w ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; { start_dpc = 0; iscsi_host_for_each_session(ha->host, & qla4xxx_check_relogin_flash_ddb); tmp = constant_test_bit(20L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { ldv_mod_timer_48(& ha->timer, (unsigned long )jiffies + 250UL); return; } else { } tmp___0 = pci_channel_offline(ha->pdev); if (tmp___0 == 0) { pci_read_config_word((struct pci_dev const *)ha->pdev, 0, & w); } else { } tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { qla4_8xxx_watchdog(ha); } else { } tmp___2 = is_qla40XX(ha); if (tmp___2 != 0) { if (((int )ha->firmware_options & 4096) != 0 && (unsigned int )ha->heartbeat_interval != 0U) { ha->seconds_since_last_heartbeat = ha->seconds_since_last_heartbeat + 1U; if (ha->seconds_since_last_heartbeat > (uint32_t )((int )ha->heartbeat_interval + 2)) { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } } else { } } else { } tmp___3 = list_empty((struct list_head const *)(& ha->work_list)); if (tmp___3 == 0) { start_dpc = start_dpc + 1; } else { } if (start_dpc != 0) { goto _L; } else { tmp___4 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___4 != 0) { goto _L; } else { tmp___5 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___5 != 0) { goto _L; } else { tmp___6 = constant_test_bit(3L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___6 != 0) { goto _L; } else { tmp___7 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___7 != 0) { goto _L; } else { tmp___8 = constant_test_bit(5L, (unsigned long const 
volatile *)(& ha->dpc_flags)); if (tmp___8 != 0) { goto _L; } else { tmp___9 = constant_test_bit(15L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___9 != 0) { goto _L; } else { tmp___10 = constant_test_bit(18L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___10 != 0) { goto _L; } else { tmp___11 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___11 != 0) { goto _L; } else { tmp___12 = constant_test_bit(22L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___12 != 0) { goto _L; } else { tmp___13 = constant_test_bit(25L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___13 != 0) { goto _L; } else { tmp___14 = constant_test_bit(9L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___14 != 0) { _L: /* CIL Label */ if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: scheduling dpc routine - dpc flags = 0x%lx\n", ha->host_no, "qla4xxx_timer", ha->dpc_flags); } else { } qla4xxx_wake_dpc(ha); } else { } } } } } } } } } } } } ldv_mod_timer_49(& ha->timer, (unsigned long )jiffies + 250UL); if (ql4xextended_error_logging == 2) { ha->seconds_since_last_intr = ha->seconds_since_last_intr + 1U; } else { } return; } } static int qla4xxx_cmd_wait(struct scsi_qla_host *ha ) { uint32_t index ; unsigned long flags ; struct scsi_cmnd *cmd ; unsigned long wtime ; uint32_t wtmo ; int tmp ; { index = 0U; tmp = is_qla40XX(ha); if (tmp != 0) { wtmo = 5U; } else { wtmo = ha->nx_reset_timeout / 2U; } wtime = (unsigned long )(wtmo * 250U) + (unsigned long )jiffies; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Wait up to %u seconds for cmds to complete\n", wtmo); } else { } goto ldv_64970; ldv_64969: ldv_spin_lock(); index = 0U; goto ldv_64968; ldv_64967: cmd = scsi_host_find_tag(ha->host, (int )index); if ((unsigned long )cmd != (unsigned long )((struct scsi_cmnd *)0) && (unsigned long )cmd->SCp.ptr != (unsigned long )((char *)0)) { goto ldv_64966; } else { } index = index + 1U; ldv_64968: ; if ((uint32_t )(ha->host)->can_queue > index) { goto ldv_64967; } else { } ldv_64966: spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((uint32_t )(ha->host)->can_queue == index) { return (0); } else { } msleep(1000U); ldv_64970: ; if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_64969; } else { } return (1); } } int qla4xxx_hw_reset(struct scsi_qla_host *ha ) { uint32_t ctrl_status ; unsigned long flags ; int tmp ; unsigned short tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; { flags = 0UL; if (ql4xextended_error_logging == 2) { printk("\vscsi%ld: %s\n", ha->host_no, "qla4xxx_hw_reset"); } else { } tmp = ql4xxx_lock_drvr_wait(ha); if (tmp != 0) { return (1); } else { } ldv_spin_lock(); tmp___0 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp___0; if ((ctrl_status & 8U) != 0U) { tmp___1 = set_rmask(8U); writel(tmp___1, (void volatile *)(& (ha->reg)->ctrl_status)); } else { } tmp___2 = set_rmask(32768U); writel(tmp___2, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla4xxx_soft_reset(struct scsi_qla_host *ha ) { uint32_t max_wait_time ; unsigned long flags ; int status ; uint32_t ctrl_status ; unsigned short tmp ; uint32_t tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; unsigned short tmp___5 ; { flags = 0UL; status = 
qla4xxx_hw_reset(ha); if (status != 0) { return (status); } else { } status = 1; max_wait_time = 3U; ldv_64986: ldv_spin_lock(); tmp = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp; spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((ctrl_status & 2048U) == 0U) { goto ldv_64985; } else { } msleep(1000U); max_wait_time = max_wait_time - 1U; if (max_wait_time != 0U) { goto ldv_64986; } else { } ldv_64985: ; if ((ctrl_status & 2048U) != 0U) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: Network Reset Intr not cleared by Network function, clearing it now!\n", ha->host_no); } else { } ldv_spin_lock(); tmp___0 = set_rmask(2048U); writel(tmp___0, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { } max_wait_time = 30U; ldv_64988: ldv_spin_lock(); tmp___1 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp___1; spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((ctrl_status & 32768U) == 0U) { status = 0; goto ldv_64987; } else { } msleep(1000U); max_wait_time = max_wait_time - 1U; if (max_wait_time != 0U) { goto ldv_64988; } else { } ldv_64987: ldv_spin_lock(); tmp___2 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp___2; if ((ctrl_status & 8U) != 0U) { tmp___3 = set_rmask(8U); writel(tmp___3, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if (max_wait_time == 0U) { ldv_spin_lock(); tmp___4 = set_rmask(8192U); writel(tmp___4, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); max_wait_time = 30U; ldv_64990: ldv_spin_lock(); tmp___5 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp___5; spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((ctrl_status & 8192U) == 0U) { status = 0; goto ldv_64989; } else { } msleep(1000U); max_wait_time = max_wait_time - 1U; if (max_wait_time != 0U) { goto ldv_64990; } else { } ldv_64989: ; } else { } return (status); } } static void qla4xxx_abort_active_cmds(struct scsi_qla_host *ha , int res ) { struct srb *srb ; int i ; unsigned long flags ; { ldv_spin_lock(); i = 0; goto ldv_64999; ldv_64998: srb = qla4xxx_del_from_active_array(ha, (uint32_t )i); if ((unsigned long )srb != (unsigned long )((struct srb *)0)) { (srb->cmd)->result = res; kref_put(& srb->srb_ref, & qla4xxx_srb_compl); } else { } i = i + 1; ldv_64999: ; if ((ha->host)->can_queue > i) { goto ldv_64998; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha ) { { clear_bit(0L, (unsigned long volatile *)(& ha->flags)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Disabling the board\n"); qla4xxx_abort_active_cmds(ha, 65536); qla4xxx_mark_all_devices_missing(ha); clear_bit(1L, (unsigned long volatile *)(& ha->flags)); return; } } static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; { sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ddb_entry->fw_ddb_device_state = 6U; if ((unsigned int )ddb_entry->ddb_type == 1U) { iscsi_block_session(ddb_entry->sess); } else { 
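/* Reader annotation (not part of the CIL output): in qla4xxx_fail_session(),
 * firmware/flash-managed targets (ddb_type == 1, assumed to be FLASH_DDB in
 * the upstream driver) are merely blocked so the driver can relogin them
 * itself, while user-space managed sessions fall through to the call below;
 * the literal 1011 is assumed to correspond to ISCSI_ERR_CONN_FAILED
 * (ISCSI_ERR_BASE + 11), handing recovery to the open-iscsi daemon. */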
iscsi_session_failure((struct iscsi_session *)cls_session->dd_data, 1011); } return; } } static int qla4xxx_recover_adapter(struct scsi_qla_host *ha ) { int status ; uint8_t reset_chip ; uint32_t dev_state ; unsigned long wait ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; struct task_struct *tmp___9 ; long volatile __ret ; struct task_struct *tmp___10 ; struct task_struct *tmp___11 ; struct task_struct *tmp___12 ; struct task_struct *tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; { status = 1; reset_chip = 0U; scsi_block_requests(ha->host); clear_bit(0L, (unsigned long volatile *)(& ha->flags)); clear_bit(8L, (unsigned long volatile *)(& ha->flags)); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: adapter OFFLINE\n", "qla4xxx_recover_adapter"); } else { } set_bit(20L, (unsigned long volatile *)(& ha->dpc_flags)); tmp = is_qla8032(ha); if (tmp != 0) { goto _L; } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { _L: /* CIL Label */ tmp___1 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___1 == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: disabling pause transmit on port 0 & 1.\n", "qla4xxx_recover_adapter"); qla4_83xx_disable_pause(ha); } else { } } else { } } iscsi_host_for_each_session(ha->host, & qla4xxx_fail_session); tmp___2 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___2 != 0) { reset_chip = 1U; } else { } tmp___3 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___3 != 0) { status = 0; goto recover_ha_init_adapter; } else { } tmp___4 = is_qla80XX(ha); if (tmp___4 != 0 && (unsigned int )reset_chip == 0U) { tmp___5 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___5 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s - Performing stop_firmware...\n", ha->host_no, "qla4xxx_recover_adapter"); } else { } status = (*((ha->isp_ops)->reset_firmware))(ha); if (status == 0) { (*((ha->isp_ops)->disable_intrs))(ha); qla4xxx_process_aen(ha, 1); qla4xxx_abort_active_cmds(ha, 524288); } else { reset_chip = 1U; clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } } else { } } else { } tmp___15 = is_qla40XX(ha); if (tmp___15 != 0 || (unsigned int )reset_chip != 0U) { tmp___6 = is_qla40XX(ha); if (tmp___6 != 0) { goto chip_reset; } else { } tmp___7 = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp___7 != 0) { goto chip_reset; } else { } wait = (unsigned long )jiffies + 750UL; goto ldv_65035; ldv_65034: tmp___8 = qla4_8xxx_check_fw_alive(ha); if (tmp___8 != 0) { qla4xxx_mailbox_premature_completion(ha); goto ldv_65025; } else { } tmp___9 = get_current(); tmp___9->task_state_change = 0UL; __ret = 2L; switch (8UL) { case 1UL: tmp___10 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___10->state): : "memory", "cc"); goto ldv_65028; case 2UL: tmp___11 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___11->state): : "memory", "cc"); goto ldv_65028; case 4UL: tmp___12 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" 
(tmp___12->state): : "memory", "cc"); goto ldv_65028; case 8UL: tmp___13 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___13->state): : "memory", "cc"); goto ldv_65028; default: __xchg_wrong_size(); } ldv_65028: schedule_timeout(250L); ldv_65035: ; if ((long )((unsigned long )jiffies - wait) < 0L) { goto ldv_65034; } else { } ldv_65025: ; chip_reset: tmp___14 = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp___14 == 0) { qla4xxx_cmd_wait(ha); } else { } qla4xxx_process_aen(ha, 1); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s - Performing chip reset..\n", ha->host_no, "qla4xxx_recover_adapter"); } else { } status = (*((ha->isp_ops)->reset_chip))(ha); qla4xxx_abort_active_cmds(ha, 524288); } else { } qla4xxx_process_aen(ha, 1); recover_ha_init_adapter: ; if (status == 0) { tmp___16 = is_qla40XX(ha); if (tmp___16 != 0 && ha->mac_index == 3U) { ssleep(6U); } else { } status = qla4xxx_initialize_adapter(ha, 1); tmp___17 = is_qla80XX(ha); if (tmp___17 != 0 && status == 1) { status = qla4_8xxx_check_init_adapter_retry(ha); if (status == 1) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Don\'t retry recover adapter\n", ha->host_no, "qla4xxx_recover_adapter"); qla4xxx_dead_adapter_cleanup(ha); clear_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); goto exit_recover; } else { } } else { } } else { } tmp___21 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___21 == 0) { tmp___22 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___22 == 0) { tmp___19 = is_qla80XX(ha); if (tmp___19 != 0) { (*((ha->isp_ops)->idc_lock))(ha); tmp___18 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___18; (*((ha->isp_ops)->idc_unlock))(ha); if (dev_state == 6U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: don\'t retry recover adapter. 
H/W is in Failed state\n", "qla4xxx_recover_adapter"); qla4xxx_dead_adapter_cleanup(ha); clear_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); status = 1; goto exit_recover; } else { } } else { } tmp___20 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___20 == 0) { ha->retry_reset_ha_cnt = 2U; if (ql4xextended_error_logging == 2) { printk("scsi%ld: recover adapter - retrying (%d) more times\n", ha->host_no, ha->retry_reset_ha_cnt); } else { } set_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); status = 1; } else { if (ha->retry_reset_ha_cnt != 0U) { ha->retry_reset_ha_cnt = ha->retry_reset_ha_cnt - 1U; if (ql4xextended_error_logging == 2) { printk("scsi%ld: recover adapter - retry remaining %d\n", ha->host_no, ha->retry_reset_ha_cnt); } else { } status = 1; } else { } if (ha->retry_reset_ha_cnt == 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: recover adapter failed - board disabled\n", ha->host_no); } else { } qla4xxx_dead_adapter_cleanup(ha); clear_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); status = 1; } else { } } } else { clear_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); } } else { clear_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(2L, (unsigned long volatile *)(& ha->dpc_flags)); } exit_recover: ha->adapter_error_count = ha->adapter_error_count + 1ULL; tmp___23 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___23 != 0) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } scsi_unblock_requests(ha->host); clear_bit(20L, (unsigned long volatile *)(& ha->dpc_flags)); if (ql4xextended_error_logging == 2) { printk("scsi%ld: recover adapter: %s\n", ha->host_no, status == 1 ? 
(char *)"FAILED" : (char *)"SUCCEEDED"); } else { } return (status); } } static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int tmp ; int tmp___0 ; int tmp___1 ; { sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; tmp___1 = iscsi_is_session_online(cls_session); if (tmp___1 == 0) { if (ddb_entry->fw_ddb_device_state == 4U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ddb[%d] unblock session\n", ha->host_no, "qla4xxx_relogin_devices", (int )ddb_entry->fw_ddb_index); iscsi_unblock_session(ddb_entry->sess); } else if ((unsigned int )ddb_entry->ddb_type == 1U) { tmp = constant_test_bit(0L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp == 0) { tmp___0 = constant_test_bit(4L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp___0 == 0) { qla4xxx_arm_relogin_timer(ddb_entry); } else { } } else { } } else { iscsi_session_failure((struct iscsi_session *)cls_session->dd_data, 1011); } } else { } return; } } int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int tmp ; { sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ddb[%d] unblock session\n", ha->host_no, "qla4xxx_unblock_flash_ddb", (int )ddb_entry->fw_ddb_index); iscsi_unblock_session(ddb_entry->sess); tmp = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ddb[%d] start scan\n", ha->host_no, "qla4xxx_unblock_flash_ddb", (int )ddb_entry->fw_ddb_index); scsi_queue_work(ha->host, & (ddb_entry->sess)->scan_work); } else { } return (0); } } int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int status ; int tmp ; { status = 0; sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ddb[%d] unblock user space session\n", ha->host_no, "qla4xxx_unblock_ddb", (int )ddb_entry->fw_ddb_index); tmp = iscsi_is_session_online(cls_session); if (tmp == 0) { iscsi_conn_start(ddb_entry->conn); iscsi_conn_login_event(ddb_entry->conn, 3); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ddb[%d] session [%d] already logged in\n", ha->host_no, "qla4xxx_unblock_ddb", (int )ddb_entry->fw_ddb_index, cls_session->sid); status = 1; } return (status); } } static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha ) { { iscsi_host_for_each_session(ha->host, & qla4xxx_relogin_devices); return; } } static void qla4xxx_relogin_flash_ddb(struct iscsi_cls_session *cls_sess ) { uint16_t relogin_timer ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; uint16_t _max1 ; unsigned short _max2 ; { sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; _max1 = ddb_entry->default_relogin_timeout; _max2 = 18U; relogin_timer = (uint16_t )((int )_max1 > (int )_max2 ? 
(int )_max1 : (int )_max2); atomic_set(& ddb_entry->relogin_timer, (int )relogin_timer); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, (int )ddb_entry->fw_ddb_index, (int )relogin_timer); } else { } qla4xxx_login_flash_ddb(cls_sess); return; } } static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; int tmp ; int tmp___0 ; int tmp___1 ; { sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if ((unsigned int )ddb_entry->ddb_type != 1U) { return; } else { } tmp = constant_test_bit(4L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp != 0) { return; } else { } tmp___0 = test_and_clear_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); if (tmp___0 != 0) { tmp___1 = iscsi_is_session_online(cls_sess); if (tmp___1 == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "relogin issued\n"); } else { } qla4xxx_relogin_flash_ddb(cls_sess); } else { } } else { } return; } } void qla4xxx_wake_dpc(struct scsi_qla_host *ha ) { { if ((unsigned long )ha->dpc_thread != (unsigned long )((struct workqueue_struct *)0)) { queue_work(ha->dpc_thread, & ha->dpc_work); } else { } return; } } static struct qla4_work_evt *qla4xxx_alloc_work(struct scsi_qla_host *ha , uint32_t data_size , enum qla4_work_type type ) { struct qla4_work_evt *e ; uint32_t size ; void *tmp ; { size = data_size + 32U; tmp = kzalloc((size_t )size, 32U); e = (struct qla4_work_evt *)tmp; if ((unsigned long )e == (unsigned long )((struct qla4_work_evt *)0)) { return ((struct qla4_work_evt *)0); } else { } INIT_LIST_HEAD(& e->list); e->type = type; return (e); } } static void qla4xxx_post_work(struct scsi_qla_host *ha , struct qla4_work_evt *e ) { unsigned long flags ; { ldv_spin_lock(); list_add_tail(& e->list, & ha->work_list); spin_unlock_irqrestore(& ha->work_lock, flags); qla4xxx_wake_dpc(ha); return; } } int qla4xxx_post_aen_work(struct scsi_qla_host *ha , enum iscsi_host_event_code aen_code , uint32_t data_size , uint8_t *data ) { struct qla4_work_evt *e ; { e = qla4xxx_alloc_work(ha, data_size, 0); if ((unsigned long )e == (unsigned long )((struct qla4_work_evt *)0)) { return (1); } else { } e->u.aen.code = aen_code; e->u.aen.data_size = data_size; memcpy((void *)(& e->u.aen.data), (void const *)data, (size_t )data_size); qla4xxx_post_work(ha, e); return (0); } } int qla4xxx_post_ping_evt_work(struct scsi_qla_host *ha , uint32_t status , uint32_t pid , uint32_t data_size , uint8_t *data ) { struct qla4_work_evt *e ; { e = qla4xxx_alloc_work(ha, data_size, 1); if ((unsigned long )e == (unsigned long )((struct qla4_work_evt *)0)) { return (1); } else { } e->u.ping.status = status; e->u.ping.pid = pid; e->u.ping.data_size = data_size; memcpy((void *)(& e->u.ping.data), (void const *)data, (size_t )data_size); qla4xxx_post_work(ha, e); return (0); } } static void qla4xxx_do_work(struct scsi_qla_host *ha ) { struct qla4_work_evt *e ; struct qla4_work_evt *tmp ; unsigned long flags ; struct list_head work ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { work.next = & work; work.prev = & work; ldv_spin_lock(); list_splice_init(& ha->work_list, & work); spin_unlock_irqrestore(& ha->work_lock, flags); __mptr = (struct list_head const 
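/* Driver work-event queue: qla4xxx_alloc_work() allocates an event with room
   for its payload, qla4xxx_post_work() appends it to ha->work_list under the
   work lock and wakes the DPC, and qla4xxx_do_work() (continuing below)
   splices the list off and dispatches AEN (type 0) and ping-completion
   (type 1) events to the iSCSI transport before freeing each node. */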
*)work.next; e = (struct qla4_work_evt *)__mptr; __mptr___0 = (struct list_head const *)e->list.next; tmp = (struct qla4_work_evt *)__mptr___0; goto ldv_65126; ldv_65125: list_del_init(& e->list); switch ((unsigned int )e->type) { case 0U: iscsi_post_host_event((uint32_t )ha->host_no, & qla4xxx_iscsi_transport, e->u.aen.code, e->u.aen.data_size, (uint8_t *)(& e->u.aen.data)); goto ldv_65122; case 1U: iscsi_ping_comp_event((uint32_t )ha->host_no, & qla4xxx_iscsi_transport, e->u.ping.status, e->u.ping.pid, e->u.ping.data_size, (uint8_t *)(& e->u.ping.data)); goto ldv_65122; default: dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "event type: 0x%x not supported", (unsigned int )e->type); } ldv_65122: kfree((void const *)e); e = tmp; __mptr___1 = (struct list_head const *)tmp->list.next; tmp = (struct qla4_work_evt *)__mptr___1; ldv_65126: ; if ((unsigned long )(& e->list) != (unsigned long )(& work)) { goto ldv_65125; } else { } return; } } static void qla4xxx_do_dpc(struct work_struct *work ) { struct scsi_qla_host *ha ; struct work_struct const *__mptr ; int status ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; uint8_t wait_time ; unsigned short tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; { __mptr = (struct work_struct const *)work; ha = (struct scsi_qla_host *)__mptr + 0xfffffffffffffd78UL; status = 1; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: DPC handler waking up. 
flags = 0x%08lx, dpc_flags = 0x%08lx\n", ha->host_no, "qla4xxx_do_dpc", ha->flags, ha->dpc_flags); } else { } tmp = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { return; } else { } tmp___0 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: flags = %lx\n", ha->host_no, "qla4xxx_do_dpc", ha->flags); } else { } return; } else { } qla4xxx_do_work(ha); tmp___10 = is_qla80XX(ha); if (tmp___10 != 0) { tmp___3 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___3 != 0) { tmp___1 = is_qla8032(ha); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: disabling pause transmit on port 0 & 1.\n", "qla4xxx_do_dpc"); qla4_83xx_disable_pause(ha); } else { tmp___2 = is_qla8042(ha); if (tmp___2 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: disabling pause transmit on port 0 & 1.\n", "qla4xxx_do_dpc"); qla4_83xx_disable_pause(ha); } else { } } (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_wr_direct(ha, 4U, 6U); (*((ha->isp_ops)->idc_unlock))(ha); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: FAILED\n"); qla4_8xxx_device_state_handler(ha); } else { } tmp___5 = constant_test_bit(23L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___5 != 0) { tmp___4 = is_qla8042(ha); if (tmp___4 != 0) { if ((ha->idc_info.info2 & 4U) != 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Disabling ACB\n", "qla4xxx_do_dpc"); status = qla4_84xx_config_acb(ha, 0); if (status != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: ACB config failed\n", "qla4xxx_do_dpc"); } else { } } else { } } else { } qla4_83xx_post_idc_ack(ha); clear_bit(23L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } tmp___7 = is_qla8042(ha); if (tmp___7 != 0) { tmp___8 = constant_test_bit(24L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___8 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Restoring ACB\n", "qla4xxx_do_dpc"); tmp___6 = qla4_84xx_config_acb(ha, 1); if (tmp___6 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: ACB config failed ", "qla4xxx_do_dpc"); } else { } clear_bit(24L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } } else { } tmp___9 = test_and_clear_bit(22L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___9 != 0) { qla4_8xxx_need_qsnt_handler(ha); } else { } } else { } tmp___20 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___20 == 0) { tmp___21 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___21 != 0) { goto _L___1; } else { tmp___22 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___22 != 0) { goto _L___1; } else { tmp___23 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___23 != 0) { _L___1: /* CIL Label */ tmp___11 = is_qla8022(ha); if (tmp___11 != 0 && ql4xdontresethba != 0) { goto _L; } else { tmp___12 = is_qla8032(ha); if (tmp___12 != 0) { goto _L___0; } else { tmp___13 = is_qla8042(ha); if (tmp___13 != 0) { _L___0: /* CIL Label */ tmp___14 = qla4_83xx_idc_dontreset(ha); if (tmp___14 != 0) { _L: /* CIL Label */ if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Don\'t Reset HBA\n", ha->host_no, "qla4xxx_do_dpc"); } else { } clear_bit(1L, (unsigned long volatile 
*)(& ha->dpc_flags)); clear_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); goto dpc_post_reset_ha; } else { } } else { } } } tmp___15 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___15 != 0) { qla4xxx_recover_adapter(ha); } else { tmp___16 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___16 != 0) { qla4xxx_recover_adapter(ha); } else { } } tmp___19 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___19 != 0) { wait_time = 3U; goto ldv_65140; ldv_65139: wait_time = (uint8_t )((int )wait_time - 1); if ((unsigned int )wait_time == 0U) { goto ldv_65138; } else { } msleep(1000U); ldv_65140: tmp___17 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); if (((int )tmp___17 & 40960) != 0) { goto ldv_65139; } else { } ldv_65138: ; if ((unsigned int )wait_time == 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: SR|FSR bit not cleared-- resetting\n", ha->host_no, "qla4xxx_do_dpc"); } else { } } else { } qla4xxx_abort_active_cmds(ha, 524288); tmp___18 = ql4xxx_lock_drvr_wait(ha); if (tmp___18 == 0) { qla4xxx_process_aen(ha, 1); status = qla4xxx_recover_adapter(ha); } else { } clear_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); if (status == 0) { (*((ha->isp_ops)->enable_intrs))(ha); } else { } } else { } } else { } } } } else { } dpc_post_reset_ha: tmp___24 = test_and_clear_bit(9L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___24 != 0) { qla4xxx_process_aen(ha, 0); } else { } tmp___25 = test_and_clear_bit(15L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___25 != 0) { qla4xxx_get_dhcp_ip_address(ha); } else { } tmp___26 = adapter_up(ha); if (tmp___26 != 0) { tmp___27 = test_and_clear_bit(3L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___27 != 0) { iscsi_host_for_each_session(ha->host, & qla4xxx_dpc_relogin); } else { } } else { } tmp___30 = constant_test_bit(9L, (unsigned long const volatile *)(& ha->flags)); if (tmp___30 == 0) { tmp___31 = test_and_clear_bit(18L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___31 != 0) { tmp___29 = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp___29 == 0) { qla4xxx_mark_all_devices_missing(ha); } else { tmp___28 = test_and_clear_bit(22L, (unsigned long volatile *)(& ha->flags)); if (tmp___28 != 0) { qla4xxx_build_ddb_list(ha, ha->is_reset); iscsi_host_for_each_session(ha->host, & qla4xxx_login_flash_ddb); } else { qla4xxx_relogin_all_devices(ha); } } } else { } } else { } tmp___33 = test_and_clear_bit(25L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___33 != 0) { tmp___32 = qla4xxx_sysfs_ddb_export(ha); if (tmp___32 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Error exporting ddb to sysfs\n", "qla4xxx_do_dpc"); } else { } } else { } return; } } static void qla4xxx_free_adapter(struct scsi_qla_host *ha ) { uint32_t tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { qla4xxx_abort_active_cmds(ha, 65536); (*((ha->isp_ops)->disable_intrs))(ha); tmp___3 = is_qla40XX(ha); if (tmp___3 != 0) { tmp = set_rmask(32U); writel(tmp, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } else { tmp___2 = is_qla8022(ha); if (tmp___2 != 0) { writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->host_int)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); } else { tmp___0 = 
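/* qla4xxx_free_adapter() (entered above): aborts outstanding commands,
   disables and masks interrupts for the chip family (ctrl_status write on
   40xx, host_int on 8022, risc_intr on 8032/8042), stops the timer, destroys
   the DPC and task workqueues, resets the firmware, clears drv_active on 80xx
   parts, then frees IRQs and driver memory. */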
is_qla8032(ha); if (tmp___0 != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); } else { } } } } if (ha->timer_active != 0U) { qla4xxx_stop_timer(ha); } else { } if ((unsigned long )ha->dpc_thread != (unsigned long )((struct workqueue_struct *)0)) { ldv_destroy_workqueue_50(ha->dpc_thread); } else { } if ((unsigned long )ha->task_wq != (unsigned long )((struct workqueue_struct *)0)) { ldv_destroy_workqueue_51(ha->task_wq); } else { } (*((ha->isp_ops)->reset_firmware))(ha); tmp___4 = is_qla80XX(ha); if (tmp___4 != 0) { (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_clear_drv_active(ha); (*((ha->isp_ops)->idc_unlock))(ha); } else { } qla4xxx_free_irqs(ha); qla4xxx_mem_free(ha); return; } } int qla4_8xxx_iospace_config(struct scsi_qla_host *ha ) { int status ; unsigned long mem_base ; unsigned long mem_len ; unsigned long db_base ; unsigned long db_len ; struct pci_dev *pdev ; char const *tmp ; void *tmp___0 ; char const *tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { status = 0; pdev = ha->pdev; status = pci_request_regions(pdev, "qla4xxx"); if (status != 0) { tmp = pci_name((struct pci_dev const *)pdev); printk("\fscsi(%ld) Failed to reserve PIO regions (%s) status=%d\n", ha->host_no, tmp, status); goto iospace_error_exit; } else { } if (ql4xextended_error_logging == 2) { printk("\016%s: revision-id=%d\n", "qla4_8xxx_iospace_config", (int )pdev->revision); } else { } ha->revision_id = pdev->revision; mem_base = (unsigned long )pdev->resource[0].start; mem_len = pdev->resource[0].start != 0ULL || pdev->resource[0].end != pdev->resource[0].start ? (unsigned long )((pdev->resource[0].end - pdev->resource[0].start) + 1ULL) : 0UL; if (ql4xextended_error_logging == 2) { printk("\016%s: ioremap from %lx a size of %lx\n", "qla4_8xxx_iospace_config", mem_base, mem_len); } else { } tmp___0 = ioremap((resource_size_t )mem_base, mem_len); ha->nx_pcibase = (unsigned long )tmp___0; if (ha->nx_pcibase == 0UL) { tmp___1 = pci_name((struct pci_dev const *)pdev); printk("\vcannot remap MMIO (%s), aborting\n", tmp___1); pci_release_regions(ha->pdev); goto iospace_error_exit; } else { } tmp___4 = is_qla8022(ha); if (tmp___4 != 0) { ha->qla4_82xx_reg = (struct device_reg_82xx *)(((unsigned long )((ha->pdev)->devfn << 11) + ha->nx_pcibase) + 770048UL); ha->nx_db_wr_ptr = (ha->pdev)->devfn == 4U ? 136323504UL : 136323508UL; } else { tmp___2 = is_qla8032(ha); if (tmp___2 != 0) { ha->qla4_83xx_reg = (struct device_reg_83xx *)ha->nx_pcibase; } else { tmp___3 = is_qla8042(ha); if (tmp___3 != 0) { ha->qla4_83xx_reg = (struct device_reg_83xx *)ha->nx_pcibase; } else { } } } db_base = (unsigned long )pdev->resource[4].start; db_len = pdev->resource[4].start != 0ULL || pdev->resource[4].end != pdev->resource[4].start ? (unsigned long )((pdev->resource[4].end - pdev->resource[4].start) + 1ULL) : 0UL; return (0); iospace_error_exit: ; return (-12); } } int qla4xxx_iospace_config(struct scsi_qla_host *ha ) { unsigned long pio ; unsigned long pio_len ; unsigned long pio_flags ; unsigned long mmio ; unsigned long mmio_len ; unsigned long mmio_flags ; int tmp ; void *tmp___0 ; { pio = (unsigned long )(ha->pdev)->resource[0].start; pio_len = (ha->pdev)->resource[0].start != 0ULL || (ha->pdev)->resource[0].end != (ha->pdev)->resource[0].start ? 
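/* qla4_8xxx_iospace_config() (above) reserves the PCI regions of the 8xxx
   parts and ioremaps BAR0 into ha->nx_pcibase, deriving the per-chip register
   block from it; qla4xxx_iospace_config() (continuing below) validates BAR0
   as PIO and BAR1 as MMIO, reserves the regions and ioremaps 256 bytes of
   register space into ha->reg. */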
(unsigned long )(((ha->pdev)->resource[0].end - (ha->pdev)->resource[0].start) + 1ULL) : 0UL; pio_flags = (ha->pdev)->resource[0].flags; if ((pio_flags & 256UL) != 0UL) { if (pio_len <= 255UL) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Invalid PCI I/O region size\n"); pio = 0UL; } else { } } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "region #0 not a PIO resource\n"); pio = 0UL; } mmio = (unsigned long )(ha->pdev)->resource[1].start; mmio_len = (ha->pdev)->resource[1].start != 0ULL || (ha->pdev)->resource[1].end != (ha->pdev)->resource[1].start ? (unsigned long )(((ha->pdev)->resource[1].end - (ha->pdev)->resource[1].start) + 1ULL) : 0UL; mmio_flags = (ha->pdev)->resource[1].flags; if ((mmio_flags & 512UL) == 0UL) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "region #0 not an MMIO resource, aborting\n"); goto iospace_error_exit; } else { } if (mmio_len <= 255UL) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid PCI mem region size, aborting\n"); goto iospace_error_exit; } else { } tmp = pci_request_regions(ha->pdev, "qla4xxx"); if (tmp != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Failed to reserve PIO/MMIO regions\n"); goto iospace_error_exit; } else { } ha->pio_address = pio; ha->pio_length = pio_len; tmp___0 = ioremap((resource_size_t )mmio, 256UL); ha->reg = (struct isp_reg *)tmp___0; if ((unsigned long )ha->reg == (unsigned long )((struct isp_reg *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "cannot remap MMIO, aborting\n"); goto iospace_error_exit; } else { } return (0); iospace_error_exit: ; return (-12); } } static struct isp_operations qla4xxx_isp_ops = {& qla4xxx_iospace_config, & qla4xxx_pci_config, & qla4xxx_disable_intrs, & qla4xxx_enable_intrs, & qla4xxx_start_firmware, 0, & qla4xxx_intr_handler, & qla4xxx_interrupt_service_routine, 0, & qla4xxx_soft_reset, & qla4xxx_hw_reset, & qla4xxx_queue_iocb, & qla4xxx_complete_iocb, & qla4xxx_rd_shdw_req_q_out, & qla4xxx_rd_shdw_rsp_q_in, & qla4xxx_get_sys_info, 0, 0, 0, 0, 0, 0, 0, & qla4xxx_queue_mbox_cmd, & qla4xxx_process_mbox_intr}; static struct isp_operations qla4_82xx_isp_ops = {& qla4_8xxx_iospace_config, & qla4_8xxx_pci_config, & qla4_82xx_disable_intrs, & qla4_82xx_enable_intrs, & qla4_8xxx_load_risc, & qla4_82xx_try_start_fw, & qla4_82xx_intr_handler, & qla4_82xx_interrupt_service_routine, & qla4_8xxx_need_reset, & qla4_82xx_isp_reset, & qla4_8xxx_stop_firmware, & qla4_82xx_queue_iocb, & qla4_82xx_complete_iocb, & qla4_82xx_rd_shdw_req_q_out, & qla4_82xx_rd_shdw_rsp_q_in, & qla4_8xxx_get_sys_info, & qla4_82xx_rd_32, & qla4_82xx_wr_32, & qla4_82xx_md_rd_32, & qla4_82xx_md_wr_32, & qla4_82xx_idc_lock, & qla4_82xx_idc_unlock, & qla4_82xx_rom_lock_recovery, & qla4_82xx_queue_mbox_cmd, & qla4_82xx_process_mbox_intr}; static struct isp_operations qla4_83xx_isp_ops = {& qla4_8xxx_iospace_config, & qla4_8xxx_pci_config, & qla4_83xx_disable_intrs, & qla4_83xx_enable_intrs, & qla4_8xxx_load_risc, & qla4_83xx_start_firmware, & qla4_83xx_intr_handler, & qla4_83xx_interrupt_service_routine, & qla4_8xxx_need_reset, & qla4_83xx_isp_reset, & qla4_8xxx_stop_firmware, & qla4_83xx_queue_iocb, & qla4_83xx_complete_iocb, & qla4xxx_rd_shdw_req_q_out, & qla4xxx_rd_shdw_rsp_q_in, & qla4_8xxx_get_sys_info, & qla4_83xx_rd_reg, & qla4_83xx_wr_reg, & qla4_83xx_rd_reg_indirect, & qla4_83xx_wr_reg_indirect, & qla4_83xx_drv_lock, & qla4_83xx_drv_unlock, & qla4_83xx_rom_lock_recovery, & qla4_83xx_queue_mbox_cmd, & 
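/* Per-chip isp_operations tables: qla4xxx_isp_ops serves the 40xx parts,
   qla4_82xx_isp_ops the 8022, and qla4_83xx_isp_ops the 8032/8042; the common
   paths above reach the hardware through these ha->isp_ops hooks. */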
qla4_83xx_process_mbox_intr}; uint16_t qla4xxx_rd_shdw_req_q_out(struct scsi_qla_host *ha ) { { return ((uint16_t )(ha->shadow_regs)->req_q_out); } } uint16_t qla4_82xx_rd_shdw_req_q_out(struct scsi_qla_host *ha ) { unsigned int tmp ; { tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->req_q_out)); return ((uint16_t )tmp); } } uint16_t qla4xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha ) { { return ((uint16_t )(ha->shadow_regs)->rsp_q_in); } } uint16_t qla4_82xx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha ) { unsigned int tmp ; { tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->rsp_q_in)); return ((uint16_t )tmp); } } static ssize_t qla4xxx_show_boot_eth_info(void *data , int type , char *buf ) { struct scsi_qla_host *ha ; char *str ; int rc ; ssize_t tmp ; { ha = (struct scsi_qla_host *)data; str = buf; switch (type) { case 1: rc = sprintf(str, "%d\n", 2); goto ldv_65189; case 0: rc = sprintf(str, "0\n"); goto ldv_65189; case 10: tmp = sysfs_format_mac(str, (unsigned char const *)(& ha->my_mac), 6); rc = (int )tmp; goto ldv_65189; default: rc = -38; goto ldv_65189; } ldv_65189: ; return ((ssize_t )rc); } } static umode_t qla4xxx_eth_get_attr_visibility(void *data , int type ) { int rc ; { switch (type) { case 1: ; case 10: ; case 0: rc = 292; goto ldv_65201; default: rc = 0; goto ldv_65201; } ldv_65201: ; return ((umode_t )rc); } } static ssize_t qla4xxx_show_boot_ini_info(void *data , int type , char *buf ) { struct scsi_qla_host *ha ; char *str ; int rc ; { ha = (struct scsi_qla_host *)data; str = buf; switch (type) { case 6: rc = sprintf(str, "%s\n", (uint8_t *)(& ha->name_string)); goto ldv_65212; default: rc = -38; goto ldv_65212; } ldv_65212: ; return ((ssize_t )rc); } } static umode_t qla4xxx_ini_get_attr_visibility(void *data , int type ) { int rc ; { switch (type) { case 6: rc = 292; goto ldv_65220; default: rc = 0; goto ldv_65220; } ldv_65220: ; return ((umode_t )rc); } } static ssize_t qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess , int type , char *buf ) { struct ql4_conn_info *boot_conn ; char *str ; int rc ; { boot_conn = (struct ql4_conn_info *)(& boot_sess->conn_list); str = buf; switch (type) { case 7: rc = sprintf(buf, "%s\n", (char *)(& boot_sess->target_name)); goto ldv_65231; case 2: ; if ((unsigned int )boot_sess->conn_list[0].dest_ipaddr.ip_type == 1U) { rc = sprintf(buf, "%pI4\n", & boot_conn->dest_ipaddr.ip_address); } else { rc = sprintf(str, "%pI6\n", & boot_conn->dest_ipaddr.ip_address); } goto ldv_65231; case 3: rc = sprintf(str, "%d\n", (int )boot_conn->dest_port); goto ldv_65231; case 8: rc = sprintf(str, "%.*s\n", (int )boot_conn->chap.target_chap_name_length, (char *)(& boot_conn->chap.target_chap_name)); goto ldv_65231; case 9: rc = sprintf(str, "%.*s\n", (int )boot_conn->chap.target_secret_length, (char *)(& boot_conn->chap.target_secret)); goto ldv_65231; case 10: rc = sprintf(str, "%.*s\n", (int )boot_conn->chap.intr_chap_name_length, (char *)(& boot_conn->chap.intr_chap_name)); goto ldv_65231; case 11: rc = sprintf(str, "%.*s\n", (int )boot_conn->chap.intr_secret_length, (char *)(& boot_conn->chap.intr_secret)); goto ldv_65231; case 1: rc = sprintf(str, "%d\n", 2); goto ldv_65231; case 6: rc = sprintf(str, "0\n"); goto ldv_65231; default: rc = -38; goto ldv_65231; } ldv_65231: ; return ((ssize_t )rc); } } static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data , int type , char *buf ) { struct scsi_qla_host *ha ; struct ql4_boot_session_info *boot_sess ; ssize_t tmp ; { ha = (struct scsi_qla_host *)data; boot_sess 
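/* iscsi_boot sysfs helpers: qla4xxx_show_boot_eth_info/_ini_info/_tgt_info
   format the boot ethernet (MAC), initiator (name string) and target
   parameters (name, IPv4/IPv6 address, port, CHAP and reverse-CHAP
   credentials); the *_get_attr_visibility helpers expose only those fields. */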
= & ha->boot_tgt.boot_pri_sess; tmp = qla4xxx_show_boot_tgt_info(boot_sess, type, buf); return (tmp); } } static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data , int type , char *buf ) { struct scsi_qla_host *ha ; struct ql4_boot_session_info *boot_sess ; ssize_t tmp ; { ha = (struct scsi_qla_host *)data; boot_sess = & ha->boot_tgt.boot_sec_sess; tmp = qla4xxx_show_boot_tgt_info(boot_sess, type, buf); return (tmp); } } static umode_t qla4xxx_tgt_get_attr_visibility(void *data , int type ) { int rc ; { switch (type) { case 7: ; case 2: ; case 3: ; case 8: ; case 9: ; case 10: ; case 11: ; case 6: ; case 1: rc = 292; goto ldv_65269; default: rc = 0; goto ldv_65269; } ldv_65269: ; return ((umode_t )rc); } } static void qla4xxx_boot_release(void *data ) { struct scsi_qla_host *ha ; { ha = (struct scsi_qla_host *)data; scsi_host_put(ha->host); return; } } static int get_fw_boot_info(struct scsi_qla_host *ha , uint16_t *ddb_index ) { dma_addr_t buf_dma ; uint32_t addr ; uint32_t pri_addr ; uint32_t sec_addr ; uint32_t offset ; uint16_t func_num ; uint8_t val ; uint8_t *buf ; size_t size ; int ret ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { buf = (uint8_t *)0U; size = 13UL; ret = 0; func_num = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Get FW boot info for 0x%x func %d\n", "get_fw_boot_info", (int )(ha->pdev)->device, (int )func_num); tmp___2 = is_qla40XX(ha); if (tmp___2 != 0) { if ((unsigned int )func_num == 1U) { addr = 945U; pri_addr = 946U; sec_addr = 955U; } else if ((unsigned int )func_num == 3U) { addr = 1969U; pri_addr = 1970U; sec_addr = 1979U; } else { ret = 1; goto exit_boot_info; } val = rd_nvram_byte(ha, (int )addr); if (((int )val & 7) == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Adapter boot options : 0x%x\n", "get_fw_boot_info", (int )val); } else { } ret = 1; goto exit_boot_info; } else { } val = rd_nvram_byte(ha, (int )pri_addr); if ((int )((signed char )val) < 0) { *ddb_index = (unsigned int )((uint16_t )val) & 127U; } else { } val = rd_nvram_byte(ha, (int )sec_addr); if ((int )((signed char )val) < 0) { *(ddb_index + 1UL) = (unsigned int )((uint16_t )val) & 127U; } else { } } else { tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { tmp = dma_alloc_attrs(& (ha->pdev)->dev, size, & buf_dma, 208U, (struct dma_attrs *)0); buf = (uint8_t *)tmp; if ((unsigned long )buf == (unsigned long )((uint8_t *)0U)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "get_fw_boot_info"); } else { } ret = 1; goto exit_boot_info; } else { } if ((unsigned int )ha->port_num == 0U) { offset = 944U; } else if ((unsigned int )ha->port_num == 1U) { offset = 1968U; } else { ret = 1; goto exit_boot_info_free; } addr = (ha->hw.flt_iscsi_param + 595591168U) * 4U + offset; tmp___0 = qla4xxx_get_flash(ha, buf_dma, addr, 13U); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Get Flash failed\n", ha->host_no, "get_fw_boot_info"); } else { } ret = 1; goto exit_boot_info_free; } else { } if (((int )*(buf + 1UL) & 7) == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Firmware boot options : 0x%x\n", (int )*(buf + 1UL)); } else { } ret = 1; goto exit_boot_info_free; } else { } if ((int )((signed char )*(buf + 2UL)) < 0) { 
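/* get_fw_boot_info(): on 40xx adapters the boot options and primary/secondary
   target indices are read from NVRAM, on 80xx adapters from the iSCSI
   parameter block in flash; bit 7 of an index byte marks it valid and the low
   7 bits carry the DDB index, cached in ha->pri_ddb_idx/sec_ddb_idx. */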
*ddb_index = (unsigned int )((uint16_t )*(buf + 2UL)) & 127U; } else { } if ((int )((signed char )*(buf + 11UL)) < 0) { *(ddb_index + 1UL) = (unsigned int )((uint16_t )*(buf + 11UL)) & 127U; } else { } } else { ret = 1; goto exit_boot_info; } } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Primary target ID %d, Secondary target ID %d\n", "get_fw_boot_info", (int )*ddb_index, (int )*(ddb_index + 1UL)); } else { } exit_boot_info_free: dma_free_attrs(& (ha->pdev)->dev, size, (void *)buf, buf_dma, (struct dma_attrs *)0); exit_boot_info: ha->pri_ddb_idx = *ddb_index; ha->sec_ddb_idx = *(ddb_index + 1UL); return (ret); } } static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha , char *username , char *password ) { int i ; int ret ; int max_chap_entries ; struct ql4_chap_table *chap_table ; int tmp ; { ret = -22; max_chap_entries = 0; tmp = is_qla80XX(ha); if (tmp != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Do not have CHAP table cache\n"); return (ret); } else { } mutex_lock_nested(& ha->chap_sem, 0U); i = 0; goto ldv_65304; ldv_65303: chap_table = (struct ql4_chap_table *)ha->chap_list + (unsigned long )i; if ((unsigned int )chap_table->cookie != 16530U) { goto ldv_65301; } else { } if ((int )((signed char )chap_table->flags) < 0) { goto ldv_65301; } else { } if (((int )chap_table->flags & 64) == 0) { goto ldv_65301; } else { } strlcpy(password, (char const *)(& chap_table->secret), 100UL); strlcpy(username, (char const *)(& chap_table->name), 256UL); ret = 0; goto ldv_65302; ldv_65301: i = i + 1; ldv_65304: ; if (i < max_chap_entries) { goto ldv_65303; } else { } ldv_65302: mutex_unlock(& ha->chap_sem); return (ret); } } static int qla4xxx_get_boot_target(struct scsi_qla_host *ha , struct ql4_boot_session_info *boot_sess , uint16_t ddb_index ) { struct ql4_conn_info *boot_conn ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint16_t idx ; uint16_t options ; int ret ; void *tmp ; int tmp___0 ; unsigned long _min1 ; unsigned long _min2 ; { boot_conn = (struct ql4_conn_info *)(& boot_sess->conn_list); ret = 0; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer.\n", "qla4xxx_get_boot_target"); } else { } ret = 1; return (ret); } else { } tmp___0 = qla4xxx_bootdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, (int )ddb_index); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: No Flash DDB found at index [%d]\n", "qla4xxx_get_boot_target", (int )ddb_index); } else { } ret = 1; goto exit_boot_target; } else { } _min1 = 224UL; _min2 = 224UL; memcpy((void *)(& boot_sess->target_name), (void const *)(& fw_ddb_entry->iscsi_name), _min1 < _min2 ? 
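/* qla4xxx_get_boot_target() (continuing below): reads the flash DDB at the
   given index, copies the target name, IPv4 or IPv6 address (per the options
   field) and port into the boot session, and fetches CHAP and BIDI-CHAP
   credentials when the corresponding iscsi_options bits are set. */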
_min1 : _min2); options = fw_ddb_entry->options; if (((int )options & 256) != 0) { memcpy((void *)(& boot_conn->dest_ipaddr.ip_address), (void const *)(& fw_ddb_entry->ip_addr), 16UL); } else { boot_conn->dest_ipaddr.ip_type = 1U; memcpy((void *)(& boot_conn->dest_ipaddr.ip_address), (void const *)(& fw_ddb_entry->ip_addr), 4UL); } boot_conn->dest_port = fw_ddb_entry->port; idx = fw_ddb_entry->chap_tbl_idx; if (((unsigned int )fw_ddb_entry->iscsi_options & 128U) != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Setting chap\n"); } else { } ret = qla4xxx_get_chap(ha, (char *)(& boot_conn->chap.target_chap_name), (char *)(& boot_conn->chap.target_secret), (int )idx); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Failed to set chap\n"); ret = 1; goto exit_boot_target; } else { } boot_conn->chap.target_chap_name_length = 256U; boot_conn->chap.target_secret_length = 100U; } else { } if (((unsigned int )fw_ddb_entry->iscsi_options & 16U) != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Setting BIDI chap\n"); } else { } ret = qla4xxx_get_bidi_chap(ha, (char *)(& boot_conn->chap.intr_chap_name), (char *)(& boot_conn->chap.intr_secret)); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Failed to set BIDI chap\n"); ret = 1; goto exit_boot_target; } else { } boot_conn->chap.intr_chap_name_length = 256U; boot_conn->chap.intr_secret_length = 100U; } else { } exit_boot_target: dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); return (ret); } } static int qla4xxx_get_boot_info(struct scsi_qla_host *ha ) { uint16_t ddb_index[2U] ; int ret ; int rval ; { ret = 1; memset((void *)(& ddb_index), 0, 4UL); ddb_index[0] = 65535U; ddb_index[1] = 65535U; ret = get_fw_boot_info(ha, (uint16_t *)(& ddb_index)); if (ret != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: No boot target configured.\n", "qla4xxx_get_boot_info"); } else { } return (ret); } else { } if (ql4xdisablesysfsboot != 0) { return (0); } else { } if ((unsigned int )ddb_index[0] == 65535U) { goto sec_target; } else { } rval = qla4xxx_get_boot_target(ha, & ha->boot_tgt.boot_pri_sess, (int )ddb_index[0]); if (rval != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Primary boot target not configured\n", "qla4xxx_get_boot_info"); } else { } } else { ret = 0; } sec_target: ; if ((unsigned int )ddb_index[1] == 65535U) { goto exit_get_boot_info; } else { } rval = qla4xxx_get_boot_target(ha, & ha->boot_tgt.boot_sec_sess, (int )ddb_index[1]); if (rval != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Secondary boot target not configured\n", "qla4xxx_get_boot_info"); } else { } } else { ret = 0; } exit_get_boot_info: ; return (ret); } } static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha ) { struct iscsi_boot_kobj *boot_kobj ; int tmp ; struct Scsi_Host *tmp___0 ; struct Scsi_Host *tmp___1 ; struct Scsi_Host *tmp___2 ; struct Scsi_Host *tmp___3 ; { tmp = qla4xxx_get_boot_info(ha); if (tmp != 0) { return (1); } else { } if (ql4xdisablesysfsboot != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: syfsboot disabled - driver will trigger login and publish session for discovery .\n", 
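/* qla4xxx_setup_boot_info(): unless sysfs boot export is disabled
   (ql4xdisablesysfsboot), it creates the iscsi_boot kset and registers
   primary/secondary target, initiator and ethernet kobjects, taking a
   scsi_host reference for each that qla4xxx_boot_release() later drops. */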
"qla4xxx_setup_boot_info"); return (0); } else { } ha->boot_kset = iscsi_boot_create_host_kset((ha->host)->host_no); if ((unsigned long )ha->boot_kset == (unsigned long )((struct iscsi_boot_kset *)0)) { goto kset_free; } else { } tmp___0 = scsi_host_get(ha->host); if ((unsigned long )tmp___0 == (unsigned long )((struct Scsi_Host *)0)) { goto kset_free; } else { } boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, (void *)ha, & qla4xxx_show_boot_tgt_pri_info, & qla4xxx_tgt_get_attr_visibility, & qla4xxx_boot_release); if ((unsigned long )boot_kobj == (unsigned long )((struct iscsi_boot_kobj *)0)) { goto put_host; } else { } tmp___1 = scsi_host_get(ha->host); if ((unsigned long )tmp___1 == (unsigned long )((struct Scsi_Host *)0)) { goto kset_free; } else { } boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, (void *)ha, & qla4xxx_show_boot_tgt_sec_info, & qla4xxx_tgt_get_attr_visibility, & qla4xxx_boot_release); if ((unsigned long )boot_kobj == (unsigned long )((struct iscsi_boot_kobj *)0)) { goto put_host; } else { } tmp___2 = scsi_host_get(ha->host); if ((unsigned long )tmp___2 == (unsigned long )((struct Scsi_Host *)0)) { goto kset_free; } else { } boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, (void *)ha, & qla4xxx_show_boot_ini_info, & qla4xxx_ini_get_attr_visibility, & qla4xxx_boot_release); if ((unsigned long )boot_kobj == (unsigned long )((struct iscsi_boot_kobj *)0)) { goto put_host; } else { } tmp___3 = scsi_host_get(ha->host); if ((unsigned long )tmp___3 == (unsigned long )((struct Scsi_Host *)0)) { goto kset_free; } else { } boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, (void *)ha, & qla4xxx_show_boot_eth_info, & qla4xxx_eth_get_attr_visibility, & qla4xxx_boot_release); if ((unsigned long )boot_kobj == (unsigned long )((struct iscsi_boot_kobj *)0)) { goto put_host; } else { } return (0); put_host: scsi_host_put(ha->host); kset_free: iscsi_boot_destroy_kset(ha->boot_kset); return (-12); } } static void qla4xxx_get_param_ddb(struct ddb_entry *ddb_entry , struct ql4_tuple_ddb *tddb ) { struct scsi_qla_host *ha ; struct iscsi_cls_session *cls_sess ; struct iscsi_cls_conn *cls_conn ; struct iscsi_session *sess ; struct iscsi_conn *conn ; { if (ql4xextended_error_logging == 2) { printk("\016Func: %s\n", "qla4xxx_get_param_ddb"); } else { } ha = ddb_entry->ha; cls_sess = ddb_entry->sess; sess = (struct iscsi_session *)cls_sess->dd_data; cls_conn = ddb_entry->conn; conn = (struct iscsi_conn *)cls_conn->dd_data; tddb->tpgt = sess->tpgt; tddb->port = conn->persistent_port; strlcpy((char *)(& tddb->iscsi_name), (char const *)sess->targetname, 224UL); strlcpy((char *)(& tddb->ip_addr), (char const *)conn->persistent_address, 64UL); return; } } static void qla4xxx_convert_param_ddb(struct dev_db_entry *fw_ddb_entry , struct ql4_tuple_ddb *tddb , uint8_t *flash_isid ) { uint16_t options ; unsigned long _min1 ; unsigned long _min2 ; { options = 0U; tddb->tpgt = (int )fw_ddb_entry->tgt_portal_grp; _min1 = 224UL; _min2 = 224UL; memcpy((void *)(& tddb->iscsi_name), (void const *)(& fw_ddb_entry->iscsi_name), _min1 < _min2 ? 
_min1 : _min2); options = fw_ddb_entry->options; if (((int )options & 256) != 0) { sprintf((char *)(& tddb->ip_addr), "%pI6", (uint8_t *)(& fw_ddb_entry->ip_addr)); } else { sprintf((char *)(& tddb->ip_addr), "%pI4", (uint8_t *)(& fw_ddb_entry->ip_addr)); } tddb->port = (int )fw_ddb_entry->port; if ((unsigned long )flash_isid == (unsigned long )((uint8_t *)0U)) { memcpy((void *)(& tddb->isid), (void const *)(& fw_ddb_entry->isid), 6UL); } else { memcpy((void *)(& tddb->isid), (void const *)flash_isid, 6UL); } return; } } static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha , struct ql4_tuple_ddb *old_tddb , struct ql4_tuple_ddb *new_tddb , uint8_t is_isid_compare ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = strcmp((char const *)(& old_tddb->iscsi_name), (char const *)(& new_tddb->iscsi_name)); if (tmp != 0) { return (1); } else { } tmp___0 = strcmp((char const *)(& old_tddb->ip_addr), (char const *)(& new_tddb->ip_addr)); if (tmp___0 != 0) { return (1); } else { } if (old_tddb->port != new_tddb->port) { return (1); } else { } if ((unsigned int )is_isid_compare != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: old ISID [%02x%02x%02x%02x%02x%02x] New ISID [%02x%02x%02x%02x%02x%02x]\n", "qla4xxx_compare_tuple_ddb", (int )old_tddb->isid[5], (int )old_tddb->isid[4], (int )old_tddb->isid[3], (int )old_tddb->isid[2], (int )old_tddb->isid[1], (int )old_tddb->isid[0], (int )new_tddb->isid[5], (int )new_tddb->isid[4], (int )new_tddb->isid[3], (int )new_tddb->isid[2], (int )new_tddb->isid[1], (int )new_tddb->isid[0]); } else { } tmp___1 = memcmp((void const *)(& old_tddb->isid), (void const *)(& new_tddb->isid), 6UL); if (tmp___1 != 0) { return (1); } else { } } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Match Found, fw[%d,%d,%s,%s], [%d,%d,%s,%s]", old_tddb->port, old_tddb->tpgt, (char *)(& old_tddb->ip_addr), (char *)(& old_tddb->iscsi_name), new_tddb->port, new_tddb->tpgt, (char *)(& new_tddb->ip_addr), (char *)(& new_tddb->iscsi_name)); } else { } return (0); } } static int qla4xxx_is_session_exists(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint32_t *index ) { struct ddb_entry *ddb_entry ; struct ql4_tuple_ddb *fw_tddb ; struct ql4_tuple_ddb *tmp_tddb ; int idx ; int ret ; void *tmp ; void *tmp___0 ; int tmp___1 ; { fw_tddb = (struct ql4_tuple_ddb *)0; tmp_tddb = (struct ql4_tuple_ddb *)0; ret = 1; tmp = ldv_vzalloc_52(304UL); fw_tddb = (struct ql4_tuple_ddb *)tmp; if ((unsigned long )fw_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed.\n"); } else { } ret = 0; goto exit_check; } else { } tmp___0 = ldv_vzalloc_53(304UL); tmp_tddb = (struct ql4_tuple_ddb *)tmp___0; if ((unsigned long )tmp_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed.\n"); } else { } ret = 0; goto exit_check; } else { } qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, (uint8_t *)0U); idx = 0; goto ldv_65376; ldv_65375: ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, (uint32_t )idx); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { goto ldv_65374; } else { } qla4xxx_get_param_ddb(ddb_entry, tmp_tddb); tmp___1 = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, 0); if (tmp___1 == 
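/* qla4xxx_compare_tuple_ddb() returns 0 only when target name, printable
   address, port and (when requested) ISID all match; qla4xxx_is_session_exists()
   applies it to every active DDB index (0..511) and reports a match through
   *index with a 0 return. */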
0) { ret = 0; if ((unsigned long )index != (unsigned long )((uint32_t *)0U)) { *index = (uint32_t )idx; } else { } goto exit_check; } else { } ldv_65374: idx = idx + 1; ldv_65376: ; if (idx <= 511) { goto ldv_65375; } else { } exit_check: ; if ((unsigned long )fw_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)fw_tddb); } else { } if ((unsigned long )tmp_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)tmp_tddb); } else { } return (ret); } } static int qla4xxx_check_existing_isid(struct list_head *list_nt , uint8_t *isid ) { struct qla_ddb_index *nt_ddb_idx ; struct qla_ddb_index *nt_ddb_idx_tmp ; struct dev_db_entry *fw_ddb_entry ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)list_nt->next; nt_ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)nt_ddb_idx->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65392; ldv_65391: fw_ddb_entry = & nt_ddb_idx->fw_ddb; tmp = memcmp((void const *)(& fw_ddb_entry->isid), (void const *)isid, 6UL); if (tmp == 0) { return (0); } else { } nt_ddb_idx = nt_ddb_idx_tmp; __mptr___1 = (struct list_head const *)nt_ddb_idx_tmp->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65392: ; if ((unsigned long )(& nt_ddb_idx->list) != (unsigned long )list_nt) { goto ldv_65391; } else { } return (1); } } static int qla4xxx_update_isid(struct scsi_qla_host *ha , struct list_head *list_nt , struct dev_db_entry *fw_ddb_entry ) { uint8_t base_value ; uint8_t i ; int tmp ; int tmp___0 ; { base_value = (unsigned int )fw_ddb_entry->isid[1] & 31U; i = 0U; goto ldv_65403; ldv_65402: fw_ddb_entry->isid[1] = (uint8_t )((int )((signed char )((int )i << 5)) | (int )((signed char )base_value)); tmp = qla4xxx_check_existing_isid(list_nt, (uint8_t *)(& fw_ddb_entry->isid)); if (tmp != 0) { goto ldv_65401; } else { } i = (uint8_t )((int )i + 1); ldv_65403: ; if ((unsigned int )i <= 7U) { goto ldv_65402; } else { } ldv_65401: tmp___0 = qla4xxx_check_existing_isid(list_nt, (uint8_t *)(& fw_ddb_entry->isid)); if (tmp___0 == 0) { return (1); } else { } return (0); } } static int qla4xxx_should_update_isid(struct scsi_qla_host *ha , struct ql4_tuple_ddb *old_tddb , struct ql4_tuple_ddb *new_tddb ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = strcmp((char const *)(& old_tddb->ip_addr), (char const *)(& new_tddb->ip_addr)); if (tmp == 0) { if (old_tddb->port == new_tddb->port) { return (1); } else { } } else { } tmp___0 = strcmp((char const *)(& old_tddb->iscsi_name), (char const *)(& new_tddb->iscsi_name)); if (tmp___0 != 0) { return (1); } else { } tmp___1 = memcmp((void const *)(& old_tddb->isid), (void const *)(& new_tddb->isid), 6UL); if (tmp___1 != 0) { return (1); } else { } return (0); } } static int qla4xxx_is_flash_ddb_exists(struct scsi_qla_host *ha , struct list_head *list_nt , struct dev_db_entry *fw_ddb_entry ) { struct qla_ddb_index *nt_ddb_idx ; struct qla_ddb_index *nt_ddb_idx_tmp ; struct ql4_tuple_ddb *fw_tddb ; struct ql4_tuple_ddb *tmp_tddb ; int rval ; int ret ; void *tmp ; void *tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { fw_tddb = (struct ql4_tuple_ddb *)0; tmp_tddb = (struct ql4_tuple_ddb *)0; ret = 1; tmp = ldv_vzalloc_54(304UL); fw_tddb = (struct 
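/* qla4xxx_is_flash_ddb_exists() (continuing below): first looks for an entry
   in list_nt with the same tuple (using the ISID saved from flash); when only
   the portal differs but name and ISID collide, qla4xxx_update_isid() cycles
   the top three bits of ISID byte 1 to pick an unused value so the new entry
   can still be kept. */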
ql4_tuple_ddb *)tmp; if ((unsigned long )fw_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed.\n"); } else { } ret = 0; goto exit_check; } else { } tmp___0 = ldv_vzalloc_55(304UL); tmp_tddb = (struct ql4_tuple_ddb *)tmp___0; if ((unsigned long )tmp_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Memory Allocation failed.\n"); } else { } ret = 0; goto exit_check; } else { } qla4xxx_convert_param_ddb(fw_ddb_entry, fw_tddb, (uint8_t *)0U); __mptr = (struct list_head const *)list_nt->next; nt_ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)nt_ddb_idx->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65428; ldv_65427: qla4xxx_convert_param_ddb(& nt_ddb_idx->fw_ddb, tmp_tddb, (uint8_t *)(& nt_ddb_idx->flash_isid)); ret = qla4xxx_compare_tuple_ddb(ha, fw_tddb, tmp_tddb, 1); if (ret == 0) { goto exit_check; } else { } nt_ddb_idx = nt_ddb_idx_tmp; __mptr___1 = (struct list_head const *)nt_ddb_idx_tmp->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65428: ; if ((unsigned long )(& nt_ddb_idx->list) != (unsigned long )list_nt) { goto ldv_65427; } else { } __mptr___2 = (struct list_head const *)list_nt->next; nt_ddb_idx = (struct qla_ddb_index *)__mptr___2; __mptr___3 = (struct list_head const *)nt_ddb_idx->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___3; goto ldv_65437; ldv_65436: qla4xxx_convert_param_ddb(& nt_ddb_idx->fw_ddb, tmp_tddb, (uint8_t *)0U); ret = qla4xxx_should_update_isid(ha, tmp_tddb, fw_tddb); if (ret == 0) { rval = qla4xxx_update_isid(ha, list_nt, fw_ddb_entry); if (rval == 0) { ret = 1; } else { ret = 0; } goto exit_check; } else { } nt_ddb_idx = nt_ddb_idx_tmp; __mptr___4 = (struct list_head const *)nt_ddb_idx_tmp->list.next; nt_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___4; ldv_65437: ; if ((unsigned long )(& nt_ddb_idx->list) != (unsigned long )list_nt) { goto ldv_65436; } else { } exit_check: ; if ((unsigned long )fw_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)fw_tddb); } else { } if ((unsigned long )tmp_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)tmp_tddb); } else { } return (ret); } } static void qla4xxx_free_ddb_list(struct list_head *list_ddb ) { struct qla_ddb_index *ddb_idx ; struct qla_ddb_index *ddb_idx_tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)list_ddb->next; ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)ddb_idx->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65451; ldv_65450: list_del_init(& ddb_idx->list); vfree((void const *)ddb_idx); ddb_idx = ddb_idx_tmp; __mptr___1 = (struct list_head const *)ddb_idx_tmp->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65451: ; if ((unsigned long )(& ddb_idx->list) != (unsigned long )list_ddb) { goto ldv_65450; } else { } return; } } static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry ) { struct iscsi_endpoint *ep ; struct sockaddr_in *addr ; struct sockaddr_in6 *addr6 ; struct sockaddr *t_addr ; struct __kernel_sockaddr_storage *dst_addr ; char *ip ; void *tmp ; __u16 tmp___0 ; __u16 tmp___1 ; { tmp = 
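/* qla4xxx_get_ep_fwdb() (continuing below): builds a sockaddr_in or
   sockaddr_in6 from the firmware DDB's address family, IP and byte-swapped
   port, opens an iSCSI endpoint via qla4xxx_ep_connect(), and frees the
   temporary sockaddr storage before returning. */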
ldv_vmalloc_56(128UL); dst_addr = (struct __kernel_sockaddr_storage *)tmp; if ((unsigned long )dst_addr == (unsigned long )((struct __kernel_sockaddr_storage *)0)) { return ((struct iscsi_endpoint *)0); } else { } if (((int )fw_ddb_entry->options & 256) != 0) { t_addr = (struct sockaddr *)dst_addr; t_addr->sa_family = 10U; addr6 = (struct sockaddr_in6 *)dst_addr; ip = (char *)(& addr6->sin6_addr); memcpy((void *)ip, (void const *)(& fw_ddb_entry->ip_addr), 16UL); tmp___0 = __fswab16((int )fw_ddb_entry->port); addr6->sin6_port = tmp___0; } else { t_addr = (struct sockaddr *)dst_addr; t_addr->sa_family = 2U; addr = (struct sockaddr_in *)dst_addr; ip = (char *)(& addr->sin_addr); memcpy((void *)ip, (void const *)(& fw_ddb_entry->ip_addr), 4UL); tmp___1 = __fswab16((int )fw_ddb_entry->port); addr->sin_port = tmp___1; } ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0); vfree((void const *)dst_addr); return (ep); } } static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha , uint16_t idx ) { { if (ql4xdisablesysfsboot != 0) { return (0); } else { } if ((int )ha->pri_ddb_idx == (int )idx || (int )ha->sec_ddb_idx == (int )idx) { return (1); } else { } return (0); } } static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , uint16_t idx ) { uint16_t def_timeout ; { ddb_entry->ddb_type = 1U; ddb_entry->fw_ddb_index = 65535U; ddb_entry->fw_ddb_device_state = 1U; ddb_entry->ha = ha; ddb_entry->unblock_sess = & qla4xxx_unblock_flash_ddb; ddb_entry->ddb_change = & qla4xxx_flash_ddb_change; ddb_entry->chap_tbl_idx = 65535U; atomic_set(& ddb_entry->retry_relogin_timer, 65535); atomic_set(& ddb_entry->relogin_timer, 0); atomic_set(& ddb_entry->relogin_retry_count, 0); def_timeout = ddb_entry->fw_ddb_entry.def_timeout; ddb_entry->default_relogin_timeout = (unsigned int )def_timeout > 12U && (unsigned int )def_timeout <= 119U ? 
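/* qla4xxx_setup_flash_ddb_entry(): marks the entry as a flash DDB, wires up
   the unblock/ddb-change callbacks and relogin counters, clamps the firmware
   def_timeout to the 13..119 second window (falling back to 12 seconds), and
   flags the primary/secondary boot indices when sysfs boot export is disabled. */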
def_timeout : 12U; ddb_entry->default_time2wait = (uint32_t )ddb_entry->fw_ddb_entry.iscsi_def_time2wait; if (ql4xdisablesysfsboot != 0 && ((int )ha->pri_ddb_idx == (int )idx || (int )ha->sec_ddb_idx == (int )idx)) { set_bit(1L, (unsigned long volatile *)(& ddb_entry->flags)); } else { } return; } } static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha ) { uint32_t idx ; uint32_t ip_idx[4U] ; uint32_t sts[8U] ; uint32_t ip_state ; unsigned long wtime ; int ret ; { idx = 0U; ip_idx[0] = 0U; ip_idx[1] = 1U; ip_idx[2] = 2U; ip_idx[3] = 3U; wtime = (unsigned long )jiffies + 7500UL; ldv_65493: idx = 0U; goto ldv_65484; ldv_65483: ; if (ip_idx[idx] == 4294967295U) { goto ldv_65482; } else { } ret = qla4xxx_get_ip_state(ha, 0U, ip_idx[idx], (uint32_t *)(& sts)); if (ret == 1) { ip_idx[idx] = 4294967295U; goto ldv_65482; } else { } ip_state = (sts[1] & 251658240U) >> 24; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Waiting for IP state for idx = %d, state = 0x%x\n", ip_idx[idx], ip_state); } else { } if ((((ip_state == 0U || ip_state == 1U) || ip_state == 5U) || ip_state == 4U) || ip_state == 6U) { ip_idx[idx] = 4294967295U; } else { } ldv_65482: idx = idx + 1U; ldv_65484: ; if (idx <= 3U) { goto ldv_65483; } else { } if (((ip_idx[0] == 4294967295U && ip_idx[1] == 4294967295U) && ip_idx[2] == 4294967295U) && ip_idx[3] == 4294967295U) { goto ldv_65486; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_65493; } else { } ldv_65486: ; return; } } static int qla4xxx_cmp_fw_stentry(struct dev_db_entry *fw_ddb_entry , struct dev_db_entry *flash_ddb_entry ) { uint16_t options ; size_t ip_len ; int tmp ; int tmp___0 ; int tmp___1 ; { options = 0U; ip_len = 4UL; options = fw_ddb_entry->options; if (((int )options & 256) != 0) { ip_len = 16UL; } else { } tmp = memcmp((void const *)(& fw_ddb_entry->ip_addr), (void const *)(& flash_ddb_entry->ip_addr), ip_len); if (tmp != 0) { return (1); } else { } tmp___0 = memcmp((void const *)(& fw_ddb_entry->isid), (void const *)(& flash_ddb_entry->isid), 6UL); if (tmp___0 != 0) { return (1); } else { } tmp___1 = memcmp((void const *)(& fw_ddb_entry->port), (void const *)(& flash_ddb_entry->port), 2UL); if (tmp___1 != 0) { return (1); } else { } return (0); } } static int qla4xxx_find_flash_st_idx(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint32_t fw_idx , uint32_t *flash_index ) { struct dev_db_entry *flash_ddb_entry ; dma_addr_t flash_ddb_entry_dma ; uint32_t idx ; int max_ddbs ; int ret ; int status ; int tmp ; void *tmp___0 ; { idx = 0U; ret = 1; tmp = is_qla40XX(ha); max_ddbs = tmp != 0 ? 
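/* qla4xxx_wait_for_ip_configuration() (above) polls the firmware IP state of
   the four address slots, dropping each slot once its state settles, until all
   are done or the wait period expires; qla4xxx_cmp_fw_stentry() compares IP,
   ISID and port of two DDB entries, and qla4xxx_find_flash_st_idx()
   (continuing below) uses it to map a firmware send-target entry to its flash
   index. */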
256 : 512; tmp___0 = ldv_dma_pool_alloc_57(ha->fw_ddb_dma_pool, 208U, & flash_ddb_entry_dma); flash_ddb_entry = (struct dev_db_entry *)tmp___0; if ((unsigned long )flash_ddb_entry == (unsigned long )((struct dev_db_entry *)0) || (unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Out of memory\n"); goto exit_find_st_idx; } else { } status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, flash_ddb_entry_dma, (int )((uint16_t )fw_idx)); if (status == 0) { status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); if (status == 0) { *flash_index = fw_idx; ret = 0; goto exit_find_st_idx; } else { } } else { } idx = 0U; goto ldv_65515; ldv_65514: status = qla4xxx_flashdb_by_index(ha, flash_ddb_entry, flash_ddb_entry_dma, (int )((uint16_t )idx)); if (status == 1) { goto ldv_65513; } else { } status = qla4xxx_cmp_fw_stentry(fw_ddb_entry, flash_ddb_entry); if (status == 0) { *flash_index = idx; ret = 0; goto exit_find_st_idx; } else { } ldv_65513: idx = idx + 1U; ldv_65515: ; if ((uint32_t )max_ddbs > idx) { goto ldv_65514; } else { } if ((uint32_t )max_ddbs == idx) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Failed to find ST [%d] in flash\n", fw_idx); } else { } exit_find_st_idx: ; if ((unsigned long )flash_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)flash_ddb_entry, flash_ddb_entry_dma); } else { } return (ret); } } static void qla4xxx_build_st_list(struct scsi_qla_host *ha , struct list_head *list_st ) { struct qla_ddb_index *st_ddb_idx ; int max_ddbs ; int fw_idx_size ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_dma ; int ret ; uint32_t idx ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; uint32_t flash_index ; uint16_t conn_id ; void *tmp ; int tmp___0 ; size_t tmp___1 ; void *tmp___2 ; { idx = 0U; next_idx = 0U; state = 0U; conn_err = 0U; flash_index = 4294967295U; conn_id = 0U; tmp = ldv_dma_pool_alloc_58(ha->fw_ddb_dma_pool, 208U, & fw_ddb_dma); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Out of memory\n"); } else { } goto exit_st_list; } else { } tmp___0 = is_qla40XX(ha); max_ddbs = tmp___0 != 0 ? 
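/* qla4xxx_build_st_list() (continuing below): walks the firmware DDB table
   (256 entries on 40xx, 512 on 80xx), keeps every assigned entry whose
   iscsi_name is empty (the send-target records), resolves each one's flash
   index, and appends a qla_ddb_index node to list_st. */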
256 : 512; fw_idx_size = 544; idx = 0U; goto ldv_65537; ldv_65536: ret = qla4xxx_get_fwddb_entry(ha, (int )((uint16_t )idx), fw_ddb_entry, fw_ddb_dma, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, & conn_id); if (ret == 1) { goto ldv_65534; } else { } if (state == 0U) { goto continue_next_st; } else { } tmp___1 = strlen((char const *)(& fw_ddb_entry->iscsi_name)); if (tmp___1 != 0UL) { goto continue_next_st; } else { } tmp___2 = ldv_vzalloc_59((unsigned long )fw_idx_size); st_ddb_idx = (struct qla_ddb_index *)tmp___2; if ((unsigned long )st_ddb_idx == (unsigned long )((struct qla_ddb_index *)0)) { goto ldv_65534; } else { } ret = qla4xxx_find_flash_st_idx(ha, fw_ddb_entry, idx, & flash_index); if (ret == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No flash entry for ST at idx [%d]\n", idx); st_ddb_idx->flash_ddb_idx = (uint16_t )idx; } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "ST at idx [%d] is stored at flash [%d]\n", idx, flash_index); st_ddb_idx->flash_ddb_idx = (uint16_t )flash_index; } st_ddb_idx->fw_ddb_idx = (uint16_t )idx; list_add_tail(& st_ddb_idx->list, list_st); continue_next_st: ; if (next_idx == 0U) { goto ldv_65534; } else { } idx = next_idx; ldv_65537: ; if ((uint32_t )max_ddbs > idx) { goto ldv_65536; } else { } ldv_65534: ; exit_st_list: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)fw_ddb_entry, fw_ddb_dma); } else { } return; } } static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha , struct list_head *list_ddb ) { struct qla_ddb_index *ddb_idx ; struct qla_ddb_index *ddb_idx_tmp ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; int ret ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { next_idx = 0U; state = 0U; conn_err = 0U; __mptr = (struct list_head const *)list_ddb->next; ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)ddb_idx->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65556; ldv_65555: ret = qla4xxx_get_fwddb_entry(ha, (int )ddb_idx->fw_ddb_idx, (struct dev_db_entry *)0, 0ULL, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto ldv_65554; } else { } if (state == 1U || state == 6U) { list_del_init(& ddb_idx->list); vfree((void const *)ddb_idx); } else { } ldv_65554: ddb_idx = ddb_idx_tmp; __mptr___1 = (struct list_head const *)ddb_idx_tmp->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65556: ; if ((unsigned long )(& ddb_idx->list) != (unsigned long )list_ddb) { goto ldv_65555; } else { } return; } } static void qla4xxx_update_sess_disc_idx(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , struct dev_db_entry *fw_ddb_entry ) { struct iscsi_cls_session *cls_sess ; struct iscsi_session *sess ; uint32_t max_ddbs ; uint16_t ddb_link ; int tmp ; { max_ddbs = 0U; ddb_link = 65535U; tmp = is_qla40XX(ha); max_ddbs = tmp != 0 ? 
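/* qla4xxx_remove_failed_ddb() (above) drops list entries whose firmware state
   comes back as 1 or 6 (connection not active / session failed in the original
   driver's terms); qla4xxx_sess_conn_setup() (below) allocates an
   iscsi_cls_session and connection for a flash DDB, copies the firmware entry
   and parameters, attaches an endpoint from qla4xxx_get_ep_fwdb(), and on a
   reset blocks the session and sets the relogin dpc flag. */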
256U : 512U; cls_sess = ddb_entry->sess; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_link = fw_ddb_entry->ddb_link; if ((uint32_t )ddb_link < max_ddbs) { sess->discovery_parent_idx = ddb_link; } else { sess->discovery_parent_idx = 65535U; } return; } } static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , int is_reset , uint16_t idx ) { struct iscsi_cls_session *cls_sess ; struct iscsi_session *sess ; struct iscsi_cls_conn *cls_conn ; struct iscsi_endpoint *ep ; uint16_t cmds_max ; uint16_t conn_id ; uint32_t initial_cmdsn ; int ret ; struct ddb_entry *ddb_entry ; { cmds_max = 32U; conn_id = 0U; initial_cmdsn = 0U; ret = 0; ddb_entry = (struct ddb_entry *)0; cls_sess = iscsi_session_setup(& qla4xxx_iscsi_transport, ha->host, (int )cmds_max, 600, 224, initial_cmdsn, 65535U); if ((unsigned long )cls_sess == (unsigned long )((struct iscsi_cls_session *)0)) { ret = 1; goto exit_setup; } else { } module_put(qla4xxx_iscsi_transport.owner); sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ddb_entry->sess = cls_sess; cls_sess->recovery_tmo = ql4xsess_recovery_tmo; memcpy((void *)(& ddb_entry->fw_ddb_entry), (void const *)fw_ddb_entry, 512UL); qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, (int )idx); cls_conn = iscsi_conn_setup(cls_sess, 8, (uint32_t )conn_id); if ((unsigned long )cls_conn == (unsigned long )((struct iscsi_cls_conn *)0)) { ret = 1; goto exit_setup; } else { } ddb_entry->conn = cls_conn; ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry); if ((unsigned long )ep != (unsigned long )((struct iscsi_endpoint *)0)) { ep->conn = cls_conn; cls_conn->ep = ep; } else { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to get ep\n"); } else { } ret = 1; goto exit_setup; } qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn); qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); if (is_reset == 1) { iscsi_block_session(cls_sess); set_bit(3L, (unsigned long volatile *)(& ha->dpc_flags)); set_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); } else { } exit_setup: ; return (ret); } } static void qla4xxx_update_fw_ddb_link(struct scsi_qla_host *ha , struct list_head *list_ddb , struct dev_db_entry *fw_ddb_entry ) { struct qla_ddb_index *ddb_idx ; struct qla_ddb_index *ddb_idx_tmp ; uint16_t ddb_link ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { ddb_link = fw_ddb_entry->ddb_link; __mptr = (struct list_head const *)list_ddb->next; ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)ddb_idx->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65598; ldv_65597: ; if ((int )ddb_idx->fw_ddb_idx == (int )ddb_link) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Updating NT parent idx from [%d] to [%d]\n", (int )ddb_link, (int )ddb_idx->flash_ddb_idx); } else { } fw_ddb_entry->ddb_link = ddb_idx->flash_ddb_idx; return; } else { } ddb_idx = ddb_idx_tmp; __mptr___1 = (struct list_head const *)ddb_idx_tmp->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65598: ; if ((unsigned long )(& ddb_idx->list) != (unsigned long )list_ddb) { goto ldv_65597; } else { } return; } } static void qla4xxx_build_nt_list(struct scsi_qla_host *ha , struct list_head *list_nt , struct list_head *list_st , int is_reset ) { struct dev_db_entry *fw_ddb_entry ; struct ddb_entry 
*ddb_entry ; dma_addr_t fw_ddb_dma ; int max_ddbs ; int fw_idx_size ; int ret ; uint32_t idx ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; uint32_t ddb_idx ; uint16_t conn_id ; uint16_t ddb_link ; struct qla_ddb_index *nt_ddb_idx ; void *tmp ; int tmp___0 ; int tmp___1 ; size_t tmp___2 ; void *tmp___3 ; { ddb_entry = (struct ddb_entry *)0; idx = 0U; next_idx = 0U; state = 0U; conn_err = 0U; ddb_idx = 4294967295U; conn_id = 0U; ddb_link = 65535U; tmp = ldv_dma_pool_alloc_60(ha->fw_ddb_dma_pool, 208U, & fw_ddb_dma); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Out of memory\n"); } else { } goto exit_nt_list; } else { } tmp___0 = is_qla40XX(ha); max_ddbs = tmp___0 != 0 ? 256 : 512; fw_idx_size = 544; idx = 0U; goto ldv_65624; ldv_65623: ret = qla4xxx_get_fwddb_entry(ha, (int )((uint16_t )idx), fw_ddb_entry, fw_ddb_dma, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, & conn_id); if (ret == 1) { goto ldv_65621; } else { } tmp___1 = qla4xxx_verify_boot_idx(ha, (int )((uint16_t )idx)); if (tmp___1 != 0) { goto continue_next_nt; } else { } tmp___2 = strlen((char const *)(& fw_ddb_entry->iscsi_name)); if (tmp___2 == 0UL) { goto continue_next_nt; } else { } ddb_link = fw_ddb_entry->ddb_link; if ((int )ddb_link < max_ddbs) { qla4xxx_update_fw_ddb_link(ha, list_st, fw_ddb_entry); } else { } if ((state != 1U && state != 6U) && is_reset == 0) { goto continue_next_nt; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Adding DDB to session = 0x%x\n", idx); } else { } if (is_reset == 0) { tmp___3 = ldv_vmalloc_61((unsigned long )fw_idx_size); nt_ddb_idx = (struct qla_ddb_index *)tmp___3; if ((unsigned long )nt_ddb_idx == (unsigned long )((struct qla_ddb_index *)0)) { goto ldv_65621; } else { } nt_ddb_idx->fw_ddb_idx = (uint16_t )idx; memcpy((void *)(& nt_ddb_idx->flash_isid), (void const *)(& fw_ddb_entry->isid), 6UL); ret = qla4xxx_is_flash_ddb_exists(ha, list_nt, fw_ddb_entry); if (ret == 0) { vfree((void const *)nt_ddb_idx); goto continue_next_nt; } else { } memcpy((void *)(& nt_ddb_idx->fw_ddb), (void const *)fw_ddb_entry, 512UL); list_add_tail(& nt_ddb_idx->list, list_nt); } else if (is_reset == 1) { ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, & ddb_idx); if (ret == 0) { ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, ddb_idx); if ((unsigned long )ddb_entry != (unsigned long )((struct ddb_entry *)0)) { qla4xxx_update_sess_disc_idx(ha, ddb_entry, fw_ddb_entry); } else { } goto continue_next_nt; } else { } } else { } ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, (int )((uint16_t )idx)); if (ret == 1) { goto exit_nt_list; } else { } continue_next_nt: ; if (next_idx == 0U) { goto ldv_65621; } else { } idx = next_idx; ldv_65624: ; if ((uint32_t )max_ddbs > idx) { goto ldv_65623; } else { } ldv_65621: ; exit_nt_list: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)fw_ddb_entry, fw_ddb_dma); } else { } return; } } static void qla4xxx_build_new_nt_list(struct scsi_qla_host *ha , struct list_head *list_nt , uint16_t target_id ) { struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_dma ; int max_ddbs ; int fw_idx_size ; int ret ; uint32_t idx ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; uint16_t conn_id ; struct qla_ddb_index 
*nt_ddb_idx ; void *tmp ; int tmp___0 ; size_t tmp___1 ; void *tmp___2 ; { idx = 0U; next_idx = 0U; state = 0U; conn_err = 0U; conn_id = 0U; tmp = ldv_dma_pool_alloc_62(ha->fw_ddb_dma_pool, 208U, & fw_ddb_dma); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Out of memory\n"); } else { } goto exit_new_nt_list; } else { } tmp___0 = is_qla40XX(ha); max_ddbs = tmp___0 != 0 ? 256 : 512; fw_idx_size = 544; idx = 0U; goto ldv_65645; ldv_65644: ret = qla4xxx_get_fwddb_entry(ha, (int )((uint16_t )idx), fw_ddb_entry, fw_ddb_dma, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, & conn_id); if (ret == 1) { goto ldv_65642; } else { } tmp___1 = strlen((char const *)(& fw_ddb_entry->iscsi_name)); if (tmp___1 == 0UL) { goto continue_next_new_nt; } else { } if (state != 1U) { goto continue_next_new_nt; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Adding DDB to session = 0x%x\n", idx); } else { } tmp___2 = ldv_vmalloc_63((unsigned long )fw_idx_size); nt_ddb_idx = (struct qla_ddb_index *)tmp___2; if ((unsigned long )nt_ddb_idx == (unsigned long )((struct qla_ddb_index *)0)) { goto ldv_65642; } else { } nt_ddb_idx->fw_ddb_idx = (uint16_t )idx; ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, (uint32_t *)0U); if (ret == 0) { vfree((void const *)nt_ddb_idx); goto continue_next_new_nt; } else { } if ((int )target_id < max_ddbs) { fw_ddb_entry->ddb_link = target_id; } else { } list_add_tail(& nt_ddb_idx->list, list_nt); ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, 1, (int )((uint16_t )idx)); if (ret == 1) { goto exit_new_nt_list; } else { } continue_next_new_nt: ; if (next_idx == 0U) { goto ldv_65642; } else { } idx = next_idx; ldv_65645: ; if ((uint32_t )max_ddbs > idx) { goto ldv_65644; } else { } ldv_65642: ; exit_new_nt_list: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)fw_ddb_entry, fw_ddb_dma); } else { } return; } } static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev , void *data ) { struct iscsi_bus_flash_session *fnode_sess ; int tmp ; struct device const *__mptr ; { tmp = iscsi_flashnode_bus_match(dev, (struct device_driver *)0); if (tmp == 0) { return (0); } else { } __mptr = (struct device const *)dev; fnode_sess = (struct iscsi_bus_flash_session *)__mptr + 0xffffffffffffffd8UL; return (fnode_sess->flash_state == 0); } } static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint16_t *idx , int user ) { struct iscsi_bus_flash_session *fnode_sess ; struct iscsi_bus_flash_conn *fnode_conn ; int rc ; { fnode_sess = (struct iscsi_bus_flash_session *)0; fnode_conn = (struct iscsi_bus_flash_conn *)0; rc = 1; fnode_sess = iscsi_create_flashnode_sess(ha->host, (int )*idx, & qla4xxx_iscsi_transport, 0); if ((unsigned long )fnode_sess == (unsigned long )((struct iscsi_bus_flash_session *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to create session sysfs entry for flashnode %d of host%lu\n", "qla4xxx_sysfs_ddb_tgt_create", (int )*idx, ha->host_no); goto exit_tgt_create; } else { } fnode_conn = iscsi_create_flashnode_conn(ha->host, fnode_sess, & qla4xxx_iscsi_transport, 0); if ((unsigned long )fnode_conn == (unsigned long )((struct iscsi_bus_flash_conn *)0)) { 
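/* Creating the connection-side sysfs node failed: log the error, then jump
   to free_sess, which destroys the session-side flash node created just
   above before returning failure. */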
dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to create conn sysfs entry for flashnode %d of host%lu\n", "qla4xxx_sysfs_ddb_tgt_create", (int )*idx, ha->host_no); goto free_sess; } else { } if (user != 0) { fnode_sess->flash_state = 0; } else { fnode_sess->flash_state = 1; if ((int )*idx == (int )ha->pri_ddb_idx || (int )*idx == (int )ha->sec_ddb_idx) { fnode_sess->is_boot_target = 1U; } else { fnode_sess->is_boot_target = 0U; } } rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: sysfs entry %s created\n", "qla4xxx_sysfs_ddb_tgt_create", fnode_sess->dev.kobj.name); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: sysfs entry %s created\n", "qla4xxx_sysfs_ddb_tgt_create", fnode_conn->dev.kobj.name); return (0); free_sess: iscsi_destroy_flashnode_sess(fnode_sess); exit_tgt_create: ; return (1); } } static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost , char const *buf , int len ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; struct device *dev ; uint16_t idx ; uint16_t max_ddbs ; uint32_t options ; uint32_t rval ; int tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { tmp = to_qla_host(shost); ha = tmp; fw_ddb_entry = (struct dev_db_entry *)0; idx = 0U; max_ddbs = 0U; options = 0U; rval = 1U; tmp___0 = strncasecmp("ipv4", buf, 4UL); if (tmp___0 != 0) { tmp___1 = strncasecmp("ipv6", buf, 4UL); if (tmp___1 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid portal type\n", "qla4xxx_sysfs_ddb_add"); } else { } goto exit_ddb_add; } else { } } else { } tmp___2 = is_qla40XX(ha); max_ddbs = tmp___2 != 0 ? 
64U : 512U; tmp___3 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___3; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_add"); } else { } goto exit_ddb_add; } else { } dev = iscsi_find_flashnode_sess(ha->host, (void *)0, & qla4xxx_sysfs_ddb_is_non_persistent); if ((unsigned long )dev != (unsigned long )((struct device *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: A non-persistent entry %s found\n", "qla4xxx_sysfs_ddb_add", dev->kobj.name); put_device(dev); goto exit_ddb_add; } else { } idx = 2U; goto ldv_65682; ldv_65681: tmp___4 = qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, (int )idx); if (tmp___4 != 0) { goto ldv_65680; } else { } idx = (uint16_t )((int )idx + 1); ldv_65682: ; if ((int )idx < (int )max_ddbs) { goto ldv_65681; } else { } ldv_65680: ; if ((int )idx == (int )max_ddbs) { goto exit_ddb_add; } else { } tmp___5 = strncasecmp("ipv6", buf, 4UL); if (tmp___5 == 0) { options = options | 1U; } else { } tmp___6 = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); rval = (uint32_t )tmp___6; if (rval == 1U) { goto exit_ddb_add; } else { } tmp___7 = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, & idx, 1); rval = (uint32_t )tmp___7; exit_ddb_add: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } if (rval == 0U) { return ((int )idx); } else { return (-5); } } } static int qla4xxx_sysfs_ddb_apply(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; uint32_t dev_db_start_offset ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint32_t options ; int rval ; void *tmp___1 ; int tmp___2 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; dev_db_start_offset = 83886080U; fw_ddb_entry = (struct dev_db_entry *)0; options = 0U; rval = 0; tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___1; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_apply"); } else { } rval = -12; goto exit_ddb_apply; } else { } tmp___2 = strncasecmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___2 == 0) { options = options | 1U; } else { } rval = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (rval == 1) { goto exit_ddb_apply; } else { } dev_db_start_offset = fnode_sess->target_id * 512U + dev_db_start_offset; qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); fw_ddb_entry->cookie = 36916U; rval = qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 512U, 3U); if (rval == 0) { fnode_sess->flash_state = 1; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: flash node %u of host %lu written to flash\n", "qla4xxx_sysfs_ddb_apply", fnode_sess->target_id, ha->host_no); } else { rval = -5; 
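/* qla4xxx_set_flash() did not accept the entry: report -EIO (-5) and log
   which flash node failed before the DMA buffer is released below. */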
dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Error while writing flash node %u of host %lu to flash\n", "qla4xxx_sysfs_ddb_apply", fnode_sess->target_id, ha->host_no); } exit_ddb_apply: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return (rval); } } static ssize_t qla4xxx_sysfs_ddb_conn_open(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint16_t idx ) { struct dev_db_entry *ddb_entry ; dma_addr_t ddb_entry_dma ; unsigned long wtime ; uint32_t mbx_sts ; uint32_t state ; uint32_t conn_err ; uint16_t tmo ; int ret ; void *tmp ; { ddb_entry = (struct dev_db_entry *)0; mbx_sts = 0U; state = 0U; conn_err = 0U; tmo = 0U; ret = 0; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & ddb_entry_dma, 208U, (struct dma_attrs *)0); ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_conn_open"); } else { } return (1L); } else { } memcpy((void *)ddb_entry, (void const *)fw_ddb_entry, 512UL); ret = qla4xxx_set_ddb_entry(ha, (int )idx, ddb_entry_dma, & mbx_sts); if (ret != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to set ddb entry for index %d\n", "qla4xxx_sysfs_ddb_conn_open", (int )idx); } else { } goto exit_ddb_conn_open; } else { } qla4xxx_conn_open(ha, (int )idx); tmo = (unsigned int )ha->def_timeout > 12U && (unsigned int )ha->def_timeout <= 119U ? ha->def_timeout : 12U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Default time to wait for login to ddb %d\n", (int )tmo); } else { } wtime = (unsigned long )((int )tmo * 250) + (unsigned long )jiffies; ldv_65719: ret = qla4xxx_get_fwddb_entry(ha, (int )idx, (struct dev_db_entry *)0, 0ULL, (uint32_t *)0U, (uint32_t *)0U, & state, & conn_err, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto ldv_65711; } else { } if (state == 1U || state == 6U) { goto ldv_65712; } else { } schedule_timeout_uninterruptible(25L); ldv_65711: ; if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_65719; } else { } ldv_65712: ; exit_ddb_conn_open: ; if ((unsigned long )ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)ddb_entry, ddb_entry_dma, (struct dma_attrs *)0); } else { } return ((ssize_t )ret); } } static int qla4xxx_ddb_login_st(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint16_t target_id ) { struct qla_ddb_index *ddb_idx ; struct qla_ddb_index *ddb_idx_tmp ; struct list_head list_nt ; uint16_t ddb_index ; int ret ; int tmp ; ssize_t tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___1 ; { ret = 0; tmp = constant_test_bit(4L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: A discovery already in progress!\n", "qla4xxx_ddb_login_st"); return (1); } else { } INIT_LIST_HEAD(& list_nt); set_bit(4L, (unsigned long volatile *)(& ha->flags)); ret = qla4xxx_get_ddb_index(ha, & ddb_index); if (ret == 1) { goto exit_login_st_clr_bit; } else { } tmp___0 = qla4xxx_sysfs_ddb_conn_open(ha, 
fw_ddb_entry, (int )ddb_index); ret = (int )tmp___0; if (ret == 1) { goto exit_login_st; } else { } qla4xxx_build_new_nt_list(ha, & list_nt, (int )target_id); __mptr = (struct list_head const *)list_nt.next; ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)ddb_idx->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_65740; ldv_65739: list_del_init(& ddb_idx->list); qla4xxx_clear_ddb_entry(ha, (uint32_t )ddb_idx->fw_ddb_idx); vfree((void const *)ddb_idx); ddb_idx = ddb_idx_tmp; __mptr___1 = (struct list_head const *)ddb_idx_tmp->list.next; ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_65740: ; if ((unsigned long )(& ddb_idx->list) != (unsigned long )(& list_nt)) { goto ldv_65739; } else { } exit_login_st: tmp___1 = qla4xxx_clear_ddb_entry(ha, (uint32_t )ddb_index); if (tmp___1 == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to clear DDB index = 0x%x\n", (int )ddb_index); } else { } clear_bit((long )ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); exit_login_st_clr_bit: clear_bit(4L, (unsigned long volatile *)(& ha->flags)); return (ret); } } static int qla4xxx_ddb_login_nt(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , uint16_t idx ) { int ret ; { ret = 1; ret = qla4xxx_is_session_exists(ha, fw_ddb_entry, (uint32_t *)0U); if (ret != 0) { ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, 1, (int )idx); } else { ret = -1; } return (ret); } } static int qla4xxx_sysfs_ddb_login(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint32_t options ; int ret ; void *tmp___1 ; int tmp___2 ; size_t tmp___3 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; fw_ddb_entry = (struct dev_db_entry *)0; options = 0U; ret = 0; if (fnode_sess->flash_state == 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Target info is not persistent\n", "qla4xxx_sysfs_ddb_login"); ret = -5; goto exit_ddb_login; } else { } tmp___1 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___1; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_login"); } else { } ret = -12; goto exit_ddb_login; } else { } tmp___2 = strncasecmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___2 == 0) { options = options | 1U; } else { } ret = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (ret == 1) { goto exit_ddb_login; } else { } qla4xxx_copy_to_fwddb_param(fnode_sess, fnode_conn, fw_ddb_entry); fw_ddb_entry->cookie = 36916U; tmp___3 = strlen((char const *)(& fw_ddb_entry->iscsi_name)); if (tmp___3 == 0UL) { ret = qla4xxx_ddb_login_st(ha, fw_ddb_entry, (int )((uint16_t )fnode_sess->target_id)); } else { ret = qla4xxx_ddb_login_nt(ha, fw_ddb_entry, (int )((uint16_t )fnode_sess->target_id)); } if (ret > 0) { ret = -5; } else { } exit_ddb_login: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return (ret); } } 
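/*
 * qla4xxx_sysfs_ddb_logout_sid() logs out a session that was built from a
 * flash DDB entry.  It rejects non-flash and boot-target sessions, and if
 * the firmware DDB is not yet in its active state it briefly polls
 * (sleeping 250 jiffies per iteration) for a pending relogin to complete.
 * It then issues the firmware logout, re-reads the entry with
 * qla4xxx_get_fwddb_entry() until the firmware reports the session is no
 * longer active, and finally clears the firmware slot and tears down the
 * driver and iSCSI session objects.
 */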
static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; unsigned long flags ; unsigned long wtime ; uint32_t ddb_state ; int options ; int ret ; int tmp ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; { ddb_entry = (struct ddb_entry *)0; fw_ddb_entry = (struct dev_db_entry *)0; ret = 0; sess = (struct iscsi_session *)cls_sess->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; if ((unsigned int )ddb_entry->ddb_type != 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Not a flash node session\n", "qla4xxx_sysfs_ddb_logout_sid"); ret = -6; goto exit_ddb_logout; } else { } tmp = constant_test_bit(1L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Logout from boot target entry is not permitted.\n", "qla4xxx_sysfs_ddb_logout_sid"); ret = -1; goto exit_ddb_logout; } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___0; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_logout_sid"); ret = -12; goto exit_ddb_logout; } else { } tmp___1 = test_and_set_bit(4L, (unsigned long volatile *)(& ddb_entry->flags)); if (tmp___1 != 0) { goto ddb_logout_init; } else { } ret = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto ddb_logout_init; } else { } if (ddb_state == 4U) { goto ddb_logout_init; } else { } wtime = (unsigned long )jiffies + 4500UL; ldv_65782: tmp___2 = test_and_clear_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); if (tmp___2 != 0) { goto ddb_logout_init; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_65782; } else { } ddb_logout_init: atomic_set(& ddb_entry->retry_relogin_timer, 65535); atomic_set(& ddb_entry->relogin_timer, 0); options = 2; qla4xxx_session_logout_ddb(ha, ddb_entry, options); memset((void *)fw_ddb_entry, 0, 512UL); wtime = (unsigned long )jiffies + 2500UL; ldv_65791: ret = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto ddb_logout_clr_sess; } else { } if (ddb_state == 1U || ddb_state == 6U) { goto ddb_logout_clr_sess; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_65791; } else { } ddb_logout_clr_sess: qla4xxx_clear_ddb_entry(ha, (uint32_t )ddb_entry->fw_ddb_index); try_module_get(qla4xxx_iscsi_transport.owner); iscsi_destroy_endpoint((ddb_entry->conn)->ep); ldv_spin_lock(); qla4xxx_free_ddb(ha, ddb_entry); clear_bit((long )ddb_entry->fw_ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); spin_unlock_irqrestore(& ha->hardware_lock, flags); iscsi_session_teardown(ddb_entry->sess); clear_bit(4L, (unsigned long volatile *)(& ddb_entry->flags)); ret = 0; exit_ddb_logout: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& 
(ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return (ret); } } static int qla4xxx_sysfs_ddb_logout(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct ql4_tuple_ddb *flash_tddb ; struct ql4_tuple_ddb *tmp_tddb ; struct dev_db_entry *fw_ddb_entry ; struct ddb_entry *ddb_entry ; dma_addr_t fw_ddb_dma ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; uint16_t conn_id ; int idx ; int index ; int status ; int ret ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; int tmp___4 ; int i ; int j ; int tmp___5 ; int tmp___6 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; flash_tddb = (struct ql4_tuple_ddb *)0; tmp_tddb = (struct ql4_tuple_ddb *)0; fw_ddb_entry = (struct dev_db_entry *)0; ddb_entry = (struct ddb_entry *)0; next_idx = 0U; state = 0U; conn_err = 0U; conn_id = 0U; ret = 0; tmp___1 = ldv_dma_pool_alloc_64(ha->fw_ddb_dma_pool, 208U, & fw_ddb_dma); fw_ddb_entry = (struct dev_db_entry *)tmp___1; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s:Out of memory\n", "qla4xxx_sysfs_ddb_logout"); ret = -12; goto exit_ddb_logout; } else { } tmp___2 = ldv_vzalloc_65(304UL); flash_tddb = (struct ql4_tuple_ddb *)tmp___2; if ((unsigned long )flash_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s:Memory Allocation failed.\n", "qla4xxx_sysfs_ddb_logout"); ret = -12; goto exit_ddb_logout; } else { } tmp___3 = ldv_vzalloc_66(304UL); tmp_tddb = (struct ql4_tuple_ddb *)tmp___3; if ((unsigned long )tmp_tddb == (unsigned long )((struct ql4_tuple_ddb *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s:Memory Allocation failed.\n", "qla4xxx_sysfs_ddb_logout"); ret = -12; goto exit_ddb_logout; } else { } if ((unsigned long )fnode_sess->targetname == (unsigned long )((char *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s:Cannot logout from SendTarget entry\n", "qla4xxx_sysfs_ddb_logout"); ret = -1; goto exit_ddb_logout; } else { } if ((unsigned int )fnode_sess->is_boot_target != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Logout from boot target entry is not permitted.\n", "qla4xxx_sysfs_ddb_logout"); ret = -1; goto exit_ddb_logout; } else { } strlcpy((char *)(& flash_tddb->iscsi_name), (char const *)fnode_sess->targetname, 224UL); tmp___4 = strncmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___4 == 0) { sprintf((char *)(& flash_tddb->ip_addr), "%pI6", fnode_conn->ipaddress); } else { sprintf((char *)(& flash_tddb->ip_addr), "%pI4", fnode_conn->ipaddress); } flash_tddb->tpgt = fnode_sess->tpgt; flash_tddb->port = fnode_conn->port; i = 0; j = 5; goto ldv_65817; ldv_65816: tmp___5 = i; i = i + 1; tmp___6 = j; j = j - 1; flash_tddb->isid[tmp___5] = fnode_sess->isid[tmp___6]; ldv_65817: ; if (i <= 5) { goto ldv_65816; } else { } idx = 0; goto ldv_65822; ldv_65821: ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, (uint32_t )idx); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { goto ldv_65819; } else { } if ((unsigned int )ddb_entry->ddb_type != 1U) { goto ldv_65819; } else { } index = (int )(ddb_entry->sess)->target_id; status = qla4xxx_get_fwddb_entry(ha, (int )((uint16_t 
)index), fw_ddb_entry, fw_ddb_dma, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, & conn_id); if (status == 1) { ret = -12; goto ldv_65820; } else { } qla4xxx_convert_param_ddb(fw_ddb_entry, tmp_tddb, (uint8_t *)0U); status = qla4xxx_compare_tuple_ddb(ha, flash_tddb, tmp_tddb, 1); if (status == 0) { ret = qla4xxx_sysfs_ddb_logout_sid(ddb_entry->sess); goto ldv_65820; } else { } ldv_65819: idx = idx + 1; ldv_65822: ; if (idx <= 511) { goto ldv_65821; } else { } ldv_65820: ; if (idx == 512) { ret = -3; } else { } exit_ddb_logout: ; if ((unsigned long )flash_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)flash_tddb); } else { } if ((unsigned long )tmp_tddb != (unsigned long )((struct ql4_tuple_ddb *)0)) { vfree((void const *)tmp_tddb); } else { } if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)fw_ddb_entry, fw_ddb_dma); } else { } return (ret); } } static int qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess , int param , char *buf ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bus_flash_conn *fnode_conn ; struct ql4_chap_table chap_tbl ; struct device *dev ; int parent_type ; int rc ; struct device const *__mptr ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; char *tmp___5 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; rc = 0; dev = iscsi_find_flashnode_conn(fnode_sess); if ((unsigned long )dev == (unsigned long )((struct device *)0)) { return (-5); } else { } __mptr = (struct device const *)dev; fnode_conn = (struct iscsi_bus_flash_conn *)__mptr + 0xffffffffffffffe0UL; switch (param) { case 0: rc = sprintf(buf, "%u\n", (int )fnode_conn->is_fw_assigned_ipv6); goto ldv_65838; case 1: rc = sprintf(buf, "%s\n", fnode_sess->portal_type); goto ldv_65838; case 2: rc = sprintf(buf, "%u\n", (int )fnode_sess->auto_snd_tgt_disable); goto ldv_65838; case 3: rc = sprintf(buf, "%u\n", (int )fnode_sess->discovery_sess); goto ldv_65838; case 4: rc = sprintf(buf, "%u\n", (int )fnode_sess->entry_state); goto ldv_65838; case 5: rc = sprintf(buf, "%u\n", fnode_conn->hdrdgst_en); goto ldv_65838; case 6: rc = sprintf(buf, "%u\n", fnode_conn->datadgst_en); goto ldv_65838; case 7: rc = sprintf(buf, "%u\n", fnode_sess->imm_data_en); goto ldv_65838; case 8: rc = sprintf(buf, "%u\n", fnode_sess->initial_r2t_en); goto ldv_65838; case 9: rc = sprintf(buf, "%u\n", fnode_sess->dataseq_inorder_en); goto ldv_65838; case 10: rc = sprintf(buf, "%u\n", fnode_sess->pdu_inorder_en); goto ldv_65838; case 11: rc = sprintf(buf, "%u\n", (int )fnode_sess->chap_auth_en); goto ldv_65838; case 12: rc = sprintf(buf, "%u\n", (int )fnode_conn->snack_req_en); goto ldv_65838; case 13: rc = sprintf(buf, "%u\n", (int )fnode_sess->discovery_logout_en); goto ldv_65838; case 14: rc = sprintf(buf, "%u\n", (int )fnode_sess->bidi_chap_en); goto ldv_65838; case 15: rc = sprintf(buf, "%u\n", (int )fnode_sess->discovery_auth_optional); goto ldv_65838; case 16: rc = sprintf(buf, "%u\n", fnode_sess->erl); goto ldv_65838; case 17: rc = sprintf(buf, "%u\n", (int )fnode_conn->tcp_timestamp_stat); goto ldv_65838; case 18: rc = sprintf(buf, "%u\n", (int )fnode_conn->tcp_nagle_disable); goto ldv_65838; case 19: rc = sprintf(buf, "%u\n", (int )fnode_conn->tcp_wsf_disable); goto ldv_65838; case 20: rc = sprintf(buf, "%u\n", (int )fnode_conn->tcp_timer_scale); goto ldv_65838; 
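/* The cases that follow return the remaining per-node fields: TCP options,
   iSCSI negotiation limits (burst sizes, max R2T, time2wait/time2retain),
   ISID/TSID, addressing and redirect information, and the discovery-parent
   index and type; each one formats a single field into the sysfs buffer
   with sprintf(). */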
case 21: rc = sprintf(buf, "%u\n", (int )fnode_conn->tcp_timestamp_en); goto ldv_65838; case 22: rc = sprintf(buf, "%u\n", (int )fnode_conn->fragment_disable); goto ldv_65838; case 23: rc = sprintf(buf, "%u\n", fnode_conn->max_recv_dlength); goto ldv_65838; case 24: rc = sprintf(buf, "%u\n", fnode_conn->max_xmit_dlength); goto ldv_65838; case 25: rc = sprintf(buf, "%u\n", fnode_sess->first_burst); goto ldv_65838; case 26: rc = sprintf(buf, "%u\n", fnode_sess->time2wait); goto ldv_65838; case 27: rc = sprintf(buf, "%u\n", fnode_sess->time2retain); goto ldv_65838; case 28: rc = sprintf(buf, "%u\n", (int )fnode_sess->max_r2t); goto ldv_65838; case 29: rc = sprintf(buf, "%u\n", (int )fnode_conn->keepalive_timeout); goto ldv_65838; case 30: rc = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n", (int )fnode_sess->isid[0], (int )fnode_sess->isid[1], (int )fnode_sess->isid[2], (int )fnode_sess->isid[3], (int )fnode_sess->isid[4], (int )fnode_sess->isid[5]); goto ldv_65838; case 31: rc = sprintf(buf, "%u\n", (int )fnode_sess->tsid); goto ldv_65838; case 32: rc = sprintf(buf, "%d\n", fnode_conn->port); goto ldv_65838; case 33: rc = sprintf(buf, "%u\n", fnode_sess->max_burst); goto ldv_65838; case 34: rc = sprintf(buf, "%u\n", fnode_sess->default_taskmgmt_timeout); goto ldv_65838; case 35: tmp___1 = strncmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___1 == 0) { rc = sprintf(buf, "%pI6\n", fnode_conn->ipaddress); } else { rc = sprintf(buf, "%pI4\n", fnode_conn->ipaddress); } goto ldv_65838; case 36: ; if ((unsigned long )fnode_sess->targetalias != (unsigned long )((char *)0)) { rc = sprintf(buf, "%s\n", fnode_sess->targetalias); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 37: tmp___2 = strncmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___2 == 0) { rc = sprintf(buf, "%pI6\n", fnode_conn->redirect_ipaddr); } else { rc = sprintf(buf, "%pI4\n", fnode_conn->redirect_ipaddr); } goto ldv_65838; case 38: rc = sprintf(buf, "%u\n", fnode_conn->max_segment_size); goto ldv_65838; case 39: rc = sprintf(buf, "%u\n", (int )fnode_conn->local_port); goto ldv_65838; case 40: rc = sprintf(buf, "%u\n", (int )fnode_conn->ipv4_tos); goto ldv_65838; case 41: tmp___3 = strncmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___3 == 0) { rc = sprintf(buf, "%u\n", (int )fnode_conn->ipv6_traffic_class); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 42: rc = sprintf(buf, "%u\n", (int )fnode_conn->ipv6_flow_label); goto ldv_65838; case 45: tmp___4 = strncmp((char const *)fnode_sess->portal_type, "ipv6", 4UL); if (tmp___4 == 0) { rc = sprintf(buf, "%pI6\n", fnode_conn->link_local_ipv6_addr); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 46: rc = sprintf(buf, "%u\n", (int )fnode_sess->discovery_parent_idx); goto ldv_65838; case 47: ; if ((unsigned int )fnode_sess->discovery_parent_type == 65533U) { parent_type = 3; } else if ((unsigned int )fnode_sess->discovery_parent_type == 65535U) { parent_type = 1; } else if ((unsigned int )fnode_sess->discovery_parent_type <= 511U) { parent_type = 2; } else { parent_type = 1; } tmp___5 = iscsi_get_discovery_parent_name(parent_type); rc = sprintf(buf, "%s\n", tmp___5); goto ldv_65838; case 43: ; if ((unsigned long )fnode_sess->targetname != (unsigned long )((char *)0)) { rc = sprintf(buf, "%s\n", fnode_sess->targetname); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 44: rc = sprintf(buf, "%u\n", fnode_sess->tpgt); goto ldv_65838; case 48: rc = sprintf(buf, "%u\n", fnode_conn->tcp_xmit_wsf); goto ldv_65838; 
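/* The CHAP cases (52/54) fetch the username and secret from the adapter's
   CHAP table via qla4xxx_get_uni_chap_at_index() only when CHAP
   authentication is enabled for this node; any parameter id without a case
   falls through to default and returns -ENOSYS (-38). */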
case 49: rc = sprintf(buf, "%u\n", fnode_conn->tcp_recv_wsf); goto ldv_65838; case 51: rc = sprintf(buf, "%u\n", (int )fnode_sess->chap_out_idx); goto ldv_65838; case 52: ; if ((unsigned int )fnode_sess->chap_auth_en != 0U) { qla4xxx_get_uni_chap_at_index(ha, (char *)(& chap_tbl.name), (char *)(& chap_tbl.secret), (int )fnode_sess->chap_out_idx); rc = sprintf(buf, "%s\n", (uint8_t *)(& chap_tbl.name)); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 54: ; if ((unsigned int )fnode_sess->chap_auth_en != 0U) { qla4xxx_get_uni_chap_at_index(ha, (char *)(& chap_tbl.name), (char *)(& chap_tbl.secret), (int )fnode_sess->chap_out_idx); rc = sprintf(buf, "%s\n", (uint8_t *)(& chap_tbl.secret)); } else { rc = sprintf(buf, "\n"); } goto ldv_65838; case 56: rc = sprintf(buf, "%u\n", fnode_conn->statsn); goto ldv_65838; case 57: rc = sprintf(buf, "%u\n", fnode_conn->exp_statsn); goto ldv_65838; case 58: rc = sprintf(buf, "%u\n", (int )fnode_sess->is_boot_target); goto ldv_65838; default: rc = -38; goto ldv_65838; } ldv_65838: put_device(dev); return (rc); } } static int qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess , struct iscsi_bus_flash_conn *fnode_conn , void *data , int len ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_flashnode_param_info *fnode_param ; struct ql4_chap_table chap_tbl ; struct nlattr *attr ; uint16_t chap_out_idx ; int rc ; uint32_t rem ; void *tmp___1 ; size_t tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; chap_out_idx = 65535U; rc = 1; rem = (uint32_t )len; memset((void *)(& chap_tbl), 0, 364UL); attr = (struct nlattr *)data; rem = (uint32_t )len; goto ldv_65966; ldv_65965: tmp___1 = nla_data((struct nlattr const *)attr); fnode_param = (struct iscsi_flashnode_param_info *)tmp___1; switch ((int )fnode_param->param) { case 0: fnode_conn->is_fw_assigned_ipv6 = fnode_param->value[0]; goto ldv_65910; case 1: tmp___2 = strlen((char const *)fnode_sess->portal_type); memcpy((void *)fnode_sess->portal_type, (void const *)(& fnode_param->value), tmp___2); goto ldv_65910; case 2: fnode_sess->auto_snd_tgt_disable = fnode_param->value[0]; goto ldv_65910; case 3: fnode_sess->discovery_sess = fnode_param->value[0]; goto ldv_65910; case 4: fnode_sess->entry_state = fnode_param->value[0]; goto ldv_65910; case 5: fnode_conn->hdrdgst_en = (int )fnode_param->value[0]; goto ldv_65910; case 6: fnode_conn->datadgst_en = (int )fnode_param->value[0]; goto ldv_65910; case 7: fnode_sess->imm_data_en = (int )fnode_param->value[0]; goto ldv_65910; case 8: fnode_sess->initial_r2t_en = (int )fnode_param->value[0]; goto ldv_65910; case 9: fnode_sess->dataseq_inorder_en = (int )fnode_param->value[0]; goto ldv_65910; case 10: fnode_sess->pdu_inorder_en = (int )fnode_param->value[0]; goto ldv_65910; case 11: fnode_sess->chap_auth_en = fnode_param->value[0]; if ((unsigned int )fnode_sess->chap_auth_en == 0U) { fnode_sess->chap_out_idx = 65535U; } else { } goto ldv_65910; case 12: fnode_conn->snack_req_en = fnode_param->value[0]; goto ldv_65910; case 13: fnode_sess->discovery_logout_en = fnode_param->value[0]; goto ldv_65910; case 14: fnode_sess->bidi_chap_en = fnode_param->value[0]; goto ldv_65910; case 15: fnode_sess->discovery_auth_optional = fnode_param->value[0]; goto ldv_65910; case 16: fnode_sess->erl = (int )fnode_param->value[0]; goto ldv_65910; case 17: fnode_conn->tcp_timestamp_stat = 
fnode_param->value[0]; goto ldv_65910; case 18: fnode_conn->tcp_nagle_disable = fnode_param->value[0]; goto ldv_65910; case 19: fnode_conn->tcp_wsf_disable = fnode_param->value[0]; goto ldv_65910; case 20: fnode_conn->tcp_timer_scale = fnode_param->value[0]; goto ldv_65910; case 21: fnode_conn->tcp_timestamp_en = fnode_param->value[0]; goto ldv_65910; case 22: fnode_conn->fragment_disable = fnode_param->value[0]; goto ldv_65910; case 23: fnode_conn->max_recv_dlength = *((unsigned int *)(& fnode_param->value)); goto ldv_65910; case 24: fnode_conn->max_xmit_dlength = *((unsigned int *)(& fnode_param->value)); goto ldv_65910; case 25: fnode_sess->first_burst = *((unsigned int *)(& fnode_param->value)); goto ldv_65910; case 26: fnode_sess->time2wait = (int )*((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 27: fnode_sess->time2retain = (int )*((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 28: fnode_sess->max_r2t = *((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 29: fnode_conn->keepalive_timeout = *((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 30: memcpy((void *)(& fnode_sess->isid), (void const *)(& fnode_param->value), 6UL); goto ldv_65910; case 31: fnode_sess->tsid = *((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 32: fnode_conn->port = (int )*((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 33: fnode_sess->max_burst = *((unsigned int *)(& fnode_param->value)); goto ldv_65910; case 34: fnode_sess->default_taskmgmt_timeout = (int )*((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 35: memcpy((void *)fnode_conn->ipaddress, (void const *)(& fnode_param->value), 16UL); goto ldv_65910; case 36: rc = iscsi_switch_str_param(& fnode_sess->targetalias, (char *)(& fnode_param->value)); goto ldv_65910; case 37: memcpy((void *)fnode_conn->redirect_ipaddr, (void const *)(& fnode_param->value), 16UL); goto ldv_65910; case 38: fnode_conn->max_segment_size = *((unsigned int *)(& fnode_param->value)); goto ldv_65910; case 39: fnode_conn->local_port = *((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 40: fnode_conn->ipv4_tos = fnode_param->value[0]; goto ldv_65910; case 41: fnode_conn->ipv6_traffic_class = fnode_param->value[0]; goto ldv_65910; case 42: fnode_conn->ipv6_flow_label = fnode_param->value[0]; goto ldv_65910; case 43: rc = iscsi_switch_str_param(& fnode_sess->targetname, (char *)(& fnode_param->value)); goto ldv_65910; case 44: fnode_sess->tpgt = (int )*((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 45: memcpy((void *)fnode_conn->link_local_ipv6_addr, (void const *)(& fnode_param->value), 16UL); goto ldv_65910; case 46: fnode_sess->discovery_parent_idx = *((uint16_t *)(& fnode_param->value)); goto ldv_65910; case 48: fnode_conn->tcp_xmit_wsf = (unsigned int )*((uint8_t *)(& fnode_param->value)); goto ldv_65910; case 49: fnode_conn->tcp_recv_wsf = (unsigned int )*((uint8_t *)(& fnode_param->value)); goto ldv_65910; case 56: fnode_conn->statsn = *((uint32_t *)(& fnode_param->value)); goto ldv_65910; case 57: fnode_conn->exp_statsn = *((uint32_t *)(& fnode_param->value)); goto ldv_65910; case 51: chap_out_idx = *((uint16_t *)(& fnode_param->value)); tmp___3 = qla4xxx_get_uni_chap_at_index(ha, (char *)(& chap_tbl.name), (char *)(& chap_tbl.secret), (int )chap_out_idx); if (tmp___3 == 0) { fnode_sess->chap_out_idx = chap_out_idx; fnode_sess->chap_auth_en = 1U; } else { } goto ldv_65910; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: No such sysfs attribute\n", 
"qla4xxx_sysfs_ddb_set_param"); rc = -38; goto exit_set_param; } ldv_65910: attr = nla_next((struct nlattr const *)attr, (int *)(& rem)); ldv_65966: tmp___4 = nla_ok((struct nlattr const *)attr, (int )rem); if (tmp___4 != 0) { goto ldv_65965; } else { } rc = qla4xxx_sysfs_ddb_apply(fnode_sess, fnode_conn); exit_set_param: ; return (rc); } } static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess ) { struct Scsi_Host *shost ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; uint32_t dev_db_start_offset ; uint32_t dev_db_end_offset ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint16_t *ddb_cookie ; size_t ddb_size ; void *pddb ; int target_id ; int rc ; int tmp___1 ; int tmp___2 ; { tmp = dev_to_shost(fnode_sess->dev.parent); shost = tmp; tmp___0 = to_qla_host(shost); ha = tmp___0; fw_ddb_entry = (struct dev_db_entry *)0; ddb_cookie = (uint16_t *)0U; ddb_size = 0UL; pddb = (void *)0; rc = 0; if ((unsigned int )fnode_sess->is_boot_target != 0U) { rc = -1; if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Deletion of boot target entry is not permitted.\n", "qla4xxx_sysfs_ddb_delete"); } else { } goto exit_ddb_del; } else { } if (fnode_sess->flash_state == 0) { goto sysfs_ddb_del; } else { } tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { dev_db_start_offset = 83886080U; dev_db_end_offset = 83918847U; dev_db_start_offset = fnode_sess->target_id * 512U + dev_db_start_offset; ddb_size = 512UL; } else { dev_db_start_offset = (ha->hw.flt_region_ddb << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { dev_db_start_offset = ha->hw.flt_ddb_size / 2U + dev_db_start_offset; } else { } dev_db_end_offset = ha->hw.flt_ddb_size / 2U + dev_db_start_offset; dev_db_start_offset = fnode_sess->target_id * 512U + dev_db_start_offset; dev_db_start_offset = dev_db_start_offset + 508U; ddb_size = 2UL; } if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: start offset=%u, end offset=%u\n", "qla4xxx_sysfs_ddb_delete", dev_db_start_offset, dev_db_end_offset); } else { } if (dev_db_start_offset > dev_db_end_offset) { rc = -5; if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s:Invalid DDB index %u\n", "qla4xxx_sysfs_ddb_delete", fnode_sess->target_id); } else { } goto exit_ddb_del; } else { } pddb = dma_alloc_attrs(& (ha->pdev)->dev, ddb_size, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )pddb == (unsigned long )((void *)0)) { rc = -12; if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_delete"); } else { } goto exit_ddb_del; } else { } tmp___2 = is_qla40XX(ha); if (tmp___2 != 0) { fw_ddb_entry = (struct dev_db_entry *)pddb; memset((void *)fw_ddb_entry, 0, ddb_size); ddb_cookie = & fw_ddb_entry->cookie; } else { ddb_cookie = (uint16_t *)pddb; } *ddb_cookie = 65518U; qla4xxx_set_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, (uint32_t )ddb_size, 3U); sysfs_ddb_del: target_id = (int )fnode_sess->target_id; iscsi_destroy_flashnode_sess(fnode_sess); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: session and conn entries for flashnode %u of host %lu deleted\n", "qla4xxx_sysfs_ddb_delete", target_id, ha->host_no); exit_ddb_del: ; if ((unsigned long )pddb != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, ddb_size, 
pddb, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return (rc); } } int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha ) { struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; uint16_t max_ddbs ; uint16_t idx ; int ret ; void *tmp ; int tmp___0 ; int tmp___1 ; { fw_ddb_entry = (struct dev_db_entry *)0; idx = 0U; ret = 0; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_sysfs_ddb_export"); } else { } return (-12); } else { } tmp___0 = is_qla40XX(ha); max_ddbs = tmp___0 != 0 ? 64U : 512U; idx = 0U; goto ldv_65997; ldv_65996: tmp___1 = qla4xxx_flashdb_by_index(ha, fw_ddb_entry, fw_ddb_entry_dma, (int )idx); if (tmp___1 != 0) { goto ldv_65994; } else { } ret = qla4xxx_sysfs_ddb_tgt_create(ha, fw_ddb_entry, & idx, 0); if (ret != 0) { ret = -5; goto ldv_65995; } else { } ldv_65994: idx = (uint16_t )((int )idx + 1); ldv_65997: ; if ((int )idx < (int )max_ddbs) { goto ldv_65996; } else { } ldv_65995: dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); return (ret); } } static void qla4xxx_sysfs_ddb_remove(struct scsi_qla_host *ha ) { { iscsi_destroy_all_flashnode(ha->host); return; } } void qla4xxx_build_ddb_list(struct scsi_qla_host *ha , int is_reset ) { uint16_t tmo ; struct list_head list_st ; struct list_head list_nt ; struct qla_ddb_index *st_ddb_idx ; struct qla_ddb_index *st_ddb_idx_tmp ; unsigned long wtime ; int tmp ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; int tmp___0 ; { tmo = 0U; tmp = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { set_bit(22L, (unsigned long volatile *)(& ha->flags)); ha->is_reset = is_reset; return; } else { } INIT_LIST_HEAD(& list_st); INIT_LIST_HEAD(& list_nt); qla4xxx_build_st_list(ha, & list_st); qla4xxx_wait_for_ip_configuration(ha); __mptr = (struct list_head const *)list_st.next; st_ddb_idx = (struct qla_ddb_index *)__mptr; __mptr___0 = (struct list_head const *)st_ddb_idx->list.next; st_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___0; goto ldv_66018; ldv_66017: qla4xxx_conn_open(ha, (int )st_ddb_idx->fw_ddb_idx); st_ddb_idx = st_ddb_idx_tmp; __mptr___1 = (struct list_head const *)st_ddb_idx_tmp->list.next; st_ddb_idx_tmp = (struct qla_ddb_index *)__mptr___1; ldv_66018: ; if ((unsigned long )(& st_ddb_idx->list) != (unsigned long )(& list_st)) { goto ldv_66017; } else { } tmo = (unsigned int )ha->def_timeout > 12U && (unsigned int )ha->def_timeout <= 119U ? 
ha->def_timeout : 12U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Default time to wait for build ddb %d\n", (int )tmo); } else { } wtime = (unsigned long )((int )tmo * 250) + (unsigned long )jiffies; ldv_66027: tmp___0 = list_empty((struct list_head const *)(& list_st)); if (tmp___0 != 0) { goto ldv_66020; } else { } qla4xxx_remove_failed_ddb(ha, & list_st); schedule_timeout_uninterruptible(25L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_66027; } else { } ldv_66020: qla4xxx_build_nt_list(ha, & list_nt, & list_st, is_reset); qla4xxx_free_ddb_list(& list_st); qla4xxx_free_ddb_list(& list_nt); qla4xxx_free_ddb_index(ha); return; } } static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha ) { struct ddb_entry *ddb_entry ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; unsigned long wtime ; uint32_t ddb_state ; int max_ddbs ; int idx ; int ret ; int tmp ; void *tmp___0 ; int tmp___1 ; { fw_ddb_entry = (struct dev_db_entry *)0; tmp = is_qla40XX(ha); max_ddbs = tmp != 0 ? 256 : 512; tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___0; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_wait_login_resp_boot_tgt"); goto exit_login_resp; } else { } wtime = (unsigned long )jiffies + 15000UL; idx = 0; goto ldv_66057; ldv_66056: ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, (uint32_t )idx); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { goto ldv_66041; } else { } tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp___1 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DDB index [%d]\n", "qla4xxx_wait_login_resp_boot_tgt", (int )ddb_entry->fw_ddb_index); } else { } ldv_66049: ret = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { goto exit_login_resp; } else { } if (ddb_state == 4U || ddb_state == 6U) { goto ldv_66042; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_66049; } else { } ldv_66042: ; if ((long )((unsigned long )jiffies - wtime) >= 0L) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Login response wait timer expired\n", "qla4xxx_wait_login_resp_boot_tgt"); } else { } goto exit_login_resp; } else { } } else { } ldv_66041: idx = idx + 1; ldv_66057: ; if (idx < max_ddbs) { goto ldv_66056; } else { } exit_login_resp: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); } else { } return; } } static int qla4xxx_probe_adapter(struct pci_dev *pdev , struct pci_device_id const *ent ) { int ret ; int status ; struct Scsi_Host *host ; struct scsi_qla_host *ha ; uint8_t init_retry_count ; char buf[34U] ; struct qla4_8xxx_legacy_intr_set *nx_legacy_intr ; uint32_t dev_state ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; struct lock_class_key __key ; int tmp___3 ; struct lock_class_key __key___0 ; struct 
lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; uint8_t tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; struct lock_class_key __key___4 ; char const *__lock_name ; struct workqueue_struct *tmp___21 ; struct lock_class_key __key___5 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___6 ; char const *__lock_name___0 ; struct workqueue_struct *tmp___22 ; int tmp___23 ; char const *tmp___24 ; int tmp___25 ; int tmp___26 ; { ret = -19; init_retry_count = 0U; tmp = pci_enable_device(pdev); if (tmp != 0) { return (-1); } else { } host = iscsi_host_alloc(& qla4xxx_driver_template, 43072, 0); if ((unsigned long )host == (unsigned long )((struct Scsi_Host *)0)) { printk("\fqla4xxx: Couldn\'t allocate host from scsi layer!\n"); goto probe_disable_device; } else { } ha = to_qla_host(host); memset((void *)ha, 0, 43072UL); ha->pdev = pdev; ha->host = host; ha->host_no = (unsigned long )host->host_no; ha->func_num = (unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U; pci_enable_pcie_error_reporting(pdev); tmp___2 = is_qla8022(ha); if (tmp___2 != 0) { ha->isp_ops = & qla4_82xx_isp_ops; ha->reg_tbl = (uint32_t *)(& qla4_82xx_reg_tbl); ha->qdr_sn_window = -1; ha->ddr_mn_window = 4294967295U; ha->curr_window = 255U; nx_legacy_intr = (struct qla4_8xxx_legacy_intr_set *)(& legacy_intr) + (unsigned long )ha->func_num; ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit; ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg; ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg; ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg; } else { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { ha->isp_ops = & qla4_83xx_isp_ops; ha->reg_tbl = (uint32_t *)(& qla4_83xx_reg_tbl); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { ha->isp_ops = & qla4_83xx_isp_ops; ha->reg_tbl = (uint32_t *)(& qla4_83xx_reg_tbl); } else { ha->isp_ops = & qla4xxx_isp_ops; } } } tmp___3 = is_qla80XX(ha); if (tmp___3 != 0) { __rwlock_init(& ha->hw_lock, "&ha->hw_lock", & __key); ha->pf_bit = (uint32_t )((int )ha->func_num << 16); pdev->needs_freset = 1U; } else { } ret = (*((ha->isp_ops)->iospace_config))(ha); if (ret != 0) { goto probe_failed_ioconfig; } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Found an ISP%04x, irq %d, iobase 0x%p\n", (int )pdev->device, pdev->irq, ha->reg); qla4xxx_config_dma_addressing(ha); INIT_LIST_HEAD(& ha->free_srb_q); __mutex_init(& ha->mbox_sem, "&ha->mbox_sem", & __key___0); __mutex_init(& ha->chap_sem, "&ha->chap_sem", & __key___1); init_completion(& ha->mbx_intr_comp); init_completion(& ha->disable_acb_comp); init_completion(& ha->idc_comp); init_completion(& ha->link_up_comp); init_completion(& ha->disable_acb_comp); spinlock_check(& ha->hardware_lock); __raw_spin_lock_init(& ha->hardware_lock.__annonCompField18.rlock, "&(&ha->hardware_lock)->rlock", & __key___2); spinlock_check(& ha->work_lock); __raw_spin_lock_init(& ha->work_lock.__annonCompField18.rlock, "&(&ha->work_lock)->rlock", & __key___3); INIT_LIST_HEAD(& ha->work_list); tmp___4 = qla4xxx_mem_alloc(ha); if (tmp___4 != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "[OLD_ERROR] Failed to allocate memory for adapter\n"); ret = -12; goto probe_failed; } else { } host->cmd_per_lun = 3; host->max_channel = 0U; 
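/* Scsi_Host limits for this adapter: a single channel, up to 512 target
   ids and 65534 LUNs, 16-byte CDBs and a queue depth of 1024 commands,
   backed by a shared tag map of the same size set up just below. */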
host->max_lun = 65534ULL; host->max_id = 512U; host->max_cmd_len = 16U; host->can_queue = 1024; host->transportt = qla4xxx_scsi_transport; ret = scsi_init_shared_tag_map(host, 1024); if (ret != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: scsi_init_shared_tag_map failed\n", "qla4xxx_probe_adapter"); goto probe_failed; } else { } pci_set_drvdata(pdev, (void *)ha); ret = scsi_add_host(host, & pdev->dev); if (ret != 0) { goto probe_failed; } else { } tmp___5 = is_qla80XX(ha); if (tmp___5 != 0) { qla4_8xxx_get_flash_info(ha); } else { } tmp___6 = is_qla8032(ha); if (tmp___6 != 0) { goto _L; } else { tmp___7 = is_qla8042(ha); if (tmp___7 != 0) { _L: /* CIL Label */ qla4_83xx_read_reset_template(ha); if (ql4xdontresethba == 1) { qla4_83xx_set_idc_dontreset(ha); } else { } } else { } } status = qla4xxx_initialize_adapter(ha, 0); tmp___8 = is_qla80XX(ha); if (tmp___8 != 0 && status == 1) { goto skip_retry_init; } else { } goto ldv_66082; ldv_66083: tmp___10 = is_qla80XX(ha); if (tmp___10 != 0) { (*((ha->isp_ops)->idc_lock))(ha); tmp___9 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___9; (*((ha->isp_ops)->idc_unlock))(ha); if (dev_state == 6U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: don\'t retry initialize adapter. H/W is in failed state\n", "qla4xxx_probe_adapter"); goto ldv_66081; } else { } } else { } if (ql4xextended_error_logging == 2) { printk("scsi: %s: retrying adapter initialization (%d)\n", "qla4xxx_probe_adapter", (int )init_retry_count); } else { } tmp___11 = (*((ha->isp_ops)->reset_chip))(ha); if (tmp___11 == 1) { goto ldv_66082; } else { } status = qla4xxx_initialize_adapter(ha, 0); tmp___13 = is_qla80XX(ha); if (tmp___13 != 0 && status == 1) { tmp___12 = qla4_8xxx_check_init_adapter_retry(ha); if (tmp___12 == 1) { goto skip_retry_init; } else { } } else { } ldv_66082: tmp___14 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___14 == 0) { tmp___15 = init_retry_count; init_retry_count = (uint8_t )((int )init_retry_count + 1); if ((unsigned int )tmp___15 <= 4U) { goto ldv_66083; } else { goto ldv_66081; } } else { } ldv_66081: ; skip_retry_init: tmp___20 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___20 == 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Failed to initialize adapter\n"); tmp___16 = is_qla8022(ha); if (tmp___16 != 0 && ql4xdontresethba != 0) { goto _L___0; } else { tmp___17 = is_qla8032(ha); if (tmp___17 != 0) { goto _L___1; } else { tmp___18 = is_qla8042(ha); if (tmp___18 != 0) { _L___1: /* CIL Label */ tmp___19 = qla4_83xx_idc_dontreset(ha); if (tmp___19 != 0) { _L___0: /* CIL Label */ if (ql4xextended_error_logging == 2) { printk("\vHW STATE: FAILED\n"); } else { } (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_wr_direct(ha, 4U, 6U); (*((ha->isp_ops)->idc_unlock))(ha); } else { } } else { } } } ret = -19; goto remove_host; } else { } if (ql4xextended_error_logging == 2) { printk("scsi: %s: Starting kernel thread for qla4xxx_dpc\n", "qla4xxx_probe_adapter"); } else { } sprintf((char *)(& buf), "qla4xxx_%lu_dpc", ha->host_no); __lock_name = "\"%s\"buf"; tmp___21 = __alloc_workqueue_key("%s", 131082U, 1, & __key___4, __lock_name, (char *)(& buf)); ha->dpc_thread = tmp___21; if ((unsigned long )ha->dpc_thread == (unsigned long )((struct workqueue_struct *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Unable to start DPC thread!\n"); ret = -19; goto remove_host; } else { } __init_work(& ha->dpc_work, 0); 
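/* What follows is the CIL expansion of INIT_WORK(&ha->dpc_work,
 * qla4xxx_do_dpc): the work item's atomic data word, lockdep map, list
 * head and callback are assigned one field at a time.  The
 * "qla4xxx_<host_no>_dpc" workqueue allocated just above with
 * __WQ_ORDERED | WQ_UNBOUND | WQ_MEM_RECLAIM and max_active == 1
 * corresponds to a create_singlethread_workqueue(buf) call in the
 * original driver source. */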
__constr_expr_0.counter = 137438953408L; ha->dpc_work.data = __constr_expr_0; lockdep_init_map(& ha->dpc_work.lockdep_map, "(&ha->dpc_work)", & __key___5, 0); INIT_LIST_HEAD(& ha->dpc_work.entry); ha->dpc_work.func = & qla4xxx_do_dpc; __lock_name___0 = "\"qla4xxx_%lu_task\"ha->host_no"; tmp___22 = __alloc_workqueue_key("qla4xxx_%lu_task", 8U, 1, & __key___6, __lock_name___0, ha->host_no); ha->task_wq = tmp___22; if ((unsigned long )ha->task_wq == (unsigned long )((struct workqueue_struct *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Unable to start task thread!\n"); ret = -19; goto remove_host; } else { } tmp___23 = is_qla40XX(ha); if (tmp___23 != 0) { ret = qla4xxx_request_irqs(ha); if (ret != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Failed to reserve interrupt %d already in use.\n", pdev->irq); goto remove_host; } else { } } else { } pci_save_state(ha->pdev); (*((ha->isp_ops)->enable_intrs))(ha); qla4xxx_start_timer(ha, (void *)(& qla4xxx_timer), 1UL); set_bit(1L, (unsigned long volatile *)(& ha->flags)); qla4_8xxx_alloc_sysfs_attr(ha); tmp___24 = pci_name((struct pci_dev const *)ha->pdev); printk("\016 QLogic iSCSI HBA Driver version: %s\n QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", (char *)(& qla4xxx_version_str), (int )(ha->pdev)->device, tmp___24, ha->host_no, (int )ha->fw_info.fw_major, (int )ha->fw_info.fw_minor, (int )ha->fw_info.fw_patch, (int )ha->fw_info.fw_build); tmp___25 = is_qla80XX(ha); if (tmp___25 != 0) { qla4_8xxx_set_param(ha, 512); } else { } tmp___26 = qla4xxx_setup_boot_info(ha); if (tmp___26 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: No iSCSI boot target configured\n", "qla4xxx_probe_adapter"); } else { } set_bit(25L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_build_ddb_list(ha, 0); iscsi_host_for_each_session(ha->host, & qla4xxx_login_flash_ddb); qla4xxx_wait_login_resp_boot_tgt(ha); qla4xxx_create_chap_list(ha); qla4xxx_create_ifaces(ha); return (0); remove_host: ldv_scsi_remove_host_67(ha->host); probe_failed: qla4xxx_free_adapter(ha); probe_failed_ioconfig: pci_disable_pcie_error_reporting(pdev); scsi_host_put(ha->host); probe_disable_device: pci_disable_device(pdev); return (ret); } } static void qla4xxx_prevent_other_port_reinit(struct scsi_qla_host *ha ) { struct scsi_qla_host *other_ha ; struct pci_dev *other_pdev ; int fn ; int tmp ; void *tmp___0 ; char const *tmp___1 ; int tmp___2 ; { other_ha = (struct scsi_qla_host *)0; other_pdev = (struct pci_dev *)0; fn = 3; if (((ha->pdev)->devfn & 2U) != 0U) { fn = 1; } else { } tmp = pci_domain_nr((ha->pdev)->bus); other_pdev = pci_get_domain_bus_and_slot(tmp, (unsigned int )((ha->pdev)->bus)->number, ((ha->pdev)->devfn & 248U) | ((unsigned int )fn & 7U)); if ((unsigned long )other_pdev != (unsigned long )((struct pci_dev *)0)) { tmp___2 = atomic_read((atomic_t const *)(& other_pdev->enable_cnt)); if (tmp___2 != 0) { tmp___0 = pci_get_drvdata(other_pdev); other_ha = (struct scsi_qla_host *)tmp___0; if ((unsigned long )other_ha != (unsigned long )((struct scsi_qla_host *)0)) { set_bit(12L, (unsigned long volatile *)(& other_ha->flags)); if (ql4xextended_error_logging == 2) { tmp___1 = dev_name((struct device const *)(& (other_ha->pdev)->dev)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Prevent %s reinit\n", "qla4xxx_prevent_other_port_reinit", tmp___1); } else { } } else { } } else { } pci_dev_put(other_pdev); } else { } return; } } static void qla4xxx_destroy_ddb(struct scsi_qla_host 
*ha , struct ddb_entry *ddb_entry ) { struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_entry_dma ; unsigned long wtime ; uint32_t ddb_state ; int options ; int status ; int tmp ; void *tmp___0 ; { fw_ddb_entry = (struct dev_db_entry *)0; options = 2; tmp = qla4xxx_session_logout_ddb(ha, ddb_entry, options); if (tmp == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Logout failed\n", "qla4xxx_destroy_ddb"); goto clear_ddb; } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp___0; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer\n", "qla4xxx_destroy_ddb"); goto clear_ddb; } else { } wtime = (unsigned long )jiffies + 2500UL; ldv_66119: status = qla4xxx_get_fwddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, (uint32_t *)0U, (uint32_t *)0U, & ddb_state, (uint32_t *)0U, (uint16_t *)0U, (uint16_t *)0U); if (status == 1) { goto free_ddb; } else { } if (ddb_state == 1U || ddb_state == 6U) { goto free_ddb; } else { } schedule_timeout_uninterruptible(250L); if ((long )((unsigned long )jiffies - wtime) < 0L) { goto ldv_66119; } else { } free_ddb: dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); clear_ddb: qla4xxx_clear_ddb_entry(ha, (uint32_t )ddb_entry->fw_ddb_index); return; } } static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha ) { struct ddb_entry *ddb_entry ; int idx ; { idx = 0; goto ldv_66127; ldv_66126: ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, (uint32_t )idx); if ((unsigned long )ddb_entry != (unsigned long )((struct ddb_entry *)0) && (unsigned int )ddb_entry->ddb_type == 1U) { qla4xxx_destroy_ddb(ha, ddb_entry); try_module_get(qla4xxx_iscsi_transport.owner); iscsi_destroy_endpoint((ddb_entry->conn)->ep); qla4xxx_free_ddb(ha, ddb_entry); iscsi_session_teardown(ddb_entry->sess); } else { } idx = idx + 1; ldv_66127: ; if (idx <= 511) { goto ldv_66126; } else { } return; } } static void qla4xxx_remove_adapter(struct pci_dev *pdev ) { struct scsi_qla_host *ha ; int tmp ; void *tmp___0 ; int tmp___1 ; { tmp = pci_is_enabled(pdev); if (tmp == 0) { return; } else { } tmp___0 = pci_get_drvdata(pdev); ha = (struct scsi_qla_host *)tmp___0; tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { qla4xxx_prevent_other_port_reinit(ha); } else { } qla4xxx_destroy_ifaces(ha); if (ql4xdisablesysfsboot == 0 && (unsigned long )ha->boot_kset != (unsigned long )((struct iscsi_boot_kset *)0)) { iscsi_boot_destroy_kset(ha->boot_kset); } else { } qla4xxx_destroy_fw_ddb_session(ha); qla4_8xxx_free_sysfs_attr(ha); qla4xxx_sysfs_ddb_remove(ha); ldv_scsi_remove_host_68(ha->host); qla4xxx_free_adapter(ha); scsi_host_put(ha->host); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); return; } } static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha ) { int retval ; struct _ddebug descriptor ; long tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = pci_set_dma_mask(ha->pdev, 0xffffffffffffffffULL); if (tmp___1 == 0) { tmp___0 = pci_set_consistent_dma_mask(ha->pdev, 0xffffffffffffffffULL); if (tmp___0 != 0) { descriptor.modname = "qla4xxx"; descriptor.function = "qla4xxx_config_dma_addressing"; descriptor.filename = 
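/* The struct _ddebug initialized here is the CIL expansion of a
 * dev_dbg() call site in qla4xxx_config_dma_addressing(): dynamic debug
 * records the module, function, file, format and line of the site, and
 * the message is emitted only when the descriptor's flags enable it,
 * which ldv__builtin_expect() models as the unlikely path. */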
"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_os.c"; descriptor.format = "Failed to set 64 bit PCI consistent mask; using 32 bit.\n"; descriptor.lineno = 9038U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const *)(& (ha->pdev)->dev), "Failed to set 64 bit PCI consistent mask; using 32 bit.\n"); } else { } retval = pci_set_consistent_dma_mask(ha->pdev, 4294967295ULL); } else { } } else { retval = pci_set_dma_mask(ha->pdev, 4294967295ULL); } return; } } static int qla4xxx_slave_alloc(struct scsi_device *sdev ) { struct iscsi_cls_session *cls_sess ; struct iscsi_session *sess ; struct ddb_entry *ddb ; int queue_depth ; struct device const *__mptr ; { queue_depth = 32; __mptr = (struct device const *)(sdev->sdev_target)->dev.parent; cls_sess = (struct iscsi_cls_session *)__mptr + 0xfffffffffffffd58UL; sess = (struct iscsi_session *)cls_sess->dd_data; ddb = (struct ddb_entry *)sess->dd_data; sdev->hostdata = (void *)ddb; if (ql4xmaxqdepth != 0 && (unsigned int )ql4xmaxqdepth <= 65535U) { queue_depth = ql4xmaxqdepth; } else { } scsi_change_queue_depth(sdev, queue_depth); return (0); } } struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha , uint32_t index ) { struct srb *srb ; struct scsi_cmnd *cmd ; { srb = (struct srb *)0; cmd = (struct scsi_cmnd *)0; cmd = scsi_host_find_tag(ha->host, (int )index); if ((unsigned long )cmd == (unsigned long )((struct scsi_cmnd *)0)) { return (srb); } else { } srb = (struct srb *)cmd->SCp.ptr; if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { return (srb); } else { } if (((int )srb->flags & 8) != 0) { ha->iocb_cnt = (int )ha->iocb_cnt - (int )srb->iocb_cnt; if ((unsigned long )srb->cmd != (unsigned long )((struct scsi_cmnd *)0)) { (srb->cmd)->host_scribble = (unsigned char *)1024U; } else { } } else { } return (srb); } } static int qla4xxx_eh_wait_on_command(struct scsi_qla_host *ha , struct scsi_cmnd *cmd ) { int done ; struct srb *rp ; uint32_t max_wait_time ; int ret ; int tmp ; long tmp___0 ; int tmp___1 ; uint32_t tmp___2 ; { done = 0; max_wait_time = 120U; ret = 8194; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Return from %s\n", ha->host_no, "qla4xxx_eh_wait_on_command"); return (ret); } else { tmp___1 = constant_test_bit(20L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Return from %s\n", ha->host_no, "qla4xxx_eh_wait_on_command"); return (ret); } else { } } ldv_66164: rp = (struct srb *)cmd->SCp.ptr; if ((unsigned long )rp == (unsigned long )((struct srb *)0)) { done = done + 1; goto ldv_66163; } else { } msleep(2000U); tmp___2 = max_wait_time; max_wait_time = max_wait_time - 1U; if (tmp___2 != 0U) { goto ldv_66164; } else { } ldv_66163: ; return (done); } } static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha ) { unsigned long wait_online ; int tmp ; { wait_online = (unsigned long )jiffies + 7500UL; goto ldv_66176; ldv_66175: tmp = adapter_up(ha); if (tmp != 0) { return (0); } else { } msleep(2000U); ldv_66176: ; if ((long )((unsigned long )jiffies - wait_online) < 0L) { goto ldv_66175; } else { } return (1); } } static int 
qla4xxx_eh_wait_for_commands(struct scsi_qla_host *ha , struct scsi_target *stgt , struct scsi_device *sdev ) { int cnt ; int status ; struct scsi_cmnd *cmd ; int tmp ; struct scsi_target *tmp___0 ; { status = 0; cnt = 0; goto ldv_66188; ldv_66187: cmd = scsi_host_find_tag(ha->host, cnt); if ((unsigned long )cmd != (unsigned long )((struct scsi_cmnd *)0)) { tmp___0 = scsi_target(cmd->device); if ((unsigned long )tmp___0 == (unsigned long )stgt) { if ((unsigned long )sdev == (unsigned long )((struct scsi_device *)0) || (unsigned long )cmd->device == (unsigned long )sdev) { tmp = qla4xxx_eh_wait_on_command(ha, cmd); if (tmp == 0) { status = status + 1; goto ldv_66186; } else { } } else { } } else { } } else { } cnt = cnt + 1; ldv_66188: ; if ((ha->host)->can_queue > cnt) { goto ldv_66187; } else { } ldv_66186: ; return (status); } } static int qla4xxx_eh_abort(struct scsi_cmnd *cmd ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; unsigned int id ; uint64_t lun ; unsigned long flags ; struct srb *srb ; int ret ; int wait ; int tmp___0 ; int tmp___1 ; { tmp = to_qla_host((cmd->device)->host); ha = tmp; id = (cmd->device)->id; lun = (cmd->device)->lun; srb = (struct srb *)0; ret = 8194; wait = 0; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n", ha->host_no, id, lun, cmd, (int )*(cmd->cmnd)); ldv_spin_lock(); srb = (struct srb *)cmd->SCp.ptr; if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { spin_unlock_irqrestore(& ha->hardware_lock, flags); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%llu: Specified command has already completed.\n", ha->host_no, id, lun); return (8194); } else { } kref_get(& srb->srb_ref); spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___0 = qla4xxx_abort_task(ha, srb); if (tmp___0 != 0) { ret = 8195; } else { wait = 1; } kref_put(& srb->srb_ref, & qla4xxx_srb_compl); if (wait != 0) { tmp___1 = qla4xxx_eh_wait_on_command(ha, cmd); if (tmp___1 == 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%llu: Abort handler timed out\n", ha->host_no, id, lun); } else { } ret = 8195; } else { } } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%llu: Abort command - %s\n", ha->host_no, id, lun, ret == 8194 ? (char *)"succeeded" : (char *)"failed"); return (ret); } } static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct ddb_entry *ddb_entry ; int ret ; int stat ; struct scsi_target *tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = to_qla_host((cmd->device)->host); ha = tmp; ddb_entry = (struct ddb_entry *)(cmd->device)->hostdata; ret = 8195; if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { return (ret); } else { } ret = iscsi_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } ret = 8195; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%d:%llu: DEVICE RESET ISSUED.\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun); if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, cmd, jiffies, (cmd->request)->timeout / 250U, ha->dpc_flags, cmd->result, cmd->allowed); } else { } stat = qla4xxx_reset_lun(ha, ddb_entry, (cmd->device)->lun); if (stat != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "DEVICE RESET FAILED. 
%d\n", stat); goto eh_dev_reset_done; } else { } tmp___0 = scsi_target(cmd->device); tmp___1 = qla4xxx_eh_wait_for_commands(ha, tmp___0, cmd->device); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "DEVICE RESET FAILED - waiting for commands.\n"); goto eh_dev_reset_done; } else { } tmp___2 = qla4xxx_send_marker_iocb(ha, ddb_entry, (cmd->device)->lun, 0); if (tmp___2 != 0) { goto eh_dev_reset_done; } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld:%d:%d:%llu): DEVICE RESET SUCCEEDED.\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun); ret = 8194; eh_dev_reset_done: ; return (ret); } } static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; struct ddb_entry *ddb_entry ; int stat ; int ret ; struct scsi_target *tmp___0 ; struct scsi_target *tmp___1 ; struct scsi_target *tmp___2 ; struct scsi_target *tmp___3 ; int tmp___4 ; struct scsi_target *tmp___5 ; int tmp___6 ; struct scsi_target *tmp___7 ; { tmp = to_qla_host((cmd->device)->host); ha = tmp; ddb_entry = (struct ddb_entry *)(cmd->device)->hostdata; if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { return (8195); } else { } ret = iscsi_block_scsi_eh(cmd); if (ret != 0) { return (ret); } else { } tmp___0 = scsi_target(cmd->device); dev_printk("\016", (struct device const *)(& tmp___0->dev), "WARM TARGET RESET ISSUED.\n"); if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: TARGET_DEVICE_RESET cmd=%p jiffies = 0x%lx, to=%x,dpc_flags=%lx, status=%x allowed=%d\n", ha->host_no, cmd, jiffies, (cmd->request)->timeout / 250U, ha->dpc_flags, cmd->result, cmd->allowed); } else { } stat = qla4xxx_reset_target(ha, ddb_entry); if (stat != 0) { tmp___1 = scsi_target(cmd->device); dev_printk("\016", (struct device const *)(& tmp___1->dev), "WARM TARGET RESET FAILED.\n"); return (8195); } else { } tmp___3 = scsi_target(cmd->device); tmp___4 = qla4xxx_eh_wait_for_commands(ha, tmp___3, (struct scsi_device *)0); if (tmp___4 != 0) { tmp___2 = scsi_target(cmd->device); dev_printk("\016", (struct device const *)(& tmp___2->dev), "WARM TARGET DEVICE RESET FAILED - waiting for commands.\n"); return (8195); } else { } tmp___6 = qla4xxx_send_marker_iocb(ha, ddb_entry, (cmd->device)->lun, 1); if (tmp___6 != 0) { tmp___5 = scsi_target(cmd->device); dev_printk("\016", (struct device const *)(& tmp___5->dev), "WARM TARGET DEVICE RESET FAILED - marker iocb failed.\n"); return (8195); } else { } tmp___7 = scsi_target(cmd->device); dev_printk("\016", (struct device const *)(& tmp___7->dev), "WARM TARGET RESET SUCCEEDED.\n"); return (8194); } } static int qla4xxx_is_eh_active(struct Scsi_Host *shost ) { { if ((unsigned int )shost->shost_state == 5U) { return (1); } else { } return (0); } } static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd ) { int return_status ; struct scsi_qla_host *ha ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { return_status = 8195; ha = to_qla_host((cmd->device)->host); tmp = is_qla8032(ha); if (tmp != 0) { goto _L; } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { _L: /* CIL Label */ if (ql4xdontresethba != 0) { qla4_83xx_set_idc_dontreset(ha); } else { } } else { } } if (ql4xdontresethba != 0) { goto _L___0; } else { tmp___2 = is_qla8032(ha); if (tmp___2 != 0) { goto _L___1; } else { tmp___3 = is_qla8042(ha); if (tmp___3 != 0) { _L___1: /* CIL Label */ 
tmp___4 = qla4_83xx_idc_dontreset(ha); if (tmp___4 != 0) { _L___0: /* CIL Label */ if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Don\'t Reset HBA\n", ha->host_no, "qla4xxx_eh_host_reset"); } else { } tmp___1 = qla4xxx_is_eh_active((cmd->device)->host); if (tmp___1 != 0) { qla4xxx_abort_active_cmds(ha, 327680); } else { } return (8195); } else { } } else { } } } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld:%d:%d:%llu): HOST RESET ISSUED.\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun); tmp___5 = qla4xxx_wait_for_hba_online(ha); if (tmp___5 != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d: %s: Unable to reset host. Adapter DEAD.\n", ha->host_no, (cmd->device)->channel, "qla4xxx_eh_host_reset"); } else { } return (8195); } else { } tmp___7 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___7 == 0) { tmp___6 = is_qla80XX(ha); if (tmp___6 != 0) { set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); } else { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } } else { } tmp___8 = qla4xxx_recover_adapter(ha); if (tmp___8 == 0) { return_status = 8194; } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HOST RESET %s.\n", return_status == 8195 ? (char *)"FAILED" : (char *)"SUCCEEDED"); return (return_status); } } static int qla4xxx_context_reset(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct addr_ctrl_blk_def *acb ; uint32_t acb_len ; int rval ; dma_addr_t acb_dma ; void *tmp ; { acb = (struct addr_ctrl_blk_def *)0; acb_len = 768U; rval = 0; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 768UL, & acb_dma, 208U, (struct dma_attrs *)0); acb = (struct addr_ctrl_blk_def *)tmp; if ((unsigned long )acb == (unsigned long )((struct addr_ctrl_blk_def *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to alloc acb\n", "qla4xxx_context_reset"); rval = -12; goto exit_port_reset; } else { } memset((void *)acb, 0, (size_t )acb_len); rval = qla4xxx_get_acb(ha, acb_dma, 0U, acb_len); if (rval != 0) { rval = -5; goto exit_free_acb; } else { } rval = qla4xxx_disable_acb(ha); if (rval != 0) { rval = -5; goto exit_free_acb; } else { } wait_for_completion_timeout(& ha->disable_acb_comp, 7500UL); rval = qla4xxx_set_acb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), acb_dma); if (rval != 0) { rval = -5; goto exit_free_acb; } else { } exit_free_acb: dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)acb, acb_dma, (struct dma_attrs *)0); exit_port_reset: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s %s\n", "qla4xxx_context_reset", rval == 0 ? 
(char *)"SUCCEEDED" : (char *)"FAILED"); } else { } return (rval); } } static int qla4xxx_host_reset(struct Scsi_Host *shost , int reset_type ) { struct scsi_qla_host *ha ; struct scsi_qla_host *tmp ; int rval ; uint32_t idc_ctrl ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp = to_qla_host(shost); ha = tmp; rval = 0; if (ql4xdontresethba != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Don\'t Reset HBA\n", "qla4xxx_host_reset"); } else { } rval = -1; goto exit_host_reset; } else { } tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___0 != 0) { goto recover_adapter; } else { } switch (reset_type) { case 1: set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); goto ldv_66246; case 2: tmp___2 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___2 == 0) { tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); } else { rval = qla4xxx_context_reset(ha); goto exit_host_reset; } } else { } goto ldv_66246; } ldv_66246: ; recover_adapter: tmp___3 = is_qla8032(ha); if (tmp___3 != 0) { goto _L; } else { tmp___4 = is_qla8042(ha); if (tmp___4 != 0) { _L: /* CIL Label */ tmp___5 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___5 != 0) { idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); qla4_83xx_wr_reg(ha, 14224UL, idc_ctrl | 2U); } else { } } else { } } rval = qla4xxx_recover_adapter(ha); if (rval != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: recover adapter fail\n", "qla4xxx_host_reset"); } else { } rval = -5; } else { } exit_host_reset: ; return (rval); } } static pci_ers_result_t qla4xxx_pci_error_detected(struct pci_dev *pdev , pci_channel_state_t state ) { struct scsi_qla_host *ha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(pdev); ha = (struct scsi_qla_host *)tmp; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: error detected:state %x\n", ha->host_no, "qla4xxx_pci_error_detected", state); tmp___0 = is_aer_supported(ha); if (tmp___0 == 0) { return (1U); } else { } switch (state) { case 1U: clear_bit(20L, (unsigned long volatile *)(& ha->flags)); return (2U); case 2U: set_bit(20L, (unsigned long volatile *)(& ha->flags)); qla4xxx_mailbox_premature_completion(ha); qla4xxx_free_irqs(ha); pci_disable_device(pdev); qla4xxx_abort_active_cmds(ha, 524288); return (3U); case 3U: set_bit(20L, (unsigned long volatile *)(& ha->flags)); set_bit(21L, (unsigned long volatile *)(& ha->flags)); qla4xxx_abort_active_cmds(ha, 65536); return (4U); } return (3U); } } static pci_ers_result_t qla4xxx_pci_mmio_enabled(struct pci_dev *pdev ) { struct scsi_qla_host *ha ; void *tmp ; int tmp___0 ; { tmp = pci_get_drvdata(pdev); ha = (struct scsi_qla_host *)tmp; tmp___0 = is_aer_supported(ha); if (tmp___0 == 0) { return (1U); } else { } return (5U); } } static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha ) { uint32_t rval ; int fn ; struct pci_dev *other_pdev ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { rval = 1U; other_pdev = (struct pci_dev *)0; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: In %s\n", ha->host_no, "qla4_8xxx_error_recovery"); set_bit(20L, (unsigned long volatile *)(& ha->dpc_flags)); tmp = constant_test_bit(0L, (unsigned long 
const volatile *)(& ha->flags)); if (tmp != 0) { clear_bit(0L, (unsigned long volatile *)(& ha->flags)); clear_bit(8L, (unsigned long volatile *)(& ha->flags)); iscsi_host_for_each_session(ha->host, & qla4xxx_fail_session); qla4xxx_process_aen(ha, 1); } else { } fn = (int )(ha->pdev)->devfn & 7; tmp___3 = is_qla8022(ha); if (tmp___3 != 0) { goto ldv_66268; ldv_66270: fn = fn - 1; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Finding PCI device at func %x\n", ha->host_no, "qla4_8xxx_error_recovery", fn); tmp___0 = pci_domain_nr((ha->pdev)->bus); other_pdev = pci_get_domain_bus_and_slot(tmp___0, (unsigned int )((ha->pdev)->bus)->number, ((ha->pdev)->devfn & 248U) | ((unsigned int )fn & 7U)); if ((unsigned long )other_pdev == (unsigned long )((struct pci_dev *)0)) { goto ldv_66268; } else { } tmp___1 = atomic_read((atomic_t const *)(& other_pdev->enable_cnt)); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Found PCI func in enabled state%x\n", ha->host_no, "qla4_8xxx_error_recovery", fn); pci_dev_put(other_pdev); goto ldv_66269; } else { } pci_dev_put(other_pdev); ldv_66268: ; if (fn > 0) { goto ldv_66270; } else { } ldv_66269: ; } else { tmp___2 = qla4_83xx_can_perform_reset(ha); if (tmp___2 != 0) { fn = 0; } else { } } if (fn == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: devfn being reset 0x%x is the owner\n", ha->host_no, "qla4_8xxx_error_recovery", (ha->pdev)->devfn); (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_wr_direct(ha, 4U, 1U); (*((ha->isp_ops)->idc_unlock))(ha); tmp___4 = qla4_8xxx_update_idc_reg(ha); rval = (uint32_t )tmp___4; if (rval == 1U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: HW State: FAILED\n", ha->host_no, "qla4_8xxx_error_recovery"); (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_wr_direct(ha, 4U, 6U); (*((ha->isp_ops)->idc_unlock))(ha); goto exit_error_recovery; } else { } clear_bit(19L, (unsigned long volatile *)(& ha->flags)); tmp___5 = qla4xxx_initialize_adapter(ha, 1); rval = (uint32_t )tmp___5; if (rval != 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: HW State: FAILED\n", ha->host_no, "qla4_8xxx_error_recovery"); qla4xxx_free_irqs(ha); (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_clear_drv_active(ha); qla4_8xxx_wr_direct(ha, 4U, 6U); (*((ha->isp_ops)->idc_unlock))(ha); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: HW State: READY\n", ha->host_no, "qla4_8xxx_error_recovery"); (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_wr_direct(ha, 4U, 3U); qla4_8xxx_wr_direct(ha, 5U, 0U); qla4_8xxx_set_drv_active(ha); (*((ha->isp_ops)->idc_unlock))(ha); (*((ha->isp_ops)->enable_intrs))(ha); } } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: devfn 0x%x is not the reset owner\n", ha->host_no, "qla4_8xxx_error_recovery", (ha->pdev)->devfn); tmp___7 = qla4_8xxx_rd_direct(ha, 4U); if (tmp___7 == 3) { clear_bit(19L, (unsigned long volatile *)(& ha->flags)); tmp___6 = qla4xxx_initialize_adapter(ha, 1); rval = (uint32_t )tmp___6; if (rval == 0U) { (*((ha->isp_ops)->enable_intrs))(ha); } else { qla4xxx_free_irqs(ha); } (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_set_drv_active(ha); (*((ha->isp_ops)->idc_unlock))(ha); } else { } } exit_error_recovery: clear_bit(20L, (unsigned long volatile *)(& ha->dpc_flags)); return (rval); } } static pci_ers_result_t qla4xxx_pci_slot_reset(struct pci_dev *pdev ) { pci_ers_result_t ret ; 
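/* PCI AER slot_reset callback: the constants assigned below are
 * enum pci_ers_result values (4 == PCI_ERS_RESULT_DISCONNECT is the
 * pessimistic default, 5 == PCI_ERS_RESULT_RECOVERED is reported once
 * the device has been re-enabled, interrupts masked and, on ISP80XX
 * parts, qla4_8xxx_error_recovery() has completed successfully). */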
struct scsi_qla_host *ha ; void *tmp ; int rc ; int tmp___0 ; uint32_t tmp___1 ; int tmp___2 ; { ret = 4U; tmp = pci_get_drvdata(pdev); ha = (struct scsi_qla_host *)tmp; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: slot_reset\n", ha->host_no, "qla4xxx_pci_slot_reset"); tmp___0 = is_aer_supported(ha); if (tmp___0 == 0) { return (1U); } else { } pci_restore_state(pdev); pci_save_state(pdev); rc = pci_enable_device(pdev); if (rc != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Can\'t re-enable device after reset\n", ha->host_no, "qla4xxx_pci_slot_reset"); goto exit_slot_reset; } else { } (*((ha->isp_ops)->disable_intrs))(ha); tmp___2 = is_qla80XX(ha); if (tmp___2 != 0) { tmp___1 = qla4_8xxx_error_recovery(ha); if (tmp___1 == 0U) { ret = 5U; goto exit_slot_reset; } else { goto exit_slot_reset; } } else { } exit_slot_reset: dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Return=%x\ndevice after reset\n", ha->host_no, "qla4xxx_pci_slot_reset", ret); return (ret); } } static void qla4xxx_pci_resume(struct pci_dev *pdev ) { struct scsi_qla_host *ha ; void *tmp ; int ret ; { tmp = pci_get_drvdata(pdev); ha = (struct scsi_qla_host *)tmp; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: pci_resume\n", ha->host_no, "qla4xxx_pci_resume"); ret = qla4xxx_wait_for_hba_online(ha); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: the device failed to resume I/O from slot/link_reset\n", ha->host_no, "qla4xxx_pci_resume"); } else { } pci_cleanup_aer_uncorrect_error_status(pdev); clear_bit(20L, (unsigned long volatile *)(& ha->flags)); return; } } static struct pci_error_handlers const qla4xxx_err_handler = {(pci_ers_result_t (*)(struct pci_dev * , enum pci_channel_state ))(& qla4xxx_pci_error_detected), & qla4xxx_pci_mmio_enabled, 0, & qla4xxx_pci_slot_reset, 0, & qla4xxx_pci_resume}; static struct pci_device_id qla4xxx_pci_tbl[7U] = { {4215U, 16400U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 16418U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 16434U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32802U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32818U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {4215U, 32834U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, {0U, 0U, 0U, 0U, 0U, 0U, 0UL}}; struct pci_device_id const __mod_pci__qla4xxx_pci_tbl_device_table[7U] ; static struct pci_driver qla4xxx_pci_driver = {{0, 0}, "qla4xxx", (struct pci_device_id const *)(& qla4xxx_pci_tbl), & qla4xxx_probe_adapter, & qla4xxx_remove_adapter, 0, 0, 0, 0, 0, 0, & qla4xxx_err_handler, {0, 0, 0, 0, (_Bool)0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {{{{{{0}}, 0U, 0U, 0, {0, {0, 0}, 0, 0, 0UL}}}}, {0, 0}}}; static int qla4xxx_module_init(void) { int ret ; { if (ql4xqfulltracking != 0) { qla4xxx_driver_template.track_queue_depth = 1U; } else { } srb_cachep = kmem_cache_create("qla4xxx_srbs", 88UL, 0UL, 8192UL, (void (*)(void * ))0); if ((unsigned long )srb_cachep == (unsigned long )((struct kmem_cache *)0)) { printk("\v%s: Unable to allocate SRB cache...Failing load!\n", (char *)"qla4xxx"); ret = -12; goto no_srp_cache; } else { } strcpy((char *)(& qla4xxx_version_str), "5.04.00-k6"); if (ql4xextended_error_logging != 0) { strcat((char *)(& qla4xxx_version_str), "-debug"); } else { } qla4xxx_scsi_transport = iscsi_register_transport(& qla4xxx_iscsi_transport); if ((unsigned long )qla4xxx_scsi_transport == (unsigned long )((struct scsi_transport_template *)0)) { ret = -19; 
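/* -19 is -ENODEV: iscsi_register_transport() returned NULL, so module
 * init bails out through release_srb_cache, which destroys the
 * "qla4xxx_srbs" slab cache allocated above.  The qla4xxx_pci_tbl a
 * little further up binds the driver to QLogic (vendor 0x1077) ISP
 * devices 0x4010, 0x4022, 0x4032, 0x8022, 0x8032 and 0x8042. */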
goto release_srb_cache; } else { } ret = ldv___pci_register_driver_69(& qla4xxx_pci_driver, & __this_module, "qla4xxx"); if (ret != 0) { goto unregister_transport; } else { } printk("\016QLogic iSCSI HBA Driver\n"); return (0); unregister_transport: iscsi_unregister_transport(& qla4xxx_iscsi_transport); release_srb_cache: kmem_cache_destroy(srb_cachep); no_srp_cache: ; return (ret); } } static void qla4xxx_module_exit(void) { { ldv_pci_unregister_driver_70(& qla4xxx_pci_driver); iscsi_unregister_transport(& qla4xxx_iscsi_transport); kmem_cache_destroy(srb_cachep); return; } } extern int ldv_shutdown_21(void) ; extern int ldv_release_22(void) ; extern int ldv_probe_26(void) ; int ldv_retval_0 ; extern int ldv_probe_22(void) ; int ldv_retval_1 ; extern int ldv_suspend_22(void) ; extern void ldv_initialize(void) ; extern void ldv_check_final_state(void) ; void work_init_3(void) { { ldv_work_3_0 = 0; ldv_work_3_1 = 0; ldv_work_3_2 = 0; ldv_work_3_3 = 0; return; } } void work_init_2(void) { { ldv_work_2_0 = 0; ldv_work_2_1 = 0; ldv_work_2_2 = 0; ldv_work_2_3 = 0; return; } } void ldv_initialize_isp_operations_25(void) { void *tmp ; { tmp = ldv_init_zalloc(43072UL); qla4xxx_isp_ops_group0 = (struct scsi_qla_host *)tmp; return; } } void activate_pending_timer_4(struct timer_list *timer , unsigned long data , int pending_flag ) { { if ((unsigned long )ldv_timer_list_4 == (unsigned long )timer) { if (ldv_timer_state_4 == 2 || pending_flag != 0) { ldv_timer_list_4 = timer; ldv_timer_list_4->data = data; ldv_timer_state_4 = 1; } else { } return; } else { } reg_timer_4(timer); ldv_timer_list_4->data = data; return; } } void ldv_initialize_isp_operations_24(void) { void *tmp ; { tmp = ldv_init_zalloc(43072UL); qla4_82xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; return; } } void call_and_disable_all_2(int state ) { { if (ldv_work_2_0 == state) { call_and_disable_work_2(ldv_work_struct_2_0); } else { } if (ldv_work_2_1 == state) { call_and_disable_work_2(ldv_work_struct_2_1); } else { } if (ldv_work_2_2 == state) { call_and_disable_work_2(ldv_work_struct_2_2); } else { } if (ldv_work_2_3 == state) { call_and_disable_work_2(ldv_work_struct_2_3); } else { } return; } } void activate_work_2(struct work_struct *work , int state ) { { if (ldv_work_2_0 == 0) { ldv_work_struct_2_0 = work; ldv_work_2_0 = state; return; } else { } if (ldv_work_2_1 == 0) { ldv_work_struct_2_1 = work; ldv_work_2_1 = state; return; } else { } if (ldv_work_2_2 == 0) { ldv_work_struct_2_2 = work; ldv_work_2_2 = state; return; } else { } if (ldv_work_2_3 == 0) { ldv_work_struct_2_3 = work; ldv_work_2_3 = state; return; } else { } return; } } void activate_work_3(struct work_struct *work , int state ) { { if (ldv_work_3_0 == 0) { ldv_work_struct_3_0 = work; ldv_work_3_0 = state; return; } else { } if (ldv_work_3_1 == 0) { ldv_work_struct_3_1 = work; ldv_work_3_1 = state; return; } else { } if (ldv_work_3_2 == 0) { ldv_work_struct_3_2 = work; ldv_work_3_2 = state; return; } else { } if (ldv_work_3_3 == 0) { ldv_work_struct_3_3 = work; ldv_work_3_3 = state; return; } else { } return; } } void ldv_initialize_iscsi_transport_26(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; void *tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; void *tmp___6 ; { tmp = ldv_init_zalloc(3816UL); qla4xxx_iscsi_transport_group0 = (struct Scsi_Host *)tmp; tmp___0 = ldv_init_zalloc(1584UL); qla4xxx_iscsi_transport_group2 = (struct iscsi_bus_flash_session *)tmp___0; tmp___1 = ldv_init_zalloc(1528UL); qla4xxx_iscsi_transport_group1 = (struct 
iscsi_bus_flash_conn *)tmp___1; tmp___2 = ldv_init_zalloc(2096UL); qla4xxx_iscsi_transport_group3 = (struct iscsi_cls_session *)tmp___2; tmp___3 = ldv_init_zalloc(1624UL); qla4xxx_iscsi_transport_group4 = (struct iscsi_cls_conn *)tmp___3; tmp___4 = ldv_init_zalloc(152UL); qla4xxx_iscsi_transport_group5 = (struct iscsi_task *)tmp___4; tmp___5 = ldv_init_zalloc(16UL); qla4xxx_iscsi_transport_group6 = (struct sockaddr *)tmp___5; tmp___6 = ldv_init_zalloc(1440UL); qla4xxx_iscsi_transport_group7 = (struct iscsi_endpoint *)tmp___6; return; } } void ldv_initialize_isp_operations_23(void) { void *tmp ; { tmp = ldv_init_zalloc(43072UL); qla4_83xx_isp_ops_group0 = (struct scsi_qla_host *)tmp; return; } } void choose_timer_4(struct timer_list *timer ) { { LDV_IN_INTERRUPT = 2; (*(timer->function))(timer->data); LDV_IN_INTERRUPT = 1; ldv_timer_state_4 = 2; return; } } void call_and_disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 2 || ldv_work_3_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_0) { qla4xxx_do_dpc(work); ldv_work_3_0 = 1; return; } else { } if ((ldv_work_3_1 == 2 || ldv_work_3_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_1) { qla4xxx_do_dpc(work); ldv_work_3_1 = 1; return; } else { } if ((ldv_work_3_2 == 2 || ldv_work_3_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_2) { qla4xxx_do_dpc(work); ldv_work_3_2 = 1; return; } else { } if ((ldv_work_3_3 == 2 || ldv_work_3_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_3_3) { qla4xxx_do_dpc(work); ldv_work_3_3 = 1; return; } else { } return; } } void disable_suitable_timer_4(struct timer_list *timer ) { { if ((unsigned long )timer == (unsigned long )ldv_timer_list_4) { ldv_timer_state_4 = 0; return; } else { } return; } } void disable_work_3(struct work_struct *work ) { { if ((ldv_work_3_0 == 3 || ldv_work_3_0 == 2) && (unsigned long )ldv_work_struct_3_0 == (unsigned long )work) { ldv_work_3_0 = 1; } else { } if ((ldv_work_3_1 == 3 || ldv_work_3_1 == 2) && (unsigned long )ldv_work_struct_3_1 == (unsigned long )work) { ldv_work_3_1 = 1; } else { } if ((ldv_work_3_2 == 3 || ldv_work_3_2 == 2) && (unsigned long )ldv_work_struct_3_2 == (unsigned long )work) { ldv_work_3_2 = 1; } else { } if ((ldv_work_3_3 == 3 || ldv_work_3_3 == 2) && (unsigned long )ldv_work_struct_3_3 == (unsigned long )work) { ldv_work_3_3 = 1; } else { } return; } } void disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 3 || ldv_work_2_0 == 2) && (unsigned long )ldv_work_struct_2_0 == (unsigned long )work) { ldv_work_2_0 = 1; } else { } if ((ldv_work_2_1 == 3 || ldv_work_2_1 == 2) && (unsigned long )ldv_work_struct_2_1 == (unsigned long )work) { ldv_work_2_1 = 1; } else { } if ((ldv_work_2_2 == 3 || ldv_work_2_2 == 2) && (unsigned long )ldv_work_struct_2_2 == (unsigned long )work) { ldv_work_2_2 = 1; } else { } if ((ldv_work_2_3 == 3 || ldv_work_2_3 == 2) && (unsigned long )ldv_work_struct_2_3 == (unsigned long )work) { ldv_work_2_3 = 1; } else { } return; } } int reg_timer_4(struct timer_list *timer ) { { ldv_timer_list_4 = timer; ldv_timer_state_4 = 1; return (0); } } void invoke_work_3(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_3_0 == 2 || ldv_work_3_0 == 3) { ldv_work_3_0 = 4; qla4xxx_do_dpc(ldv_work_struct_3_0); ldv_work_3_0 = 1; } else { } goto ldv_66390; case 1: ; if (ldv_work_3_1 == 2 || ldv_work_3_1 == 3) { ldv_work_3_1 = 4; qla4xxx_do_dpc(ldv_work_struct_3_0); ldv_work_3_1 = 1; } else { } goto 
ldv_66390; case 2: ; if (ldv_work_3_2 == 2 || ldv_work_3_2 == 3) { ldv_work_3_2 = 4; qla4xxx_do_dpc(ldv_work_struct_3_0); ldv_work_3_2 = 1; } else { } goto ldv_66390; case 3: ; if (ldv_work_3_3 == 2 || ldv_work_3_3 == 3) { ldv_work_3_3 = 4; qla4xxx_do_dpc(ldv_work_struct_3_0); ldv_work_3_3 = 1; } else { } goto ldv_66390; default: ldv_stop(); } ldv_66390: ; return; } } void ldv_pci_driver_21(void) { void *tmp ; { tmp = ldv_init_zalloc(2976UL); qla4xxx_pci_driver_group1 = (struct pci_dev *)tmp; return; } } void call_and_disable_all_3(int state ) { { if (ldv_work_3_0 == state) { call_and_disable_work_3(ldv_work_struct_3_0); } else { } if (ldv_work_3_1 == state) { call_and_disable_work_3(ldv_work_struct_3_1); } else { } if (ldv_work_3_2 == state) { call_and_disable_work_3(ldv_work_struct_3_2); } else { } if (ldv_work_3_3 == state) { call_and_disable_work_3(ldv_work_struct_3_3); } else { } return; } } void ldv_initialize_scsi_host_template_27(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(3816UL); qla4xxx_driver_template_group0 = (struct Scsi_Host *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); qla4xxx_driver_template_group1 = (struct scsi_cmnd *)tmp___0; tmp___1 = __VERIFIER_nondet_pointer(); qla4xxx_driver_template_group2 = (struct scsi_device *)tmp___1; return; } } void ldv_initialize_pci_error_handlers_22(void) { void *tmp ; { tmp = __VERIFIER_nondet_pointer(); qla4xxx_err_handler_group0 = (struct pci_dev *)tmp; return; } } void call_and_disable_work_2(struct work_struct *work ) { { if ((ldv_work_2_0 == 2 || ldv_work_2_0 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_0) { qla4xxx_task_work(work); ldv_work_2_0 = 1; return; } else { } if ((ldv_work_2_1 == 2 || ldv_work_2_1 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_1) { qla4xxx_task_work(work); ldv_work_2_1 = 1; return; } else { } if ((ldv_work_2_2 == 2 || ldv_work_2_2 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_2) { qla4xxx_task_work(work); ldv_work_2_2 = 1; return; } else { } if ((ldv_work_2_3 == 2 || ldv_work_2_3 == 3) && (unsigned long )work == (unsigned long )ldv_work_struct_2_3) { qla4xxx_task_work(work); ldv_work_2_3 = 1; return; } else { } return; } } void invoke_work_2(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_work_2_0 == 2 || ldv_work_2_0 == 3) { ldv_work_2_0 = 4; qla4xxx_task_work(ldv_work_struct_2_0); ldv_work_2_0 = 1; } else { } goto ldv_66420; case 1: ; if (ldv_work_2_1 == 2 || ldv_work_2_1 == 3) { ldv_work_2_1 = 4; qla4xxx_task_work(ldv_work_struct_2_0); ldv_work_2_1 = 1; } else { } goto ldv_66420; case 2: ; if (ldv_work_2_2 == 2 || ldv_work_2_2 == 3) { ldv_work_2_2 = 4; qla4xxx_task_work(ldv_work_struct_2_0); ldv_work_2_2 = 1; } else { } goto ldv_66420; case 3: ; if (ldv_work_2_3 == 2 || ldv_work_2_3 == 3) { ldv_work_2_3 = 4; qla4xxx_task_work(ldv_work_struct_2_0); ldv_work_2_3 = 1; } else { } goto ldv_66420; default: ldv_stop(); } ldv_66420: ; return; } } void ldv_main_exported_11(void) ; void ldv_main_exported_7(void) ; void ldv_main_exported_17(void) ; void ldv_main_exported_18(void) ; void ldv_main_exported_16(void) ; void ldv_main_exported_13(void) ; void ldv_main_exported_6(void) ; void ldv_main_exported_9(void) ; void ldv_main_exported_12(void) ; void ldv_main_exported_14(void) ; void ldv_main_exported_15(void) ; void ldv_main_exported_20(void) ; void ldv_main_exported_8(void) ; void ldv_main_exported_10(void) ; void ldv_main_exported_19(void) ; void ldv_main_exported_5(void) ; int 
main(void) { struct pci_device_id *ldvarg3 ; void *tmp ; uint32_t ldvarg18 ; int ldvarg11 ; char *ldvarg51 ; void *tmp___0 ; char *ldvarg32 ; void *tmp___1 ; uint32_t ldvarg7 ; int ldvarg23 ; char *ldvarg43 ; void *tmp___2 ; void *ldvarg42 ; void *tmp___3 ; int ldvarg12 ; uint64_t ldvarg56 ; int ldvarg50 ; char *ldvarg46 ; void *tmp___4 ; enum iscsi_param ldvarg58 ; uint32_t ldvarg37 ; char *ldvarg53 ; void *tmp___5 ; uint32_t ldvarg29 ; enum iscsi_param ldvarg44 ; enum iscsi_param_type ldvarg24 ; struct bsg_job *ldvarg35 ; void *tmp___6 ; void *ldvarg38 ; void *tmp___7 ; uint32_t *ldvarg33 ; void *tmp___8 ; uint32_t ldvarg16 ; int ldvarg48 ; struct iscsi_stats *ldvarg14 ; void *tmp___9 ; uint16_t ldvarg34 ; int ldvarg28 ; uint32_t ldvarg47 ; int ldvarg39 ; enum iscsi_host_param ldvarg20 ; struct iscsi_hdr *ldvarg31 ; void *tmp___10 ; int ldvarg41 ; void *ldvarg49 ; void *tmp___11 ; char *ldvarg57 ; void *tmp___12 ; uint16_t ldvarg8 ; char *ldvarg13 ; void *tmp___13 ; int ldvarg55 ; int ldvarg10 ; uint16_t ldvarg36 ; int ldvarg40 ; uint16_t ldvarg9 ; int ldvarg45 ; char *ldvarg26 ; void *tmp___14 ; int ldvarg27 ; uint32_t ldvarg15 ; char *ldvarg30 ; void *tmp___15 ; char *ldvarg21 ; void *tmp___16 ; enum iscsi_param ldvarg54 ; uint32_t ldvarg17 ; uint8_t ldvarg25 ; struct iscsi_iface *ldvarg22 ; void *tmp___17 ; char *ldvarg19 ; void *tmp___18 ; enum iscsi_param ldvarg52 ; enum pci_channel_state ldvarg62 ; int ldvarg77 ; ulong ldvarg73 ; int ldvarg70 ; uint32_t ldvarg67 ; void *ldvarg78 ; void *tmp___19 ; uint32_t *ldvarg71 ; void *tmp___20 ; uint32_t ldvarg68 ; int ldvarg72 ; uint32_t ldvarg69 ; uint32_t *ldvarg66 ; void *tmp___21 ; uint32_t ldvarg76 ; uint32_t ldvarg74 ; ulong ldvarg75 ; int ldvarg85 ; int ldvarg86 ; int ldvarg91 ; uint32_t *ldvarg88 ; void *tmp___22 ; void *ldvarg92 ; void *tmp___23 ; int ldvarg89 ; uint32_t ldvarg90 ; int ldvarg87 ; int ldvarg121 ; uint32_t ldvarg127 ; ulong ldvarg124 ; uint32_t *ldvarg122 ; void *tmp___24 ; int ldvarg128 ; ulong ldvarg126 ; uint32_t ldvarg118 ; int ldvarg123 ; void *ldvarg129 ; void *tmp___25 ; uint32_t ldvarg120 ; uint32_t ldvarg125 ; uint32_t *ldvarg117 ; void *tmp___26 ; uint32_t ldvarg119 ; int tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; int tmp___34 ; int tmp___35 ; { tmp = ldv_init_zalloc(32UL); ldvarg3 = (struct pci_device_id *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg51 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg32 = (char *)tmp___1; tmp___2 = ldv_init_zalloc(1UL); ldvarg43 = (char *)tmp___2; tmp___3 = ldv_init_zalloc(1UL); ldvarg42 = tmp___3; tmp___4 = ldv_init_zalloc(1UL); ldvarg46 = (char *)tmp___4; tmp___5 = ldv_init_zalloc(1UL); ldvarg53 = (char *)tmp___5; tmp___6 = ldv_init_zalloc(80UL); ldvarg35 = (struct bsg_job *)tmp___6; tmp___7 = ldv_init_zalloc(1UL); ldvarg38 = tmp___7; tmp___8 = ldv_init_zalloc(4UL); ldvarg33 = (uint32_t *)tmp___8; tmp___9 = ldv_init_zalloc(96UL); ldvarg14 = (struct iscsi_stats *)tmp___9; tmp___10 = ldv_init_zalloc(48UL); ldvarg31 = (struct iscsi_hdr *)tmp___10; tmp___11 = ldv_init_zalloc(1UL); ldvarg49 = tmp___11; tmp___12 = ldv_init_zalloc(1UL); ldvarg57 = (char *)tmp___12; tmp___13 = ldv_init_zalloc(1UL); ldvarg13 = (char *)tmp___13; tmp___14 = ldv_init_zalloc(1UL); ldvarg26 = (char *)tmp___14; tmp___15 = ldv_init_zalloc(1UL); ldvarg30 = (char *)tmp___15; tmp___16 = ldv_init_zalloc(1UL); ldvarg21 = (char *)tmp___16; tmp___17 = ldv_init_zalloc(1440UL); ldvarg22 = (struct iscsi_iface *)tmp___17; tmp___18 = 
ldv_init_zalloc(1UL); ldvarg19 = (char *)tmp___18; tmp___19 = ldv_init_zalloc(1UL); ldvarg78 = tmp___19; tmp___20 = ldv_init_zalloc(4UL); ldvarg71 = (uint32_t *)tmp___20; tmp___21 = ldv_init_zalloc(4UL); ldvarg66 = (uint32_t *)tmp___21; tmp___22 = ldv_init_zalloc(4UL); ldvarg88 = (uint32_t *)tmp___22; tmp___23 = ldv_init_zalloc(1UL); ldvarg92 = tmp___23; tmp___24 = ldv_init_zalloc(4UL); ldvarg122 = (uint32_t *)tmp___24; tmp___25 = ldv_init_zalloc(1UL); ldvarg129 = tmp___25; tmp___26 = ldv_init_zalloc(4UL); ldvarg117 = (uint32_t *)tmp___26; ldv_initialize(); ldv_memset((void *)(& ldvarg18), 0, 4UL); ldv_memset((void *)(& ldvarg11), 0, 4UL); ldv_memset((void *)(& ldvarg7), 0, 4UL); ldv_memset((void *)(& ldvarg23), 0, 4UL); ldv_memset((void *)(& ldvarg12), 0, 4UL); ldv_memset((void *)(& ldvarg56), 0, 8UL); ldv_memset((void *)(& ldvarg50), 0, 4UL); ldv_memset((void *)(& ldvarg58), 0, 4UL); ldv_memset((void *)(& ldvarg37), 0, 4UL); ldv_memset((void *)(& ldvarg29), 0, 4UL); ldv_memset((void *)(& ldvarg44), 0, 4UL); ldv_memset((void *)(& ldvarg24), 0, 4UL); ldv_memset((void *)(& ldvarg16), 0, 4UL); ldv_memset((void *)(& ldvarg48), 0, 4UL); ldv_memset((void *)(& ldvarg34), 0, 2UL); ldv_memset((void *)(& ldvarg28), 0, 4UL); ldv_memset((void *)(& ldvarg47), 0, 4UL); ldv_memset((void *)(& ldvarg39), 0, 4UL); ldv_memset((void *)(& ldvarg20), 0, 4UL); ldv_memset((void *)(& ldvarg41), 0, 4UL); ldv_memset((void *)(& ldvarg8), 0, 2UL); ldv_memset((void *)(& ldvarg55), 0, 4UL); ldv_memset((void *)(& ldvarg10), 0, 4UL); ldv_memset((void *)(& ldvarg36), 0, 2UL); ldv_memset((void *)(& ldvarg40), 0, 4UL); ldv_memset((void *)(& ldvarg9), 0, 2UL); ldv_memset((void *)(& ldvarg45), 0, 4UL); ldv_memset((void *)(& ldvarg27), 0, 4UL); ldv_memset((void *)(& ldvarg15), 0, 4UL); ldv_memset((void *)(& ldvarg54), 0, 4UL); ldv_memset((void *)(& ldvarg17), 0, 4UL); ldv_memset((void *)(& ldvarg25), 0, 1UL); ldv_memset((void *)(& ldvarg52), 0, 4UL); ldv_memset((void *)(& ldvarg62), 0, 4UL); ldv_memset((void *)(& ldvarg77), 0, 4UL); ldv_memset((void *)(& ldvarg73), 0, 8UL); ldv_memset((void *)(& ldvarg70), 0, 4UL); ldv_memset((void *)(& ldvarg67), 0, 4UL); ldv_memset((void *)(& ldvarg68), 0, 4UL); ldv_memset((void *)(& ldvarg72), 0, 4UL); ldv_memset((void *)(& ldvarg69), 0, 4UL); ldv_memset((void *)(& ldvarg76), 0, 4UL); ldv_memset((void *)(& ldvarg74), 0, 4UL); ldv_memset((void *)(& ldvarg75), 0, 8UL); ldv_memset((void *)(& ldvarg85), 0, 4UL); ldv_memset((void *)(& ldvarg86), 0, 4UL); ldv_memset((void *)(& ldvarg91), 0, 4UL); ldv_memset((void *)(& ldvarg89), 0, 4UL); ldv_memset((void *)(& ldvarg90), 0, 4UL); ldv_memset((void *)(& ldvarg87), 0, 4UL); ldv_memset((void *)(& ldvarg121), 0, 4UL); ldv_memset((void *)(& ldvarg127), 0, 4UL); ldv_memset((void *)(& ldvarg124), 0, 8UL); ldv_memset((void *)(& ldvarg128), 0, 4UL); ldv_memset((void *)(& ldvarg126), 0, 8UL); ldv_memset((void *)(& ldvarg118), 0, 4UL); ldv_memset((void *)(& ldvarg123), 0, 4UL); ldv_memset((void *)(& ldvarg120), 0, 4UL); ldv_memset((void *)(& ldvarg125), 0, 4UL); ldv_memset((void *)(& ldvarg119), 0, 4UL); ldv_state_variable_11 = 0; ldv_state_variable_21 = 0; ldv_state_variable_7 = 0; ldv_state_variable_26 = 0; ldv_state_variable_17 = 0; work_init_2(); ldv_state_variable_2 = 1; ldv_state_variable_22 = 0; ldv_state_variable_1 = 1; ldv_state_variable_18 = 0; ref_cnt = 0; ldv_state_variable_0 = 1; ldv_state_variable_23 = 0; ldv_state_variable_16 = 0; ldv_state_variable_13 = 0; ldv_state_variable_27 = 0; ldv_state_variable_25 = 0; ldv_state_variable_6 = 0; 
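/* LDV environment model: main() zero-fills every ldvarg* callback
 * argument, resets the per-interface ldv_state_variable_* machines, and
 * then loops on __VERIFIER_nondet_int() choosing which registered
 * driver callback group (PCI driver, PCI error handlers, iSCSI
 * transport, SCSI host template, work items, timers) to exercise
 * next. */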
work_init_3(); ldv_state_variable_3 = 1; ldv_state_variable_9 = 0; ldv_state_variable_12 = 0; ldv_state_variable_20 = 0; ldv_state_variable_14 = 0; ldv_state_variable_15 = 0; ldv_state_variable_8 = 0; ldv_state_variable_4 = 1; ldv_state_variable_24 = 0; ldv_state_variable_19 = 0; ldv_state_variable_10 = 0; ldv_state_variable_5 = 0; ldv_66735: tmp___27 = __VERIFIER_nondet_int(); switch (tmp___27) { case 0: ; if (ldv_state_variable_11 != 0) { ldv_main_exported_11(); } else { } goto ldv_66565; case 1: ; if (ldv_state_variable_21 != 0) { tmp___28 = __VERIFIER_nondet_int(); switch (tmp___28) { case 0: ; if (ldv_state_variable_21 == 1) { ldv_retval_0 = qla4xxx_probe_adapter(qla4xxx_pci_driver_group1, (struct pci_device_id const *)ldvarg3); if (ldv_retval_0 == 0) { ldv_state_variable_21 = 2; ref_cnt = ref_cnt + 1; } else { } } else { } goto ldv_66568; case 1: ; if (ldv_state_variable_21 == 2) { qla4xxx_remove_adapter(qla4xxx_pci_driver_group1); ldv_state_variable_21 = 1; } else { } goto ldv_66568; case 2: ; if (ldv_state_variable_21 == 2) { ldv_shutdown_21(); ldv_state_variable_21 = 2; } else { } goto ldv_66568; default: ldv_stop(); } ldv_66568: ; } else { } goto ldv_66565; case 2: ; if (ldv_state_variable_7 != 0) { ldv_main_exported_7(); } else { } goto ldv_66565; case 3: ; if (ldv_state_variable_26 != 0) { tmp___29 = __VERIFIER_nondet_int(); switch (tmp___29) { case 0: ; if (ldv_state_variable_26 == 1) { qla4xxx_get_ep_param(qla4xxx_iscsi_transport_group7, ldvarg58, ldvarg57); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_get_ep_param(qla4xxx_iscsi_transport_group7, ldvarg58, ldvarg57); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 1: ; if (ldv_state_variable_26 == 1) { qla4xxx_conn_bind(qla4xxx_iscsi_transport_group3, qla4xxx_iscsi_transport_group4, ldvarg56, ldvarg55); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_bind(qla4xxx_iscsi_transport_group3, qla4xxx_iscsi_transport_group4, ldvarg56, ldvarg55); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 2: ; if (ldv_state_variable_26 == 1) { qla4xxx_conn_get_param(qla4xxx_iscsi_transport_group4, ldvarg54, ldvarg53); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_get_param(qla4xxx_iscsi_transport_group4, ldvarg54, ldvarg53); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 3: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_login(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_login(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 4: ; if (ldv_state_variable_26 == 1) { qla4xxx_session_destroy(qla4xxx_iscsi_transport_group3); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_session_destroy(qla4xxx_iscsi_transport_group3); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 5: ; if (ldv_state_variable_26 == 1) { iscsi_set_param(qla4xxx_iscsi_transport_group4, ldvarg52, ldvarg51, ldvarg50); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { iscsi_set_param(qla4xxx_iscsi_transport_group4, ldvarg52, ldvarg51, ldvarg50); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 6: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_set_param(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1, ldvarg49, ldvarg48); ldv_state_variable_26 = 1; } else { } if 
(ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_set_param(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1, ldvarg49, ldvarg48); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 7: ; if (ldv_state_variable_26 == 1) { qla4xxx_conn_start(qla4xxx_iscsi_transport_group4); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_start(qla4xxx_iscsi_transport_group4); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 8: ; if (ldv_state_variable_26 == 1) { qla4xxx_conn_create(qla4xxx_iscsi_transport_group3, ldvarg47); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_create(qla4xxx_iscsi_transport_group3, ldvarg47); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 9: ; if (ldv_state_variable_26 == 1) { qla4xxx_get_host_stats(qla4xxx_iscsi_transport_group0, ldvarg46, ldvarg45); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_get_host_stats(qla4xxx_iscsi_transport_group0, ldvarg46, ldvarg45); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 10: ; if (ldv_state_variable_26 == 2) { qla4xxx_ep_disconnect(qla4xxx_iscsi_transport_group7); ldv_state_variable_26 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_66575; case 11: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_delete(qla4xxx_iscsi_transport_group2); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_delete(qla4xxx_iscsi_transport_group2); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 12: ; if (ldv_state_variable_26 == 1) { qla4xxx_session_get_param(qla4xxx_iscsi_transport_group3, ldvarg44, ldvarg43); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_session_get_param(qla4xxx_iscsi_transport_group3, ldvarg44, ldvarg43); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 13: ; if (ldv_state_variable_26 == 1) { qla4xxx_set_chap_entry(qla4xxx_iscsi_transport_group0, ldvarg42, ldvarg41); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_set_chap_entry(qla4xxx_iscsi_transport_group0, ldvarg42, ldvarg41); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 14: ; if (ldv_state_variable_26 == 1) { qla4_attr_is_visible(ldvarg39, ldvarg40); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4_attr_is_visible(ldvarg39, ldvarg40); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 15: ; if (ldv_state_variable_26 == 1) { qla4xxx_iface_set_param(qla4xxx_iscsi_transport_group0, ldvarg38, ldvarg37); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_iface_set_param(qla4xxx_iscsi_transport_group0, ldvarg38, ldvarg37); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 16: ; if (ldv_state_variable_26 == 1) { qla4xxx_delete_chap(qla4xxx_iscsi_transport_group0, (int )ldvarg36); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_delete_chap(qla4xxx_iscsi_transport_group0, (int )ldvarg36); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 17: ; if (ldv_state_variable_26 == 1) { qla4xxx_bsg_request(ldvarg35); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_bsg_request(ldvarg35); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 18: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_logout_sid(qla4xxx_iscsi_transport_group3); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { 
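/* State variable 26 models the qla4xxx_iscsi_transport callback set:
 * state 1 appears to stand for "transport registered", state 2 for
 * "endpoint active" (entered via ldv_probe_26() in case 36 and left via
 * qla4xxx_ep_disconnect() in case 10), and every other case invokes one
 * transport operation in whichever of the two states currently holds. */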
qla4xxx_sysfs_ddb_logout_sid(qla4xxx_iscsi_transport_group3); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 19: ; if (ldv_state_variable_26 == 1) { qla4xxx_get_chap_list(qla4xxx_iscsi_transport_group0, (int )ldvarg34, ldvarg33, ldvarg32); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_get_chap_list(qla4xxx_iscsi_transport_group0, (int )ldvarg34, ldvarg33, ldvarg32); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 20: ; if (ldv_state_variable_26 == 1) { iscsi_conn_send_pdu(qla4xxx_iscsi_transport_group4, ldvarg31, ldvarg30, ldvarg29); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { iscsi_conn_send_pdu(qla4xxx_iscsi_transport_group4, ldvarg31, ldvarg30, ldvarg29); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 21: ; if (ldv_state_variable_26 == 1) { qla4xxx_ep_connect(qla4xxx_iscsi_transport_group0, qla4xxx_iscsi_transport_group6, ldvarg28); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_ep_connect(qla4xxx_iscsi_transport_group0, qla4xxx_iscsi_transport_group6, ldvarg28); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 22: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_get_param(qla4xxx_iscsi_transport_group2, ldvarg27, ldvarg26); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_get_param(qla4xxx_iscsi_transport_group2, ldvarg27, ldvarg26); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 23: ; if (ldv_state_variable_26 == 1) { qla4xxx_task_cleanup(qla4xxx_iscsi_transport_group5); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_task_cleanup(qla4xxx_iscsi_transport_group5); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 24: ; if (ldv_state_variable_26 == 1) { qla4xxx_alloc_pdu(qla4xxx_iscsi_transport_group5, (int )ldvarg25); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_alloc_pdu(qla4xxx_iscsi_transport_group5, (int )ldvarg25); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 25: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_logout(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_logout(qla4xxx_iscsi_transport_group2, qla4xxx_iscsi_transport_group1); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 26: ; if (ldv_state_variable_26 == 1) { qla4xxx_get_iface_param(ldvarg22, ldvarg24, ldvarg23, ldvarg21); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_get_iface_param(ldvarg22, ldvarg24, ldvarg23, ldvarg21); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 27: ; if (ldv_state_variable_26 == 1) { qla4xxx_host_get_param(qla4xxx_iscsi_transport_group0, ldvarg20, ldvarg19); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_host_get_param(qla4xxx_iscsi_transport_group0, ldvarg20, ldvarg19); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 28: ; if (ldv_state_variable_26 == 1) { qla4xxx_send_ping(qla4xxx_iscsi_transport_group0, ldvarg17, ldvarg16, ldvarg15, ldvarg18, qla4xxx_iscsi_transport_group6); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_send_ping(qla4xxx_iscsi_transport_group0, ldvarg17, ldvarg16, ldvarg15, ldvarg18, qla4xxx_iscsi_transport_group6); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 29: ; if (ldv_state_variable_26 == 1) { 
qla4xxx_conn_get_stats(qla4xxx_iscsi_transport_group4, ldvarg14); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_get_stats(qla4xxx_iscsi_transport_group4, ldvarg14); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 30: ; if (ldv_state_variable_26 == 1) { qla4xxx_sysfs_ddb_add(qla4xxx_iscsi_transport_group0, (char const *)ldvarg13, ldvarg12); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_sysfs_ddb_add(qla4xxx_iscsi_transport_group0, (char const *)ldvarg13, ldvarg12); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 31: ; if (ldv_state_variable_26 == 1) { qla4xxx_ep_poll(qla4xxx_iscsi_transport_group7, ldvarg11); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_ep_poll(qla4xxx_iscsi_transport_group7, ldvarg11); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 32: ; if (ldv_state_variable_26 == 1) { iscsi_conn_stop(qla4xxx_iscsi_transport_group4, ldvarg10); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { iscsi_conn_stop(qla4xxx_iscsi_transport_group4, ldvarg10); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 33: ; if (ldv_state_variable_26 == 1) { qla4xxx_session_create(qla4xxx_iscsi_transport_group7, (int )ldvarg9, (int )ldvarg8, ldvarg7); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_session_create(qla4xxx_iscsi_transport_group7, (int )ldvarg9, (int )ldvarg8, ldvarg7); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 34: ; if (ldv_state_variable_26 == 1) { qla4xxx_task_xmit(qla4xxx_iscsi_transport_group5); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_task_xmit(qla4xxx_iscsi_transport_group5); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 35: ; if (ldv_state_variable_26 == 1) { qla4xxx_conn_destroy(qla4xxx_iscsi_transport_group4); ldv_state_variable_26 = 1; } else { } if (ldv_state_variable_26 == 2) { qla4xxx_conn_destroy(qla4xxx_iscsi_transport_group4); ldv_state_variable_26 = 2; } else { } goto ldv_66575; case 36: ; if (ldv_state_variable_26 == 1) { ldv_probe_26(); ldv_state_variable_26 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_66575; default: ldv_stop(); } ldv_66575: ; } else { } goto ldv_66565; case 4: ; if (ldv_state_variable_17 != 0) { ldv_main_exported_17(); } else { } goto ldv_66565; case 5: ; if (ldv_state_variable_2 != 0) { invoke_work_2(); } else { } goto ldv_66565; case 6: ; if (ldv_state_variable_22 != 0) { tmp___30 = __VERIFIER_nondet_int(); switch (tmp___30) { case 0: ; if (ldv_state_variable_22 == 3) { qla4xxx_pci_resume(qla4xxx_err_handler_group0); ldv_state_variable_22 = 2; } else { } goto ldv_66617; case 1: ; if (ldv_state_variable_22 == 1) { qla4xxx_pci_slot_reset(qla4xxx_err_handler_group0); ldv_state_variable_22 = 1; } else { } if (ldv_state_variable_22 == 3) { qla4xxx_pci_slot_reset(qla4xxx_err_handler_group0); ldv_state_variable_22 = 3; } else { } if (ldv_state_variable_22 == 2) { qla4xxx_pci_slot_reset(qla4xxx_err_handler_group0); ldv_state_variable_22 = 2; } else { } goto ldv_66617; case 2: ; if (ldv_state_variable_22 == 1) { qla4xxx_pci_error_detected(qla4xxx_err_handler_group0, (pci_channel_state_t )ldvarg62); ldv_state_variable_22 = 1; } else { } if (ldv_state_variable_22 == 3) { qla4xxx_pci_error_detected(qla4xxx_err_handler_group0, (pci_channel_state_t )ldvarg62); ldv_state_variable_22 = 3; } else { } if (ldv_state_variable_22 == 2) { qla4xxx_pci_error_detected(qla4xxx_err_handler_group0, 
(pci_channel_state_t )ldvarg62); ldv_state_variable_22 = 2; } else { } goto ldv_66617; case 3: ; if (ldv_state_variable_22 == 1) { qla4xxx_pci_mmio_enabled(qla4xxx_err_handler_group0); ldv_state_variable_22 = 1; } else { } if (ldv_state_variable_22 == 3) { qla4xxx_pci_mmio_enabled(qla4xxx_err_handler_group0); ldv_state_variable_22 = 3; } else { } if (ldv_state_variable_22 == 2) { qla4xxx_pci_mmio_enabled(qla4xxx_err_handler_group0); ldv_state_variable_22 = 2; } else { } goto ldv_66617; case 4: ; if (ldv_state_variable_22 == 2) { ldv_suspend_22(); ldv_state_variable_22 = 3; } else { } goto ldv_66617; case 5: ; if (ldv_state_variable_22 == 3) { ldv_release_22(); ldv_state_variable_22 = 1; ref_cnt = ref_cnt - 1; } else { } if (ldv_state_variable_22 == 2) { ldv_release_22(); ldv_state_variable_22 = 1; ref_cnt = ref_cnt - 1; } else { } goto ldv_66617; case 6: ; if (ldv_state_variable_22 == 1) { ldv_probe_22(); ldv_state_variable_22 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_66617; default: ldv_stop(); } ldv_66617: ; } else { } goto ldv_66565; case 7: ; goto ldv_66565; case 8: ; if (ldv_state_variable_18 != 0) { ldv_main_exported_18(); } else { } goto ldv_66565; case 9: ; if (ldv_state_variable_0 != 0) { tmp___31 = __VERIFIER_nondet_int(); switch (tmp___31) { case 0: ; if (ldv_state_variable_0 == 3 && ref_cnt == 0) { qla4xxx_module_exit(); ldv_state_variable_0 = 2; goto ldv_final; } else { } goto ldv_66630; case 1: ; if (ldv_state_variable_0 == 1) { ldv_retval_1 = qla4xxx_module_init(); if (ldv_retval_1 == 0) { ldv_state_variable_0 = 3; ldv_state_variable_5 = 1; ldv_state_variable_19 = 1; ldv_state_variable_10 = 1; ldv_state_variable_24 = 1; ldv_initialize_isp_operations_24(); ldv_state_variable_8 = 1; ldv_state_variable_20 = 1; ldv_initialize_bin_attribute_20(); ldv_state_variable_15 = 1; ldv_state_variable_14 = 1; ldv_state_variable_12 = 1; ldv_state_variable_9 = 1; ldv_state_variable_6 = 1; ldv_state_variable_25 = 1; ldv_initialize_isp_operations_25(); ldv_state_variable_13 = 1; ldv_state_variable_16 = 1; ldv_state_variable_23 = 1; ldv_initialize_isp_operations_23(); ldv_state_variable_18 = 1; ldv_state_variable_22 = 1; ldv_initialize_pci_error_handlers_22(); ldv_state_variable_17 = 1; ldv_state_variable_26 = 1; ldv_initialize_iscsi_transport_26(); ldv_state_variable_7 = 1; ldv_state_variable_11 = 1; } else { } if (ldv_retval_1 != 0) { ldv_state_variable_0 = 2; goto ldv_final; } else { } } else { } goto ldv_66630; default: ldv_stop(); } ldv_66630: ; } else { } goto ldv_66565; case 10: ; if (ldv_state_variable_23 != 0) { tmp___32 = __VERIFIER_nondet_int(); switch (tmp___32) { case 0: ; if (ldv_state_variable_23 == 1) { qla4_83xx_enable_intrs(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 1: ; if (ldv_state_variable_23 == 1) { qla4_83xx_intr_handler(ldvarg77, ldvarg78); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 2: ; if (ldv_state_variable_23 == 1) { qla4_83xx_start_firmware(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 3: ; if (ldv_state_variable_23 == 1) { qla4_83xx_complete_iocb(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 4: ; if (ldv_state_variable_23 == 1) { qla4_83xx_isp_reset(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 5: ; if (ldv_state_variable_23 == 1) { qla4_83xx_interrupt_service_routine(qla4_83xx_isp_ops_group0, ldvarg76); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 6: ; if 
(ldv_state_variable_23 == 1) { qla4_83xx_rom_lock_recovery(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 7: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_load_risc(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 8: ; if (ldv_state_variable_23 == 1) { qla4_83xx_wr_reg(qla4_83xx_isp_ops_group0, ldvarg75, ldvarg74); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 9: ; if (ldv_state_variable_23 == 1) { qla4xxx_rd_shdw_rsp_q_in(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 10: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_iospace_config(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 11: ; if (ldv_state_variable_23 == 1) { qla4_83xx_disable_intrs(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 12: ; if (ldv_state_variable_23 == 1) { qla4xxx_rd_shdw_req_q_out(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 13: ; if (ldv_state_variable_23 == 1) { qla4_83xx_rd_reg(qla4_83xx_isp_ops_group0, ldvarg73); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 14: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_need_reset(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 15: ; if (ldv_state_variable_23 == 1) { qla4_83xx_process_mbox_intr(qla4_83xx_isp_ops_group0, ldvarg72); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 16: ; if (ldv_state_variable_23 == 1) { qla4_83xx_queue_iocb(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 17: ; if (ldv_state_variable_23 == 1) { qla4_83xx_queue_mbox_cmd(qla4_83xx_isp_ops_group0, ldvarg71, ldvarg70); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 18: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_stop_firmware(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 19: ; if (ldv_state_variable_23 == 1) { qla4_83xx_drv_unlock(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 20: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_pci_config(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 21: ; if (ldv_state_variable_23 == 1) { qla4_83xx_drv_lock(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 22: ; if (ldv_state_variable_23 == 1) { qla4_8xxx_get_sys_info(qla4_83xx_isp_ops_group0); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 23: ; if (ldv_state_variable_23 == 1) { qla4_83xx_wr_reg_indirect(qla4_83xx_isp_ops_group0, ldvarg69, ldvarg68); ldv_state_variable_23 = 1; } else { } goto ldv_66635; case 24: ; if (ldv_state_variable_23 == 1) { qla4_83xx_rd_reg_indirect(qla4_83xx_isp_ops_group0, ldvarg67, ldvarg66); ldv_state_variable_23 = 1; } else { } goto ldv_66635; default: ldv_stop(); } ldv_66635: ; } else { } goto ldv_66565; case 11: ; if (ldv_state_variable_16 != 0) { ldv_main_exported_16(); } else { } goto ldv_66565; case 12: ; if (ldv_state_variable_13 != 0) { ldv_main_exported_13(); } else { } goto ldv_66565; case 13: ; if (ldv_state_variable_27 != 0) { tmp___33 = __VERIFIER_nondet_int(); switch (tmp___33) { case 0: ; if (ldv_state_variable_27 == 1) { qla4xxx_host_reset(qla4xxx_driver_template_group0, ldvarg86); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 1: ; if (ldv_state_variable_27 == 1) { scsi_change_queue_depth(qla4xxx_driver_template_group2, ldvarg85); 
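/* Scenario group 27 exercises the qla4xxx scsi_host_template entry points
   (queuecommand, change_queue_depth, the eh_* error handlers, slave_alloc,
   host_reset) on nondeterministic objects. ldv_state_variable_27 is raised
   to 1 only by the ldv_scsi_add_host_with_dma wrapper defined further below,
   i.e. once scsi_add_host_with_dma() reports success, and is cleared again
   by the ldv_scsi_remove_host wrappers; the callbacks themselves leave it
   unchanged. */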
ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 2: ; if (ldv_state_variable_27 == 1) { qla4xxx_queuecommand(qla4xxx_driver_template_group0, qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 3: ; if (ldv_state_variable_27 == 1) { qla4xxx_eh_target_reset(qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 4: ; if (ldv_state_variable_27 == 1) { qla4xxx_eh_device_reset(qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 5: ; if (ldv_state_variable_27 == 1) { qla4xxx_eh_abort(qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 6: ; if (ldv_state_variable_27 == 1) { qla4xxx_eh_cmd_timed_out(qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 7: ; if (ldv_state_variable_27 == 1) { qla4xxx_slave_alloc(qla4xxx_driver_template_group2); ldv_state_variable_27 = 1; } else { } goto ldv_66665; case 8: ; if (ldv_state_variable_27 == 1) { qla4xxx_eh_host_reset(qla4xxx_driver_template_group1); ldv_state_variable_27 = 1; } else { } goto ldv_66665; default: ldv_stop(); } ldv_66665: ; } else { } goto ldv_66565; case 14: ; if (ldv_state_variable_25 != 0) { tmp___34 = __VERIFIER_nondet_int(); switch (tmp___34) { case 0: ; if (ldv_state_variable_25 == 1) { qla4xxx_enable_intrs(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 1: ; if (ldv_state_variable_25 == 1) { qla4xxx_intr_handler(ldvarg91, ldvarg92); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 2: ; if (ldv_state_variable_25 == 1) { qla4xxx_complete_iocb(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 3: ; if (ldv_state_variable_25 == 1) { qla4xxx_interrupt_service_routine(qla4xxx_isp_ops_group0, ldvarg90); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 4: ; if (ldv_state_variable_25 == 1) { qla4xxx_soft_reset(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 5: ; if (ldv_state_variable_25 == 1) { qla4xxx_start_firmware(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 6: ; if (ldv_state_variable_25 == 1) { qla4xxx_rd_shdw_rsp_q_in(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 7: ; if (ldv_state_variable_25 == 1) { qla4xxx_iospace_config(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 8: ; if (ldv_state_variable_25 == 1) { qla4xxx_disable_intrs(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 9: ; if (ldv_state_variable_25 == 1) { qla4xxx_rd_shdw_req_q_out(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 10: ; if (ldv_state_variable_25 == 1) { qla4xxx_process_mbox_intr(qla4xxx_isp_ops_group0, ldvarg89); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 11: ; if (ldv_state_variable_25 == 1) { qla4xxx_queue_iocb(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 12: ; if (ldv_state_variable_25 == 1) { qla4xxx_queue_mbox_cmd(qla4xxx_isp_ops_group0, ldvarg88, ldvarg87); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 13: ; if (ldv_state_variable_25 == 1) { qla4xxx_hw_reset(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 14: ; if (ldv_state_variable_25 == 1) { qla4xxx_pci_config(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; case 15: 
; if (ldv_state_variable_25 == 1) { qla4xxx_get_sys_info(qla4xxx_isp_ops_group0); ldv_state_variable_25 = 1; } else { } goto ldv_66677; default: ldv_stop(); } ldv_66677: ; } else { } goto ldv_66565; case 15: ; if (ldv_state_variable_6 != 0) { ldv_main_exported_6(); } else { } goto ldv_66565; case 16: ; if (ldv_state_variable_3 != 0) { invoke_work_3(); } else { } goto ldv_66565; case 17: ; if (ldv_state_variable_9 != 0) { ldv_main_exported_9(); } else { } goto ldv_66565; case 18: ; if (ldv_state_variable_12 != 0) { ldv_main_exported_12(); } else { } goto ldv_66565; case 19: ; if (ldv_state_variable_20 != 0) { ldv_main_exported_20(); } else { } goto ldv_66565; case 20: ; if (ldv_state_variable_14 != 0) { ldv_main_exported_14(); } else { } goto ldv_66565; case 21: ; if (ldv_state_variable_15 != 0) { ldv_main_exported_15(); } else { } goto ldv_66565; case 22: ; if (ldv_state_variable_8 != 0) { ldv_main_exported_8(); } else { } goto ldv_66565; case 23: ; if (ldv_state_variable_4 != 0) { choose_timer_4(ldv_timer_list_4); } else { } goto ldv_66565; case 24: ; if (ldv_state_variable_24 != 0) { tmp___35 = __VERIFIER_nondet_int(); switch (tmp___35) { case 0: ; if (ldv_state_variable_24 == 1) { qla4_82xx_enable_intrs(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 1: ; if (ldv_state_variable_24 == 1) { qla4_82xx_intr_handler(ldvarg128, ldvarg129); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 2: ; if (ldv_state_variable_24 == 1) { qla4_82xx_try_start_fw(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 3: ; if (ldv_state_variable_24 == 1) { qla4_82xx_complete_iocb(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 4: ; if (ldv_state_variable_24 == 1) { qla4_82xx_isp_reset(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 5: ; if (ldv_state_variable_24 == 1) { qla4_82xx_interrupt_service_routine(qla4_82xx_isp_ops_group0, ldvarg127); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 6: ; if (ldv_state_variable_24 == 1) { qla4_82xx_rom_lock_recovery(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 7: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_load_risc(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 8: ; if (ldv_state_variable_24 == 1) { qla4_82xx_wr_32(qla4_82xx_isp_ops_group0, ldvarg126, ldvarg125); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 9: ; if (ldv_state_variable_24 == 1) { qla4_82xx_rd_shdw_rsp_q_in(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 10: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_iospace_config(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 11: ; if (ldv_state_variable_24 == 1) { qla4_82xx_disable_intrs(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 12: ; if (ldv_state_variable_24 == 1) { qla4_82xx_rd_shdw_req_q_out(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 13: ; if (ldv_state_variable_24 == 1) { qla4_82xx_rd_32(qla4_82xx_isp_ops_group0, ldvarg124); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 14: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_need_reset(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 15: ; if (ldv_state_variable_24 == 1) { qla4_82xx_process_mbox_intr(qla4_82xx_isp_ops_group0, ldvarg123); 
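/* Scenario groups 23, 24 and 25 drive what appear to be the three isp_ops
   tables used by the driver (qla4_83xx, qla4_82xx and the legacy qla4xxx
   operations, judging by the callbacks invoked and by the
   ldv_initialize_isp_operations_2x initialisers). Each case calls exactly
   one ops member on the shared groupN object with nondeterministic
   arguments and keeps the state variable at 1, so these scenarios only
   check that every hardware operation can be called once the module has
   initialised. */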
ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 16: ; if (ldv_state_variable_24 == 1) { qla4_82xx_queue_iocb(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 17: ; if (ldv_state_variable_24 == 1) { qla4_82xx_queue_mbox_cmd(qla4_82xx_isp_ops_group0, ldvarg122, ldvarg121); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 18: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_stop_firmware(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 19: ; if (ldv_state_variable_24 == 1) { qla4_82xx_idc_unlock(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 20: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_pci_config(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 21: ; if (ldv_state_variable_24 == 1) { qla4_82xx_idc_lock(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 22: ; if (ldv_state_variable_24 == 1) { qla4_8xxx_get_sys_info(qla4_82xx_isp_ops_group0); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 23: ; if (ldv_state_variable_24 == 1) { qla4_82xx_md_wr_32(qla4_82xx_isp_ops_group0, ldvarg120, ldvarg119); ldv_state_variable_24 = 1; } else { } goto ldv_66705; case 24: ; if (ldv_state_variable_24 == 1) { qla4_82xx_md_rd_32(qla4_82xx_isp_ops_group0, ldvarg118, ldvarg117); ldv_state_variable_24 = 1; } else { } goto ldv_66705; default: ldv_stop(); } ldv_66705: ; } else { } goto ldv_66565; case 25: ; if (ldv_state_variable_19 != 0) { ldv_main_exported_19(); } else { } goto ldv_66565; case 26: ; if (ldv_state_variable_10 != 0) { ldv_main_exported_10(); } else { } goto ldv_66565; case 27: ; if (ldv_state_variable_5 != 0) { ldv_main_exported_5(); } else { } goto ldv_66565; default: ldv_stop(); } ldv_66565: ; goto ldv_66735; ldv_final: ldv_check_final_state(); return 0; } } __inline static void *ERR_PTR(long error ) { void *tmp ; { tmp = ldv_err_ptr(error); return (tmp); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { ldv_spin_unlock(); ldv_spin_unlock_irqrestore_12(lock, flags); return; } } bool ldv_queue_work_on_15(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_16(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_17(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_18(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_19(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool 
tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_25(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_29(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } int ldv_pskb_expand_head_32(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_34(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_36(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_37(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_38(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_39(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_40(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_41(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_42(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_43(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vmalloc_44(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_45(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_del_timer_sync_46(struct timer_list *ldv_func_arg1 ) { ldv_func_ret_type___7 ldv_func_res ; int tmp ; { tmp = del_timer_sync(ldv_func_arg1); ldv_func_res = tmp; disable_suitable_timer_4(ldv_func_arg1); return (ldv_func_res); } } void *ldv_mempool_alloc_47(mempool_t *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { 
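/* Like the other ldv_* allocation wrappers in this file, the functions in
   this block replace the real allocators with the LDV memory model:
   ldv_check_alloc_flags() presumably rejects GFP flags that may sleep while
   a spinlock is held (compare the ldv_spin_lock()/ldv_spin_unlock() calls
   elsewhere in this file), ldv_check_alloc_nonatomic() appears to be the
   analogous check for allocations that must not happen in atomic context,
   and ldv_undef_ptr() returns an unconstrained pointer instead of real
   memory. */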
ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_mod_timer_48(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___8 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_4(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } int ldv_mod_timer_49(struct timer_list *ldv_func_arg1 , unsigned long ldv_func_arg2 ) { ldv_func_ret_type___9 ldv_func_res ; int tmp ; { tmp = mod_timer(ldv_func_arg1, ldv_func_arg2); ldv_func_res = tmp; activate_pending_timer_4(ldv_func_arg1, ldv_func_arg2, 1); return (ldv_func_res); } } void ldv_destroy_workqueue_50(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } void ldv_destroy_workqueue_51(struct workqueue_struct *ldv_func_arg1 ) { { destroy_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } void *ldv_vzalloc_52(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_53(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_54(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_55(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vmalloc_56(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_57(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_58(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_59(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_60(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vmalloc_61(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_62(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vmalloc_63(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_64(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_65(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_vzalloc_66(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void ldv_scsi_remove_host_67(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_27 = 0; return; } } void ldv_scsi_remove_host_68(struct Scsi_Host *shost ) { { scsi_remove_host(shost); ldv_state_variable_27 = 0; return; } } int ldv___pci_register_driver_69(struct 
pci_driver *ldv_func_arg1 , struct module *ldv_func_arg2 , char const *ldv_func_arg3 ) { ldv_func_ret_type___10 ldv_func_res ; int tmp ; { tmp = __pci_register_driver(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; ldv_state_variable_21 = 1; ldv_pci_driver_21(); return (ldv_func_res); } } void ldv_pci_unregister_driver_70(struct pci_driver *ldv_func_arg1 ) { { pci_unregister_driver(ldv_func_arg1); ldv_state_variable_21 = 0; return; } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; bool ldv_queue_work_on_100(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_102(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_101(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_104(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_103(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_110(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_127(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern int pci_bus_write_config_word(struct pci_bus * , unsigned int , int , u16 ) ; __inline static int pci_write_config_word(struct pci_dev const *dev , int where , u16 val ) { int tmp ; { tmp = pci_bus_write_config_word(dev->bus, dev->devfn, where, (int )val); return (tmp); } } extern void pci_set_master(struct pci_dev * ) ; extern int pci_set_mwi(struct pci_dev * ) ; void *ldv_dma_pool_alloc_130(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; extern unsigned long msleep_interruptible(unsigned int ) ; void *ldv_vmalloc_129(unsigned long ldv_func_arg1 ) ; struct sk_buff *ldv_skb_clone_118(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_126(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_120(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_116(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_124(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_125(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_121(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_122(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_123(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_128(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static int is_ipv4_enabled(struct scsi_qla_host *ha ) { { return ((int )((short )ha->ip_config.ipv4_options) < 0); } } __inline static int is_ipv6_enabled(struct scsi_qla_host *ha ) { { return ((ha->ip_config.ipv6_options & 32768U) != 0U); } } __inline static void *isp_ext_hw_conf(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return ((void *)(tmp != 0 ? 
& (ha->reg)->u2.isp4010.ext_hw_conf : & (ha->reg)->u2.isp4022.__annonCompField129.p0.ext_hw_conf)); } } __inline static void *isp_port_ctrl(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return ((void *)(tmp != 0 ? & (ha->reg)->u2.isp4010.port_ctrl : & (ha->reg)->u2.isp4022.__annonCompField129.p0.port_ctrl)); } } __inline static int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return (tmp != 0 ? 12 : 20); } } int ql4xxx_sem_spinlock(struct scsi_qla_host *ha , u32 sem_mask , u32 sem_bits ) ; void ql4xxx_sem_unlock(struct scsi_qla_host *ha , u32 sem_mask ) ; int ql4xxx_sem_lock(struct scsi_qla_host *ha , u32 sem_mask , u32 sem_bits ) ; __inline static int ql4xxx_lock_flash(struct scsi_qla_host *a ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = is_qla4010(a); if (tmp___1 != 0) { tmp = ql4xxx_sem_spinlock(a, 3221225472U, 49152U); return (tmp); } else { tmp___0 = ql4xxx_sem_spinlock(a, 3758096384U, (a->mac_index | 4U) << 13); return (tmp___0); } } } __inline static void ql4xxx_unlock_flash(struct scsi_qla_host *a ) { int tmp ; { tmp = is_qla4010(a); if (tmp != 0) { ql4xxx_sem_unlock(a, 3221225472U); } else { ql4xxx_sem_unlock(a, 3758096384U); } return; } } __inline static int ql4xxx_lock_nvram(struct scsi_qla_host *a ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = is_qla4010(a); if (tmp___1 != 0) { tmp = ql4xxx_sem_spinlock(a, 805306368U, 12288U); return (tmp); } else { tmp___0 = ql4xxx_sem_spinlock(a, 469762048U, (a->mac_index | 4U) << 10); return (tmp___0); } } } __inline static void ql4xxx_unlock_nvram(struct scsi_qla_host *a ) { int tmp ; { tmp = is_qla4010(a); if (tmp != 0) { ql4xxx_sem_unlock(a, 805306368U); } else { ql4xxx_sem_unlock(a, 469762048U); } return; } } __inline static int ql4xxx_lock_drvr(struct scsi_qla_host *a ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp___1 = is_qla4010(a); if (tmp___1 != 0) { tmp = ql4xxx_sem_lock(a, 3145728U, 48U); return (tmp); } else { tmp___0 = ql4xxx_sem_lock(a, 917504U, (a->mac_index | 4U) << 1); return (tmp___0); } } } __inline static void ql4xxx_unlock_drvr(struct scsi_qla_host *a ) { int tmp ; { tmp = is_qla4010(a); if (tmp != 0) { ql4xxx_sem_unlock(a, 3145728U); } else { ql4xxx_sem_unlock(a, 917504U); } return; } } int qla4xxx_get_firmware_status(struct scsi_qla_host *ha ) ; int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha ) ; u16 rd_nvram_word(struct scsi_qla_host *ha , int offset ) ; void qla4xxx_get_crash_record(struct scsi_qla_host *ha ) ; int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha ) ; int qla4xxx_about_firmware(struct scsi_qla_host *ha ) ; int qla4xxx_init_rings(struct scsi_qla_host *ha ) ; int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha , uint32_t fw_ddb_index , uint32_t state , uint32_t conn_err ) ; int qla4xxx_get_minidump_template(struct scsi_qla_host *ha , dma_addr_t phys_addr ) ; int qla4xxx_req_template_size(struct scsi_qla_host *ha ) ; void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha ) ; void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha ) ; static void ql4xxx_set_mac_number(struct scsi_qla_host *ha ) { uint32_t value ; uint8_t func_number ; unsigned long flags ; unsigned short tmp ; { ldv_spin_lock(); tmp = readw((void const volatile *)(& (ha->reg)->ctrl_status)); value = (uint32_t )tmp; spin_unlock_irqrestore(& ha->hardware_lock, flags); func_number = (unsigned int )((unsigned char )(value >> 4)) & 48U; switch (value & 1792U) { case 1280U: ha->mac_index = 1U; goto ldv_63293; case 1792U: ha->mac_index = 3U; goto 
ldv_63293; default: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Invalid function number, ispControlStatus = 0x%x\n", ha->host_no, "ql4xxx_set_mac_number", value); } else { } goto ldv_63293; } ldv_63293: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, "ql4xxx_set_mac_number", ha->mac_index); } else { } return; } } void qla4xxx_free_ddb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) { { ha->fw_ddb_index_map[(int )ddb_entry->fw_ddb_index] = (struct ddb_entry *)65535; ha->tot_ddbs = ha->tot_ddbs - 1U; return; } } static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha ) { uint16_t cnt ; struct response *pkt ; { pkt = (struct response *)ha->response_ptr; cnt = 0U; goto ldv_63307; ldv_63306: pkt->signature = 3735936685U; pkt = pkt + 1; cnt = (uint16_t )((int )cnt + 1); ldv_63307: ; if ((unsigned int )cnt <= 63U) { goto ldv_63306; } else { } return; } } int qla4xxx_init_rings(struct scsi_qla_host *ha ) { unsigned long flags ; int i ; int tmp ; int tmp___0 ; int tmp___1 ; { flags = 0UL; ldv_spin_lock(); ha->request_out = 0U; ha->request_in = 0U; ha->request_ptr = ha->request_ring + (unsigned long )ha->request_in; ha->req_q_count = 1024U; ha->response_in = 0U; ha->response_out = 0U; ha->response_ptr = ha->response_ring + (unsigned long )ha->response_out; tmp___1 = is_qla8022(ha); if (tmp___1 != 0) { writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->req_q_out)); writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->rsp_q_in)); writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->rsp_q_out)); } else { tmp = is_qla8032(ha); if (tmp != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->req_q_in)); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->rsp_q_in)); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->rsp_q_out)); } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->req_q_in)); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->rsp_q_in)); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->rsp_q_out)); } else { (ha->shadow_regs)->req_q_out = 0U; (ha->shadow_regs)->rsp_q_in = 0U; __asm__ volatile ("sfence": : : "memory"); writel(0U, (void volatile *)(& (ha->reg)->req_q_in)); writel(0U, (void volatile *)(& (ha->reg)->rsp_q_out)); readl((void const volatile *)(& (ha->reg)->rsp_q_out)); } } } qla4xxx_init_response_q_entries(ha); i = 0; goto ldv_63315; ldv_63314: ha->active_mrb_array[i] = (struct mrb *)0; i = i + 1; ldv_63315: ; if (i <= 127) { goto ldv_63314; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); } } int qla4xxx_get_sys_info(struct scsi_qla_host *ha ) { struct flash_sys_info *sys_info ; dma_addr_t sys_info_dma ; int status ; void *tmp ; int tmp___0 ; unsigned long _min1 ; unsigned long _min2 ; unsigned long _min1___0 ; unsigned long _min2___0 ; { status = 1; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & sys_info_dma, 208U, (struct dma_attrs *)0); sys_info = (struct flash_sys_info *)tmp; if ((unsigned long )sys_info == (unsigned long )((struct flash_sys_info *)0)) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, "qla4xxx_get_sys_info"); } else { } goto exit_get_sys_info_no_free; } else { } memset((void *)sys_info, 0, 512UL); tmp___0 = qla4xxx_get_flash(ha, sys_info_dma, 33554432U, 512U); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO failed\n", ha->host_no, "qla4xxx_get_sys_info"); } else 
{ } goto exit_get_sys_info; } else { } _min1 = 6UL; _min2 = 6UL; memcpy((void *)(& ha->my_mac), (void const *)(& sys_info->physAddr[0].address), _min1 < _min2 ? _min1 : _min2); _min1___0 = 16UL; _min2___0 = 16UL; memcpy((void *)(& ha->serial_number), (void const *)(& sys_info->acSerialNumber), _min1___0 < _min2___0 ? _min1___0 : _min2___0); status = 0; exit_get_sys_info: dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)sys_info, sys_info_dma, (struct dma_attrs *)0); exit_get_sys_info_no_free: ; return (status); } } static void qla4xxx_init_local_data(struct scsi_qla_host *ha ) { { ha->aen_q_count = 512U; return; } } static uint8_t qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha ) { uint8_t ipv4_wait ; uint8_t ipv6_wait ; int8_t ip_address[16U] ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { ipv4_wait = 0U; ipv6_wait = 0U; ip_address[0] = 0; tmp = 1U; while (1) { if (tmp >= 16U) { break; } else { } ip_address[tmp] = (signed char)0; tmp = tmp + 1U; } tmp___2 = is_ipv4_enabled(ha); if (tmp___2 != 0) { tmp___3 = is_ipv6_enabled(ha); if (tmp___3 != 0) { if ((ha->addl_fw_state & 2U) != 0U && (ha->addl_fw_state & 4U) == 0U) { ipv4_wait = 1U; } else { } if ((ha->ip_config.ipv6_addl_options & 2U) != 0U && (((unsigned int )ha->ip_config.ipv6_link_local_state == 2U || (unsigned int )ha->ip_config.ipv6_addr0_state == 2U) || (unsigned int )ha->ip_config.ipv6_addr1_state == 2U)) { ipv6_wait = 1U; if (((unsigned int )ha->ip_config.ipv6_link_local_state == 5U || (unsigned int )ha->ip_config.ipv6_addr0_state == 5U) || (unsigned int )ha->ip_config.ipv6_addr1_state == 5U) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: Preferred IP configured. Don\'t wait!\n", ha->host_no, "qla4xxx_wait_for_ip_config"); } else { } ipv6_wait = 0U; } else { } tmp___0 = memcmp((void const *)(& ha->ip_config.ipv6_default_router_addr), (void const *)(& ip_address), 16UL); if (tmp___0 == 0) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: No Router configured. Don\'t wait!\n", ha->host_no, "qla4xxx_wait_for_ip_config"); } else { } ipv6_wait = 0U; } else { } if ((unsigned int )ha->ip_config.ipv6_default_router_state == 1U && (unsigned int )ha->ip_config.ipv6_link_local_state == 3U) { tmp___1 = memcmp((void const *)(& ha->ip_config.ipv6_link_local_addr), (void const *)(& ha->ip_config.ipv6_default_router_addr), 4UL); if (tmp___1 == 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: LinkLocal Router & IP configured. 
Don\'t wait!\n", ha->host_no, "qla4xxx_wait_for_ip_config"); } else { } ipv6_wait = 0U; } else { } } else { } } else { } if ((unsigned int )ipv4_wait != 0U || (unsigned int )ipv6_wait != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Wait for additional IP(s) \"", ha->host_no, "qla4xxx_wait_for_ip_config"); } else { } if ((unsigned int )ipv4_wait != 0U) { if (ql4xextended_error_logging == 2) { printk("IPv4 "); } else { } } else { } if ((unsigned int )ha->ip_config.ipv6_link_local_state == 2U) { if (ql4xextended_error_logging == 2) { printk("IPv6LinkLocal "); } else { } } else { } if ((unsigned int )ha->ip_config.ipv6_addr0_state == 2U) { if (ql4xextended_error_logging == 2) { printk("IPv6Addr0 "); } else { } } else { } if ((unsigned int )ha->ip_config.ipv6_addr1_state == 2U) { if (ql4xextended_error_logging == 2) { printk("IPv6Addr1 "); } else { } } else { } if (ql4xextended_error_logging == 2) { printk("\"\n"); } else { } } else { } } else { } } else { } return ((uint8_t )((int )ipv4_wait | (int )ipv6_wait)); } } static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha , struct qla4_8xxx_minidump_template_hdr *md_hdr ) { int offset ; int tmp ; int rval ; uint32_t *cap_offset ; { tmp = is_qla8022(ha); offset = tmp != 0 ? 172 : 268; rval = 1; cap_offset = (uint32_t *)md_hdr + (unsigned long )offset; if ((*cap_offset & 1U) == 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "PEX DMA Not supported %d\n", *cap_offset); rval = 0; } else { } return (rval); } } void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha ) { int status ; uint32_t capture_debug_level ; int hdr_entry_bit ; int k ; void *md_tmp ; dma_addr_t md_tmp_dma ; struct qla4_8xxx_minidump_template_hdr *md_hdr ; int dma_capable___0 ; { if ((unsigned long )ha->fw_dump != (unsigned long )((void *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Firmware dump previously allocated.\n"); return; } else { } status = qla4xxx_req_template_size(ha); if (status != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Failed to get template size\n", ha->host_no); return; } else { } clear_bit(24L, (unsigned long volatile *)(& ha->flags)); md_tmp = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )ha->fw_dump_tmplt_size, & md_tmp_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )md_tmp == (unsigned long )((void *)0)) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Failed to allocate DMA memory\n", ha->host_no); return; } else { } status = qla4xxx_get_minidump_template(ha, md_tmp_dma); if (status != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Failed to get minidump template\n", ha->host_no); goto alloc_cleanup; } else { } md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp; dma_capable___0 = qla4_80xx_is_minidump_dma_capable(ha, md_hdr); capture_debug_level = md_hdr->capture_debug_level; if ((ql4xmdcapmask > 2 && ql4xmdcapmask <= 127) || (ql4xmdcapmask == 255 && dma_capable___0 != 0)) { ha->fw_dump_capture_mask = (uint32_t )ql4xmdcapmask; } else { if (ql4xmdcapmask == 255) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Falling back to default capture mask, as PEX DMA is not supported\n"); } else { } ha->fw_dump_capture_mask = capture_debug_level; } md_hdr->driver_capture_mask = ha->fw_dump_capture_mask; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Minimum num of entries = %d\n", md_hdr->num_of_entries); } else { } 
if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Dump template size = %d\n", ha->fw_dump_tmplt_size); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Selected Capture mask =0x%x\n", ha->fw_dump_capture_mask); } else { } hdr_entry_bit = 2; k = 1; goto ldv_63362; ldv_63361: ; if ((ha->fw_dump_capture_mask & (uint32_t )hdr_entry_bit) != 0U) { ha->fw_dump_size = ha->fw_dump_size + md_hdr->capture_size_array[k]; } else { } hdr_entry_bit = hdr_entry_bit << 1; k = k + 1; ldv_63362: ; if ((hdr_entry_bit & 255) != 0) { goto ldv_63361; } else { } ha->fw_dump_size = ha->fw_dump_size + ha->fw_dump_tmplt_size; ha->fw_dump = ldv_vmalloc_129((unsigned long )ha->fw_dump_size); if ((unsigned long )ha->fw_dump == (unsigned long )((void *)0)) { goto alloc_cleanup; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Minidump Tempalate Size = 0x%x KB\n", ha->fw_dump_tmplt_size); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Total Minidump size = 0x%x KB\n", ha->fw_dump_size); } else { } memcpy(ha->fw_dump, (void const *)md_tmp, (size_t )ha->fw_dump_tmplt_size); ha->fw_dump_tmplt_hdr = ha->fw_dump; alloc_cleanup: dma_free_attrs(& (ha->pdev)->dev, (size_t )ha->fw_dump_tmplt_size, md_tmp, md_tmp_dma, (struct dma_attrs *)0); return; } } static int qla4xxx_fw_ready(struct scsi_qla_host *ha ) { uint32_t timeout_count ; int ready ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; uint8_t tmp___3 ; int tmp___4 ; { ready = 0; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Waiting for Firmware Ready..\n"); } else { } timeout_count = 30U; goto ldv_63373; ldv_63372: tmp = test_and_clear_bit(15L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp != 0) { qla4xxx_get_dhcp_ip_address(ha); } else { } tmp___0 = qla4xxx_get_firmware_state(ha); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: unable to get firmware state\n", ha->host_no, "qla4xxx_fw_ready"); } else { } goto ldv_63370; } else { } if ((ha->firmware_state & 4U) != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: an unrecoverable error has occurred\n", ha->host_no, "qla4xxx_fw_ready"); } else { } goto ldv_63370; } else { } if ((int )ha->firmware_state & 1) { tmp___1 = qla4xxx_initialize_fw_cb(ha); if (tmp___1 == 1) { goto ldv_63370; } else { } goto ldv_63371; } else { } if ((ha->firmware_state & 2U) != 0U) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: fwstate:AUTOCONNECT in progress\n", ha->host_no, "qla4xxx_fw_ready"); } else { } } else { } if ((ha->firmware_state & 8U) != 0U) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: fwstate: CONFIGURING IP\n", ha->host_no, "qla4xxx_fw_ready"); } else { } if (timeout_count <= 15U) { if ((ha->addl_fw_state & 16U) != 0U) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: LINK UP (Cable plugged)\n", ha->host_no, "qla4xxx_fw_ready"); } else { } } else if ((ha->firmware_state & 8U) != 0U) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: LINK DOWN (Cable unplugged)\n", ha->host_no, "qla4xxx_fw_ready"); } else { } ha->firmware_state = 0U; } else { } } else { } } else { } if (ha->firmware_state == 0U) { tmp___2 = test_and_clear_bit(15L, (unsigned long volatile *)(& ha->dpc_flags)); if (tmp___2 != 0) { 
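/* Firmware state 0 means the firmware is ready: this branch first picks up
   any pending DHCP address change (what appears to be DPC flag bit 15), then
   asks qla4xxx_wait_for_ip_config() whether IPv4/IPv6 configuration is still
   outstanding. If nothing is pending, or only one second of the 30-second
   timeout remains, the media type, DHCP, link and iSNS status are logged and
   the firmware is declared ready. */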
qla4xxx_get_dhcp_ip_address(ha); } else { } tmp___3 = qla4xxx_wait_for_ip_config(ha); if ((unsigned int )tmp___3 == 0U || timeout_count == 1U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Firmware Ready..\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: MEDIA TYPE - %s\n", ha->host_no, "qla4xxx_fw_ready", (int )ha->addl_fw_state & 1 ? (char *)"OPTICAL" : (char *)"COPPER"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: DHCPv4 STATE Enabled %s\n", ha->host_no, "qla4xxx_fw_ready", (ha->addl_fw_state & 2U) != 0U ? (char *)"YES" : (char *)"NO"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: LINK %s\n", ha->host_no, "qla4xxx_fw_ready", (ha->addl_fw_state & 16U) != 0U ? (char *)"UP" : (char *)"DOWN"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: iSNS Service Started %s\n", ha->host_no, "qla4xxx_fw_ready", (ha->addl_fw_state & 32U) != 0U ? (char *)"YES" : (char *)"NO"); } else { } ready = 1; goto ldv_63370; } else { } } else { } if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: waiting on fw, state=%x:%x - seconds expired= %d\n", ha->host_no, "qla4xxx_fw_ready", ha->firmware_state, ha->addl_fw_state, timeout_count); } else { } tmp___4 = is_qla4032(ha); if ((tmp___4 != 0 && (ha->addl_fw_state & 16U) == 0U) && timeout_count <= 24U) { goto ldv_63370; } else { } msleep(1000U); ldv_63371: timeout_count = timeout_count - 1U; ldv_63373: ; if (timeout_count != 0U) { goto ldv_63372; } else { } ldv_63370: ; if (timeout_count == 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: FW Initialization timed out!\n", ha->host_no, "qla4xxx_fw_ready"); } else { } } else { } if ((ha->firmware_state & 8U) != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: FW initialized, but is reporting it\'s waiting to configure an IP address\n", ha->host_no, "qla4xxx_fw_ready"); } else { } ready = 1; } else if ((ha->firmware_state & 2U) != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: FW initialized, but auto-discovery still in process\n", ha->host_no, "qla4xxx_fw_ready"); } else { } ready = 1; } else { } return (ready); } } static int qla4xxx_init_firmware(struct scsi_qla_host *ha ) { int status ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { status = 1; tmp = is_aer_supported(ha); if (tmp != 0) { tmp___0 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { return (status); } else { } } else { } tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { qla4_8xxx_stop_firmware(ha); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Initializing firmware..\n"); tmp___2 = qla4xxx_initialize_fw_cb(ha); if (tmp___2 == 1) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Failed to initialize firmware control block\n", ha->host_no, "qla4xxx_init_firmware"); } else { } return (status); } else { } tmp___3 = qla4xxx_fw_ready(ha); if (tmp___3 == 0) { return (status); } else { } tmp___4 = is_qla80XX(ha); if (tmp___4 != 0) { tmp___5 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp___5 == 0) { qla4xxx_alloc_fw_dump(ha); } else { } } else { } 
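/* qla4xxx_init_firmware() ends by returning the result of
   qla4xxx_get_firmware_status(); everything before this point stops any
   running ISP80xx firmware, initialises the firmware control block
   (qla4xxx_initialize_fw_cb), waits for the READY state in
   qla4xxx_fw_ready() and, on ISP80xx parts, sets aside the minidump buffer
   via qla4xxx_alloc_fw_dump() unless the flag tested above is already set. */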
tmp___6 = qla4xxx_get_firmware_status(ha); return (tmp___6); } } static void qla4xxx_set_model_info(struct scsi_qla_host *ha ) { uint16_t board_id_string[8U] ; int i ; int size ; int offset ; { size = 16; offset = 4; i = 0; goto ldv_63387; ldv_63386: board_id_string[i] = rd_nvram_word(ha, offset); offset = offset + 1; i = i + 1; ldv_63387: ; if (size / 2 > i) { goto ldv_63386; } else { } memcpy((void *)(& ha->model_name), (void const *)(& board_id_string), (size_t )size); return; } } static int qla4xxx_config_nvram(struct scsi_qla_host *ha ) { unsigned long flags ; union external_hw_config_reg extHwConfig ; int tmp ; int tmp___0 ; int tmp___1 ; u16 tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; void *tmp___9 ; void *tmp___10 ; { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, "qla4xxx_config_nvram"); } else { } tmp = ql4xxx_lock_flash(ha); if (tmp != 0) { return (1); } else { } tmp___0 = ql4xxx_lock_nvram(ha); if (tmp___0 != 0) { ql4xxx_unlock_flash(ha); return (1); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Configuring NVRAM ...\n"); tmp___6 = qla4xxx_is_nvram_configuration_valid(ha); if (tmp___6 == 0) { ldv_spin_lock(); tmp___1 = eeprom_ext_hw_conf_offset(ha); tmp___2 = rd_nvram_word(ha, tmp___1); extHwConfig.Asuint32_t = (uint32_t )tmp___2; spin_unlock_irqrestore(& ha->hardware_lock, flags); } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: EEProm checksum invalid. Please update your EEPROM\n", ha->host_no, "qla4xxx_config_nvram"); tmp___5 = is_qla4010(ha); if (tmp___5 != 0) { extHwConfig.Asuint32_t = 6418U; } else { tmp___3 = is_qla4022(ha); tmp___4 = is_qla4032(ha); if ((tmp___3 | tmp___4) != 0) { extHwConfig.Asuint32_t = 35U; } else { return (1); } } } tmp___7 = is_qla4022(ha); if (tmp___7 != 0) { qla4xxx_set_model_info(ha); } else { tmp___8 = is_qla4032(ha); if (tmp___8 != 0) { qla4xxx_set_model_info(ha); } else { strcpy((char *)(& ha->model_name), "QLA4010"); } } ldv_spin_lock(); tmp___9 = isp_ext_hw_conf(ha); writel(extHwConfig.Asuint32_t | 4294901760U, (void volatile *)tmp___9); tmp___10 = isp_ext_hw_conf(ha); readl((void const volatile *)tmp___10); spin_unlock_irqrestore(& ha->hardware_lock, flags); ql4xxx_unlock_nvram(ha); ql4xxx_unlock_flash(ha); return (0); } } void qla4_8xxx_pci_config(struct scsi_qla_host *ha ) { { pci_set_master(ha->pdev); return; } } void qla4xxx_pci_config(struct scsi_qla_host *ha ) { uint16_t w ; int status ; { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Configuring PCI space...\n"); pci_set_master(ha->pdev); status = pci_set_mwi(ha->pdev); pci_read_config_word((struct pci_dev const *)ha->pdev, 4, & w); w = (uint16_t )((unsigned int )w | 320U); w = (unsigned int )w & 64511U; pci_write_config_word((struct pci_dev const *)ha->pdev, 4, (int )w); return; } } static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha ) { int status ; unsigned long max_wait_time ; unsigned long flags ; uint32_t mbox_status ; uint32_t tmp ; int tmp___0 ; int tmp___1 ; uint32_t tmp___2 ; uint32_t ctrl_status ; unsigned short tmp___3 ; unsigned short tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; { status = 1; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Starting firmware ...\n"); ldv_spin_lock(); writel((unsigned int )jiffies, (void volatile *)(& (ha->reg)->mailbox) + 7U); tmp___0 = is_qla4022(ha); tmp___1 = is_qla4032(ha); if ((tmp___0 | tmp___1) != 0) { tmp = 
set_rmask(16U); writel(tmp, (void volatile *)(& (ha->reg)->u1.isp4022.nvram)); } else { } writel(2U, (void volatile *)(& (ha->reg)->mailbox) + 6U); readl((void const volatile *)(& (ha->reg)->mailbox) + 6U); tmp___2 = set_rmask(128U); writel(tmp___2, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: Wait up to %d seconds for boot firmware to complete...\n", ha->host_no, "qla4xxx_start_firmware_from_flash", 60); } else { } max_wait_time = (unsigned long )jiffies + 15000UL; ldv_63419: ldv_spin_lock(); tmp___3 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); ctrl_status = (uint32_t )tmp___3; tmp___4 = readw((void const volatile *)(& (ha->reg)->mailbox)); mbox_status = (uint32_t )tmp___4; spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___5 = set_rmask(32U); if ((tmp___5 & ctrl_status) != 0U) { goto ldv_63412; } else { } if (mbox_status == 16384U) { goto ldv_63412; } else { } if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: Waiting for boot firmware to complete... ctrl_sts=0x%x, remaining=%ld\n", ha->host_no, "qla4xxx_start_firmware_from_flash", ctrl_status, max_wait_time); } else { } msleep_interruptible(250U); if ((long )((unsigned long )jiffies - max_wait_time) < 0L) { goto ldv_63419; } else { } ldv_63412: ; if (mbox_status == 16384U) { ldv_spin_lock(); tmp___6 = set_rmask(32U); writel(tmp___6, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); status = 0; } else { printk("\016scsi%ld: %s: Boot firmware failed - mbox status 0x%x\n", ha->host_no, "qla4xxx_start_firmware_from_flash", mbox_status); status = 1; } return (status); } } int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a ) { int drvr_wait ; int tmp ; { drvr_wait = 60; goto ldv_63426; ldv_63425: tmp = ql4xxx_lock_drvr(a); if (tmp == 0) { ssleep(1U); if (drvr_wait != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Waiting for Global Init Semaphore(%d)...\n", a->host_no, "ql4xxx_lock_drvr_wait", drvr_wait); } else { } } else { } drvr_wait = drvr_wait + -1; } else { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Global Init Semaphore acquired\n", a->host_no, "ql4xxx_lock_drvr_wait"); } else { } return (0); } ldv_63426: ; if (drvr_wait != 0) { goto ldv_63425; } else { } return (1); } } int qla4xxx_start_firmware(struct scsi_qla_host *ha ) { unsigned long flags ; uint32_t mbox_status ; int status ; int soft_reset ; int config_chip ; int tmp ; int tmp___0 ; int tmp___1 ; void *tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; uint32_t tmp___5 ; uint32_t tmp___6 ; int tmp___7 ; void *tmp___8 ; unsigned short tmp___9 ; int tmp___10 ; int tmp___11 ; { flags = 0UL; status = 1; soft_reset = 1; config_chip = 0; tmp = is_qla4022(ha); tmp___0 = is_qla4032(ha); if ((tmp | tmp___0) != 0) { ql4xxx_set_mac_number(ha); } else { } tmp___1 = ql4xxx_lock_drvr_wait(ha); if (tmp___1 != 0) { return (1); } else { } ldv_spin_lock(); if (ql4xextended_error_logging == 2) { tmp___2 = isp_port_ctrl(ha); tmp___3 = readw((void const volatile *)tmp___2); printk("scsi%ld: %s: port_ctrl\t= 0x%08X\n", ha->host_no, "qla4xxx_start_firmware", (int )tmp___3); } else { } tmp___8 = isp_port_ctrl(ha); tmp___9 = readw((void const volatile *)tmp___8); if ((int )((short )tmp___9) < 0) { tmp___4 = readw((void const volatile *)(& 
(ha->reg)->mailbox)); mbox_status = (uint32_t )tmp___4; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: H/W Config complete - mbox[0]= 0x%x\n", ha->host_no, "qla4xxx_start_firmware", mbox_status); } else { } if (mbox_status == 0U) { config_chip = 1; soft_reset = 0; } else { tmp___5 = set_rmask(32U); writel(tmp___5, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); tmp___6 = set_rmask(16U); writel(tmp___6, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); spin_unlock_irqrestore(& ha->hardware_lock, flags); tmp___7 = qla4xxx_get_firmware_state(ha); if (tmp___7 == 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Get firmware state -- state = 0x%x\n", ha->host_no, "qla4xxx_start_firmware", ha->firmware_state); } else { } if ((int )ha->firmware_state & 1) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Firmware in known state -- config and boot, state = 0x%x\n", ha->host_no, "qla4xxx_start_firmware", ha->firmware_state); } else { } config_chip = 1; soft_reset = 0; } else { } } else if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Firmware in unknown state -- resetting, state = 0x%x\n", ha->host_no, "qla4xxx_start_firmware", ha->firmware_state); } else { } ldv_spin_lock(); } } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if (soft_reset != 0) { status = qla4xxx_soft_reset(ha); if (status == 1) { ql4xxx_unlock_drvr(ha); return (1); } else { } config_chip = 1; tmp___10 = ql4xxx_lock_drvr_wait(ha); if (tmp___10 != 0) { return (1); } else { } } else { } if (config_chip != 0) { status = qla4xxx_config_nvram(ha); if (status == 0) { status = qla4xxx_start_firmware_from_flash(ha); } else { } } else { } ql4xxx_unlock_drvr(ha); if (status == 0) { tmp___11 = test_and_clear_bit(7L, (unsigned long volatile *)(& ha->flags)); if (tmp___11 != 0) { qla4xxx_get_crash_record(ha); } else { } qla4xxx_init_rings(ha); } else { } return (status); } } void qla4xxx_free_ddb_index(struct scsi_qla_host *ha ) { int max_ddbs ; int ret ; uint32_t idx ; uint32_t next_idx ; uint32_t state ; uint32_t conn_err ; int tmp ; { idx = 0U; next_idx = 0U; state = 0U; conn_err = 0U; tmp = is_qla40XX(ha); max_ddbs = tmp != 0 ? 
256 : 512; idx = 0U; goto ldv_63449; ldv_63448: ret = qla4xxx_get_fwddb_entry(ha, (int )((uint16_t )idx), (struct dev_db_entry *)0, 0ULL, (uint32_t *)0U, & next_idx, & state, & conn_err, (uint16_t *)0U, (uint16_t *)0U); if (ret == 1) { next_idx = next_idx + 1U; goto ldv_63446; } else { } if (state == 1U || state == 6U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Freeing DDB index = 0x%x\n", idx); } else { } ret = qla4xxx_clear_ddb_entry(ha, idx); if (ret == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to clear DDB index = 0x%x\n", idx); } else { } } else { } if (next_idx == 0U) { goto ldv_63447; } else { } ldv_63446: idx = next_idx; ldv_63449: ; if ((uint32_t )max_ddbs > idx) { goto ldv_63448; } else { } ldv_63447: ; return; } } int qla4xxx_initialize_adapter(struct scsi_qla_host *ha , int is_reset ) { int status ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { status = 1; ha->eeprom_cmd_data = 0U; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Configuring PCI space...\n"); (*((ha->isp_ops)->pci_config))(ha); (*((ha->isp_ops)->disable_intrs))(ha); tmp = (*((ha->isp_ops)->start_firmware))(ha); if (tmp == 1) { goto exit_init_hba; } else { } tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { qla4_83xx_enable_mbox_intrs(ha); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { qla4_83xx_enable_mbox_intrs(ha); } else { } } tmp___2 = qla4xxx_about_firmware(ha); if (tmp___2 == 1) { goto exit_init_hba; } else { } tmp___3 = (*((ha->isp_ops)->get_sys_info))(ha); if (tmp___3 == 1) { goto exit_init_hba; } else { } qla4xxx_init_local_data(ha); status = qla4xxx_init_firmware(ha); if (status == 1) { goto exit_init_hba; } else { } if (is_reset == 1) { qla4xxx_build_ddb_list(ha, is_reset); } else { } set_bit(0L, (unsigned long volatile *)(& ha->flags)); exit_init_hba: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: initialize adapter: %s\n", ha->host_no, status == 1 ? 
(char *)"FAILED" : (char *)"SUCCEEDED"); } else { } return (status); } } int qla4xxx_ddb_change(struct scsi_qla_host *ha , uint32_t fw_ddb_index , struct ddb_entry *ddb_entry , uint32_t state ) { uint32_t old_fw_ddb_device_state ; int status ; { status = 1; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DDB - old state = 0x%x, new state = 0x%x for index [%d]\n", "qla4xxx_ddb_change", ddb_entry->fw_ddb_device_state, state, fw_ddb_index); } else { } ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case 7U: ; switch (state) { case 4U: ; case 2U: qla4xxx_update_session_conn_param(ha, ddb_entry); (*(ddb_entry->unblock_sess))(ddb_entry->sess); status = 0; goto ldv_63468; case 6U: ; case 1U: iscsi_conn_login_event(ddb_entry->conn, 0); status = 0; goto ldv_63468; } ldv_63468: ; goto ldv_63471; case 4U: ; case 2U: ; switch (state) { case 6U: iscsi_session_failure((struct iscsi_session *)(ddb_entry->sess)->dd_data, 1011); status = 0; goto ldv_63475; case 1U: clear_bit((long )fw_ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); status = 0; goto ldv_63475; } ldv_63475: ; goto ldv_63471; case 6U: ; switch (state) { case 4U: ; case 2U: (*(ddb_entry->unblock_sess))(ddb_entry->sess); qla4xxx_update_session_conn_param(ha, ddb_entry); status = 0; goto ldv_63480; case 6U: iscsi_session_failure((struct iscsi_session *)(ddb_entry->sess)->dd_data, 1011); status = 0; goto ldv_63480; } ldv_63480: ; goto ldv_63471; default: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Unknown Event\n", "qla4xxx_ddb_change"); } else { } goto ldv_63471; } ldv_63471: ; return (status); } } void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry ) { { atomic_set(& ddb_entry->relogin_timer, 0); atomic_set(& ddb_entry->retry_relogin_timer, (int )(ddb_entry->default_time2wait + 4U)); return; } } int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha , uint32_t fw_ddb_index , struct ddb_entry *ddb_entry , uint32_t state ) { uint32_t old_fw_ddb_device_state ; int status ; int tmp ; int tmp___0 ; int tmp___1 ; { status = 1; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DDB - old state = 0x%x, new state = 0x%x for index [%d]\n", "qla4xxx_flash_ddb_change", ddb_entry->fw_ddb_device_state, state, fw_ddb_index); } else { } ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case 7U: ; case 1U: ; switch (state) { case 4U: (*(ddb_entry->unblock_sess))(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = 0; goto ldv_63498; case 6U: iscsi_block_session(ddb_entry->sess); tmp = constant_test_bit(0L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp == 0) { qla4xxx_arm_relogin_timer(ddb_entry); } else { } status = 0; goto ldv_63498; } ldv_63498: ; goto ldv_63500; case 4U: ; switch (state) { case 6U: iscsi_block_session(ddb_entry->sess); tmp___0 = constant_test_bit(0L, (unsigned long const volatile *)(& ddb_entry->flags)); if (tmp___0 == 0) { qla4xxx_arm_relogin_timer(ddb_entry); } else { } status = 0; goto ldv_63503; } ldv_63503: ; goto ldv_63500; case 6U: ; switch (state) { case 4U: (*(ddb_entry->unblock_sess))(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = 0; goto ldv_63506; case 6U: tmp___1 = constant_test_bit(0L, 
(unsigned long const volatile *)(& ddb_entry->flags)); if (tmp___1 == 0) { qla4xxx_arm_relogin_timer(ddb_entry); } else { } status = 0; goto ldv_63506; } ldv_63506: ; goto ldv_63500; default: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Unknown Event\n", "qla4xxx_flash_ddb_change"); } else { } goto ldv_63500; } ldv_63500: ; return (status); } } int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha , uint32_t fw_ddb_index , uint32_t state , uint32_t conn_err ) { struct ddb_entry *ddb_entry ; int status ; { status = 1; if (fw_ddb_index > 511U) { goto exit_ddb_event; } else { } ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: No ddb_entry at FW index [%d]\n", "qla4xxx_process_ddb_changed", fw_ddb_index); if (state == 1U) { clear_bit((long )fw_ddb_index, (unsigned long volatile *)(& ha->ddb_idx_map)); } else { } goto exit_ddb_event; } else { } (*(ddb_entry->ddb_change))(ha, fw_ddb_index, ddb_entry, state); exit_ddb_event: ; return (status); } } void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session ) { struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct dev_db_entry *fw_ddb_entry ; dma_addr_t fw_ddb_dma ; uint32_t mbx_sts ; int ret ; int tmp ; void *tmp___0 ; { fw_ddb_entry = (struct dev_db_entry *)0; mbx_sts = 0U; sess = (struct iscsi_session *)cls_session->dd_data; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; tmp = constant_test_bit(8L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { return; } else { } if ((unsigned int )ddb_entry->ddb_type != 1U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Skipping login to non FLASH DB"); } else { } goto exit_login; } else { } tmp___0 = ldv_dma_pool_alloc_130(ha->fw_ddb_dma_pool, 208U, & fw_ddb_dma); fw_ddb_entry = (struct dev_db_entry *)tmp___0; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Out of memory\n"); } else { } goto exit_login; } else { } if ((unsigned int )ddb_entry->fw_ddb_index == 65535U) { ret = qla4xxx_get_ddb_index(ha, & ddb_entry->fw_ddb_index); if (ret == 1) { goto exit_login; } else { } ha->fw_ddb_index_map[(int )ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs = ha->tot_ddbs + 1U; } else { } memcpy((void *)fw_ddb_entry, (void const *)(& ddb_entry->fw_ddb_entry), 512UL); (ddb_entry->sess)->target_id = (unsigned int )ddb_entry->fw_ddb_index; ret = qla4xxx_set_ddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_dma, & mbx_sts); if (ret == 1) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Set DDB failed\n"); } else { } goto exit_login; } else { } ddb_entry->fw_ddb_device_state = 7U; ret = qla4xxx_conn_open(ha, (int )ddb_entry->fw_ddb_index); if (ret == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Login failed: %s\n", "qla4xxx_login_flash_ddb", sess->targetname); goto exit_login; } else { } exit_login: ; if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { dma_pool_free(ha->fw_ddb_dma_pool, (void *)fw_ddb_entry, fw_ddb_dma); } else { } return; } } bool ldv_queue_work_on_100(int ldv_func_arg1 , struct workqueue_struct 
*ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_101(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_102(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_103(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_104(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_110(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_116(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_118(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_120(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_121(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_122(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_123(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_124(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_125(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_126(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff 
*)tmp); } } void *ldv_kmem_cache_alloc_127(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_128(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } void *ldv_vmalloc_129(unsigned long ldv_func_arg1 ) { void *tmp ; { ldv_check_alloc_nonatomic(); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_130(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern char *skip_spaces(char const * ) ; __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; extern void complete(struct completion * ) ; bool ldv_queue_work_on_151(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_153(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_152(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_154(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_161(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_178(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; void *ldv_dma_pool_alloc_180(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; void *ldv_dma_pool_alloc_181(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) ; struct sk_buff *ldv_skb_clone_169(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_177(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_171(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_167(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_175(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_176(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_172(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_173(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_174(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; extern void int_to_scsilun(u64 , struct scsi_lun * ) ; int ldv_scsi_add_host_with_dma_179(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha , uint16_t fw_ddb_index , uint16_t connection_id , uint16_t option ) ; void qla4xxx_dump_buffer(void *b , uint32_t size ) ; int qla4xxx_mailbox_command(struct 
scsi_qla_host *ha , uint8_t inCount , uint8_t outCount , uint32_t *mbx_cmd , uint32_t *mbx_sts ) ; void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha ) ; int qla4xxx_get_nvram(struct scsi_qla_host *ha , dma_addr_t nvram_dma , uint32_t offset , uint32_t size ) ; int qla4xxx_set_nvram(struct scsi_qla_host *ha , dma_addr_t nvram_dma , uint32_t offset , uint32_t size ) ; int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha , uint32_t region , uint32_t field0 , uint32_t field1 ) ; uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state ) ; int qla4_83xx_get_port_config(struct scsi_qla_host *ha , uint32_t *config ) ; int qla4_83xx_set_port_config(struct scsi_qla_host *ha , uint32_t *config ) ; void qla4xxx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int in_count ) { int i ; uint32_t tmp ; { i = 1; goto ldv_63310; ldv_63309: writel(*(mbx_cmd + (unsigned long )i), (void volatile *)(& (ha->reg)->mailbox) + (unsigned long )i); i = i + 1; ldv_63310: ; if (i < in_count) { goto ldv_63309; } else { } writel(*mbx_cmd, (void volatile *)(& (ha->reg)->mailbox)); readl((void const volatile *)(& (ha->reg)->mailbox)); tmp = set_rmask(64U); writel(tmp, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); return; } } void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha , int out_count ) { int intr_status ; unsigned int tmp ; { tmp = readl((void const volatile *)(& (ha->reg)->ctrl_status)); intr_status = (int )tmp; if ((intr_status & 56) != 0) { ha->mbox_status_count = (uint8_t volatile )out_count; (*((ha->isp_ops)->interrupt_service_routine))(ha, (uint32_t )intr_status); } else { } return; } } static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha ) { int rval ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { rval = 1; tmp___5 = is_qla8032(ha); if (tmp___5 != 0) { goto _L; } else { tmp___6 = is_qla8042(ha); if (tmp___6 != 0) { _L: /* CIL Label */ tmp = constant_test_bit(10L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { tmp___0 = constant_test_bit(29L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { rval = 0; } else { } } else { } } else { tmp___1 = constant_test_bit(10L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { tmp___2 = constant_test_bit(6L, (unsigned long const volatile *)(& ha->flags)); if (tmp___2 != 0) { tmp___3 = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp___3 != 0) { tmp___4 = constant_test_bit(12L, (unsigned long const volatile *)(& ha->flags)); if (tmp___4 == 0) { rval = 0; } else { } } else { } } else { } } else { } } } return (rval); } } int qla4xxx_mailbox_command(struct scsi_qla_host *ha , uint8_t inCount , uint8_t outCount , uint32_t *mbx_cmd , uint32_t *mbx_sts ) { int status ; uint8_t i ; u_long wait_count ; unsigned long flags ; uint32_t dev_state ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; u_long tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; { status = 1; flags = 0UL; if ((unsigned long )mbx_cmd == (unsigned long )((uint32_t *)0U) || (unsigned long )mbx_sts == (unsigned long )((uint32_t *)0U)) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts pointer\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } return (status); } else { } tmp___0 = is_qla40XX(ha); 
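/* Mailbox command path from here on: bail out early if ISP40xx adapter removal or an
 * AER permanent failure has been flagged, retry-acquire the mbox semaphore, skip the
 * command while ISP8xxx firmware recovery is in progress or the device state reads as
 * failed, then queue the mailbox registers under hardware_lock and wait for completion
 * either by polling the interrupt service routine or via the mbx_intr_comp completion. */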
if (tmp___0 != 0) { tmp = constant_test_bit(12L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: prematurely completing mbx cmd as adapter removal detected\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } return (status); } else { } } else { } tmp___1 = is_aer_supported(ha); if (tmp___1 != 0) { tmp___2 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->flags)); if (tmp___2 != 0) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: Perm failure on EEH, timeout MBX Exiting.\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } return (status); } else { } } else { } wait_count = 6000UL; goto ldv_63336; ldv_63335: mutex_lock_nested(& ha->mbox_sem, 0U); tmp___3 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->flags)); if (tmp___3 == 0) { set_bit(2L, (unsigned long volatile *)(& ha->flags)); mutex_unlock(& ha->mbox_sem); goto ldv_63334; } else { } mutex_unlock(& ha->mbox_sem); if (wait_count == 0UL) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: mbox_sem failed\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } return (status); } else { } msleep(10U); ldv_63336: tmp___4 = wait_count; wait_count = wait_count - 1UL; if (tmp___4 != 0UL) { goto ldv_63335; } else { } ldv_63334: tmp___7 = is_qla80XX(ha); if (tmp___7 != 0) { tmp___5 = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp___5 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } goto mbox_exit; } else { } (*((ha->isp_ops)->idc_lock))(ha); tmp___6 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___6; (*((ha->isp_ops)->idc_unlock))(ha); if (dev_state == 6U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n", ha->host_no, "qla4xxx_mailbox_command"); goto mbox_exit; } else { } } else { } ldv_spin_lock(); ha->mbox_status_count = outCount; i = 0U; goto ldv_63339; ldv_63338: ha->mbox_status[(int )i] = 0U; i = (uint8_t )((int )i + 1); ldv_63339: ; if ((int )i < (int )outCount) { goto ldv_63338; } else { } (*((ha->isp_ops)->queue_mailbox_command))(ha, mbx_cmd, (int )inCount); spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((unsigned int )outCount == 0U) { status = 0; goto mbox_exit; } else { } tmp___9 = qla4xxx_is_intr_poll_mode(ha); if (tmp___9 != 0) { wait_count = (unsigned long )jiffies + 15000UL; goto ldv_63349; ldv_63348: ; if ((long )((unsigned long )jiffies - wait_count) >= 0L) { goto ldv_63347; } else { } ldv_spin_lock(); (*((ha->isp_ops)->process_mailbox_interrupt))(ha, (int )outCount); spin_unlock_irqrestore(& ha->hardware_lock, flags); msleep(10U); ldv_63349: tmp___8 = constant_test_bit(3L, (unsigned long const volatile *)(& ha->flags)); if (tmp___8 == 0) { goto ldv_63348; } else { } ldv_63347: ; } else { set_bit(18L, (unsigned long volatile *)(& ha->flags)); wait_for_completion_timeout(& ha->mbx_intr_comp, 15000UL); clear_bit(18L, (unsigned long volatile *)(& ha->flags)); } tmp___15 = constant_test_bit(3L, (unsigned long const volatile *)(& ha->flags)); if (tmp___15 == 0) { tmp___10 = is_qla80XX(ha); if (tmp___10 != 0) { tmp___11 = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp___11 != 0) { if 
(ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n", ha->host_no, "qla4xxx_mailbox_command"); } else { } goto mbox_exit; } else { } } else { } dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: Mailbox Cmd 0x%08X timed out, Scheduling Adapter Reset\n", ha->host_no, *mbx_cmd); ha->mailbox_timeout_count = ha->mailbox_timeout_count + 1U; *mbx_sts = 4294967295U; set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); tmp___14 = is_qla8022(ha); if (tmp___14 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "disabling pause transmit on port 0 & 1.\n"); qla4_82xx_wr_32(ha, 106954904UL, 9U); } else { tmp___12 = is_qla8032(ha); if (tmp___12 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), " %s: disabling pause transmit on port 0 & 1.\n", "qla4xxx_mailbox_command"); qla4_83xx_disable_pause(ha); } else { tmp___13 = is_qla8042(ha); if (tmp___13 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), " %s: disabling pause transmit on port 0 & 1.\n", "qla4xxx_mailbox_command"); qla4_83xx_disable_pause(ha); } else { } } } goto mbox_exit; } else { } ldv_spin_lock(); i = 0U; goto ldv_63351; ldv_63350: *(mbx_sts + (unsigned long )i) = ha->mbox_status[(int )i]; i = (uint8_t )((int )i + 1); ldv_63351: ; if ((int )i < (int )outCount) { goto ldv_63350; } else { } switch (ha->mbox_status[0]) { case (uint32_t volatile )16384: status = 0; goto ldv_63354; case (uint32_t volatile )4096: status = 0; goto ldv_63354; case (uint32_t volatile )7: dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Cmd = %08X, ISP BUSY\n", ha->host_no, "qla4xxx_mailbox_command", *mbx_cmd); ha->mailbox_timeout_count = ha->mailbox_timeout_count + 1U; goto ldv_63354; default: dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: FAILED, MBOX CMD = %08X, MBOX STS = %08X %08X %08X %08X %08X %08X %08X %08X\n", ha->host_no, "qla4xxx_mailbox_command", *mbx_cmd, *mbx_sts, *(mbx_sts + 1UL), *(mbx_sts + 2UL), *(mbx_sts + 3UL), *(mbx_sts + 4UL), *(mbx_sts + 5UL), *(mbx_sts + 6UL), *(mbx_sts + 7UL)); goto ldv_63354; } ldv_63354: spin_unlock_irqrestore(& ha->hardware_lock, flags); mbox_exit: mutex_lock_nested(& ha->mbox_sem, 0U); clear_bit(2L, (unsigned long volatile *)(& ha->flags)); mutex_unlock(& ha->mbox_sem); clear_bit(3L, (unsigned long volatile *)(& ha->flags)); return (status); } } int qla4xxx_get_minidump_template(struct scsi_qla_host *ha , dma_addr_t phys_addr ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 297U; mbox_cmd[1] = 1U; mbox_cmd[2] = (unsigned int )phys_addr; mbox_cmd[3] = (unsigned int )(phys_addr >> 32ULL); mbox_cmd[4] = ha->fw_dump_tmplt_size; mbox_cmd[5] = 0U; status = qla4xxx_mailbox_command(ha, 8, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n", ha->host_no, "qla4xxx_get_minidump_template", mbox_cmd[0], mbox_sts[0], mbox_sts[1]); } else { } } else { } return (status); } } int qla4xxx_req_template_size(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 
297U; mbox_cmd[1] = 0U; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status == 0) { ha->fw_dump_tmplt_size = mbox_sts[1]; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n", "qla4xxx_req_template_size", mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7]); } else { } if (ha->fw_dump_tmplt_size == 0U) { status = 1; } else { } } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n", "qla4xxx_req_template_size", mbox_sts[0], mbox_sts[1]); status = 1; } return (status); } } void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; { set_bit(19L, (unsigned long volatile *)(& ha->flags)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: set FW RECOVERY!\n", ha->host_no, "qla4xxx_mailbox_premature_completion"); tmp___0 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { tmp = constant_test_bit(18L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { complete(& ha->mbx_intr_comp); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Due to fw recovery, doing premature completion of mbx cmd\n", ha->host_no, "qla4xxx_mailbox_premature_completion"); } else { set_bit(3L, (unsigned long volatile *)(& ha->flags)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Due to fw recovery, doing premature completion of polling mbx cmd\n", ha->host_no, "qla4xxx_mailbox_premature_completion"); } } else { } return; } } static uint8_t qla4xxx_set_ifcb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , dma_addr_t init_fw_cb_dma ) { int tmp ; int tmp___0 ; { memset((void *)mbox_cmd, 0, 32UL); memset((void *)mbox_sts, 0, 32UL); tmp = is_qla8022(ha); if (tmp != 0) { qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, 0U); } else { } *mbox_cmd = 96U; *(mbox_cmd + 1UL) = 0U; *(mbox_cmd + 2UL) = (unsigned int )init_fw_cb_dma; *(mbox_cmd + 3UL) = (unsigned int )(init_fw_cb_dma >> 32ULL); *(mbox_cmd + 4UL) = 768U; tmp___0 = qla4xxx_mailbox_command(ha, 6, 6, mbox_cmd, mbox_sts); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE failed w/ status %04X\n", ha->host_no, "qla4xxx_set_ifcb", *mbox_sts); } else { } return (1U); } else { } return (0U); } } uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , dma_addr_t init_fw_cb_dma ) { int tmp ; { memset((void *)mbox_cmd, 0, 32UL); memset((void *)mbox_sts, 0, 32UL); *mbox_cmd = 97U; *(mbox_cmd + 2UL) = (unsigned int )init_fw_cb_dma; *(mbox_cmd + 3UL) = (unsigned int )(init_fw_cb_dma >> 32ULL); *(mbox_cmd + 4UL) = 768U; tmp = qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: MBOX_CMD_GET_INIT_FW_CTRL_BLOCK failed w/ status %04X\n", ha->host_no, "qla4xxx_get_ifcb", *mbox_sts); } else { } return (1U); } else { } return (0U); } } uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state ) { uint8_t ipaddr_state ; { switch ((int )fw_ipaddr_state) { case 0: ipaddr_state = 0U; goto ldv_63396; case 1: ipaddr_state = 5U; goto ldv_63396; case 2: ipaddr_state = 1U; goto 
ldv_63396; case 3: ipaddr_state = 2U; goto ldv_63396; case 4: ipaddr_state = 6U; goto ldv_63396; case 5: ipaddr_state = 3U; goto ldv_63396; case 6: ipaddr_state = 4U; goto ldv_63396; default: ipaddr_state = 0U; } ldv_63396: ; return (ipaddr_state); } } static void qla4xxx_update_local_ip(struct scsi_qla_host *ha , struct addr_ctrl_blk *init_fw_cb ) { unsigned long _min1 ; unsigned long _min2 ; unsigned long _min1___0 ; unsigned long _min2___0 ; unsigned long _min1___1 ; unsigned long _min2___1 ; __u16 tmp ; unsigned long _min1___2 ; unsigned long _min2___2 ; unsigned long _min1___3 ; unsigned long _min2___3 ; unsigned long _min1___4 ; unsigned long _min2___4 ; unsigned long _min1___5 ; unsigned long _min2___5 ; unsigned long _min1___6 ; unsigned long _min2___6 ; unsigned long _min1___7 ; unsigned long _min2___7 ; unsigned long _min1___8 ; unsigned long _min2___8 ; __u16 tmp___0 ; int tmp___1 ; { ha->ip_config.tcp_options = init_fw_cb->ipv4_tcp_opts; ha->ip_config.ipv4_options = init_fw_cb->ipv4_ip_opts; ha->ip_config.ipv4_addr_state = qla4xxx_set_ipaddr_state((int )init_fw_cb->ipv4_addr_state); ha->ip_config.eth_mtu_size = init_fw_cb->eth_mtu_size; ha->ip_config.ipv4_port = init_fw_cb->ipv4_port; if ((unsigned int )ha->acb_version == 2U) { ha->ip_config.ipv6_options = (uint32_t )init_fw_cb->ipv6_opts; ha->ip_config.ipv6_addl_options = (uint32_t )init_fw_cb->ipv6_addtl_opts; ha->ip_config.ipv6_tcp_options = init_fw_cb->ipv6_tcp_opts; } else { } _min1 = 4UL; _min2 = 4UL; memcpy((void *)(& ha->ip_config.ip_address), (void const *)(& init_fw_cb->ipv4_addr), _min1 < _min2 ? _min1 : _min2); _min1___0 = 4UL; _min2___0 = 4UL; memcpy((void *)(& ha->ip_config.subnet_mask), (void const *)(& init_fw_cb->ipv4_subnet), _min1___0 < _min2___0 ? _min1___0 : _min2___0); _min1___1 = 4UL; _min2___1 = 4UL; memcpy((void *)(& ha->ip_config.gateway), (void const *)(& init_fw_cb->ipv4_gw_addr), _min1___1 < _min2___1 ? _min1___1 : _min2___1); tmp = __fswab16((int )init_fw_cb->ipv4_vlan_tag); ha->ip_config.ipv4_vlan_tag = tmp; ha->ip_config.control = init_fw_cb->control; ha->ip_config.tcp_wsf = init_fw_cb->ipv4_tcp_wsf; ha->ip_config.ipv4_tos = init_fw_cb->ipv4_tos; ha->ip_config.ipv4_cache_id = init_fw_cb->ipv4_cacheid; ha->ip_config.ipv4_alt_cid_len = init_fw_cb->ipv4_dhcp_alt_cid_len; _min1___2 = 11UL; _min2___2 = 11UL; memcpy((void *)(& ha->ip_config.ipv4_alt_cid), (void const *)(& init_fw_cb->ipv4_dhcp_alt_cid), _min1___2 < _min2___2 ? _min1___2 : _min2___2); ha->ip_config.ipv4_vid_len = init_fw_cb->ipv4_dhcp_vid_len; _min1___3 = 11UL; _min2___3 = 11UL; memcpy((void *)(& ha->ip_config.ipv4_vid), (void const *)(& init_fw_cb->ipv4_dhcp_vid), _min1___3 < _min2___3 ? _min1___3 : _min2___3); ha->ip_config.ipv4_ttl = init_fw_cb->ipv4_ttl; ha->ip_config.def_timeout = init_fw_cb->def_timeout; ha->ip_config.abort_timer = init_fw_cb->abort_timer; ha->ip_config.iscsi_options = init_fw_cb->iscsi_opts; ha->ip_config.iscsi_max_pdu_size = init_fw_cb->iscsi_max_pdu_size; ha->ip_config.iscsi_first_burst_len = init_fw_cb->iscsi_fburst_len; ha->ip_config.iscsi_max_outstnd_r2t = init_fw_cb->iscsi_max_outstnd_r2t; ha->ip_config.iscsi_max_burst_len = init_fw_cb->iscsi_max_burst_len; _min1___4 = 224UL; _min2___4 = 224UL; memcpy((void *)(& ha->ip_config.iscsi_name), (void const *)(& init_fw_cb->iscsi_name), _min1___4 < _min2___4 ? 
_min1___4 : _min2___4); tmp___1 = is_ipv6_enabled(ha); if (tmp___1 != 0) { ha->ip_config.ipv6_link_local_state = qla4xxx_set_ipaddr_state((int )init_fw_cb->ipv6_lnk_lcl_addr_state); ha->ip_config.ipv6_addr0_state = qla4xxx_set_ipaddr_state((int )init_fw_cb->ipv6_addr0_state); ha->ip_config.ipv6_addr1_state = qla4xxx_set_ipaddr_state((int )init_fw_cb->ipv6_addr1_state); switch ((int )init_fw_cb->ipv6_dflt_rtr_state) { case 0: ha->ip_config.ipv6_default_router_state = 0U; goto ldv_63427; case 1: ha->ip_config.ipv6_default_router_state = 2U; goto ldv_63427; case 3: ha->ip_config.ipv6_default_router_state = 1U; goto ldv_63427; case 4: ha->ip_config.ipv6_default_router_state = 3U; goto ldv_63427; default: ha->ip_config.ipv6_default_router_state = 0U; } ldv_63427: ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 254U; ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 128U; _min1___5 = 8UL; _min2___5 = 8UL; memcpy((void *)(& ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8) + 8U, (void const *)(& init_fw_cb->ipv6_if_id), _min1___5 < _min2___5 ? _min1___5 : _min2___5); _min1___6 = 16UL; _min2___6 = 16UL; memcpy((void *)(& ha->ip_config.ipv6_addr0), (void const *)(& init_fw_cb->ipv6_addr0), _min1___6 < _min2___6 ? _min1___6 : _min2___6); _min1___7 = 16UL; _min2___7 = 16UL; memcpy((void *)(& ha->ip_config.ipv6_addr1), (void const *)(& init_fw_cb->ipv6_addr1), _min1___7 < _min2___7 ? _min1___7 : _min2___7); _min1___8 = 16UL; _min2___8 = 16UL; memcpy((void *)(& ha->ip_config.ipv6_default_router_addr), (void const *)(& init_fw_cb->ipv6_dflt_rtr_addr), _min1___8 < _min2___8 ? _min1___8 : _min2___8); tmp___0 = __fswab16((int )init_fw_cb->ipv6_vlan_tag); ha->ip_config.ipv6_vlan_tag = tmp___0; ha->ip_config.ipv6_port = init_fw_cb->ipv6_port; ha->ip_config.ipv6_cache_id = init_fw_cb->ipv6_cache_id; ha->ip_config.ipv6_flow_lbl = init_fw_cb->ipv6_flow_lbl; ha->ip_config.ipv6_traffic_class = init_fw_cb->ipv6_traffic_class; ha->ip_config.ipv6_hop_limit = init_fw_cb->ipv6_hop_limit; ha->ip_config.ipv6_nd_reach_time = init_fw_cb->ipv6_nd_reach_time; ha->ip_config.ipv6_nd_rexmit_timer = init_fw_cb->ipv6_nd_rexmit_timer; ha->ip_config.ipv6_nd_stale_timeout = init_fw_cb->ipv6_nd_stale_timeout; ha->ip_config.ipv6_dup_addr_detect_count = init_fw_cb->ipv6_dup_addr_detect_count; ha->ip_config.ipv6_gw_advrt_mtu = init_fw_cb->ipv6_gw_advrt_mtu; ha->ip_config.ipv6_tcp_wsf = init_fw_cb->ipv6_tcp_wsf; } else { } return; } } uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , struct addr_ctrl_blk *init_fw_cb , dma_addr_t init_fw_cb_dma ) { uint8_t tmp ; unsigned long _min1 ; unsigned long _min2 ; { tmp = qla4xxx_get_ifcb(ha, mbox_cmd, mbox_sts, init_fw_cb_dma); if ((unsigned int )tmp != 0U) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: Failed to get init_fw_ctrl_blk\n", ha->host_no, "qla4xxx_update_local_ifcb"); } else { } return (1U); } else { } if (ql4xextended_error_logging == 2) { qla4xxx_dump_buffer((void *)init_fw_cb, 768U); } else { } ha->acb_version = init_fw_cb->acb_version; ha->firmware_options = init_fw_cb->fw_options; ha->heartbeat_interval = init_fw_cb->hb_interval; _min1 = 256UL; _min2 = 224UL; memcpy((void *)(& ha->name_string), (void const *)(& init_fw_cb->iscsi_name), _min1 < _min2 ? 
_min1 : _min2); ha->def_timeout = init_fw_cb->def_timeout; qla4xxx_update_local_ip(ha, init_fw_cb); return (0U); } } int qla4xxx_initialize_fw_cb(struct scsi_qla_host *ha ) { struct addr_ctrl_blk *init_fw_cb ; dma_addr_t init_fw_cb_dma ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; void *tmp ; uint8_t tmp___0 ; int tmp___1 ; uint8_t tmp___2 ; uint8_t tmp___3 ; { status = 1; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 768UL, & init_fw_cb_dma, 208U, (struct dma_attrs *)0); init_fw_cb = (struct addr_ctrl_blk *)tmp; if ((unsigned long )init_fw_cb == (unsigned long )((struct addr_ctrl_blk *)0)) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no, "qla4xxx_initialize_fw_cb"); } else { } goto exit_init_fw_cb_no_free; } else { } memset((void *)init_fw_cb, 0, 768UL); memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); tmp___0 = qla4xxx_get_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb_dma); if ((unsigned int )tmp___0 != 0U) { dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)init_fw_cb, init_fw_cb_dma, (struct dma_attrs *)0); goto exit_init_fw_cb; } else { } init_fw_cb->rqq_consumer_idx = ha->request_out; init_fw_cb->compq_producer_idx = ha->response_in; init_fw_cb->rqq_len = 1024U; init_fw_cb->compq_len = 64U; init_fw_cb->rqq_addr_lo = (unsigned int )ha->request_dma; init_fw_cb->rqq_addr_hi = (unsigned int )(ha->request_dma >> 32ULL); init_fw_cb->compq_addr_lo = (unsigned int )ha->response_dma; init_fw_cb->compq_addr_hi = (unsigned int )(ha->response_dma >> 32ULL); init_fw_cb->shdwreg_addr_lo = (unsigned int )ha->shadow_regs_dma; init_fw_cb->shdwreg_addr_hi = (unsigned int )(ha->shadow_regs_dma >> 32ULL); init_fw_cb->fw_options = (uint16_t )((unsigned int )init_fw_cb->fw_options | 96U); tmp___1 = is_qla80XX(ha); if (tmp___1 != 0) { init_fw_cb->fw_options = (uint16_t )((unsigned int )init_fw_cb->fw_options | 32768U); } else { } init_fw_cb->fw_options = (unsigned int )init_fw_cb->fw_options & 65519U; init_fw_cb->add_fw_options = 0U; init_fw_cb->add_fw_options = (uint16_t )((unsigned int )init_fw_cb->add_fw_options | 1024U); init_fw_cb->add_fw_options = (uint16_t )((unsigned int )init_fw_cb->add_fw_options | 2U); tmp___2 = qla4xxx_set_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb_dma); if ((unsigned int )tmp___2 != 0U) { if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: %s: Failed to set init_fw_ctrl_blk\n", ha->host_no, "qla4xxx_initialize_fw_cb"); } else { } goto exit_init_fw_cb; } else { } tmp___3 = qla4xxx_update_local_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb, init_fw_cb_dma); if ((unsigned int )tmp___3 != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Failed to update local ifcb\n", ha->host_no, "qla4xxx_initialize_fw_cb"); } else { } goto exit_init_fw_cb; } else { } status = 0; exit_init_fw_cb: dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)init_fw_cb, init_fw_cb_dma, (struct dma_attrs *)0); exit_init_fw_cb_no_free: ; return (status); } } int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha ) { struct addr_ctrl_blk *init_fw_cb ; dma_addr_t init_fw_cb_dma ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; void *tmp ; uint8_t tmp___0 ; { tmp = dma_alloc_attrs(& (ha->pdev)->dev, 768UL, & init_fw_cb_dma, 208U, (struct dma_attrs *)0); init_fw_cb = (struct addr_ctrl_blk *)tmp; if ((unsigned long )init_fw_cb == (unsigned long )((struct addr_ctrl_blk *)0)) { printk("scsi%ld: %s: Unable to alloc 
init_cb\n", ha->host_no, "qla4xxx_get_dhcp_ip_address"); return (1); } else { } memset((void *)init_fw_cb, 0, 768UL); tmp___0 = qla4xxx_get_ifcb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), init_fw_cb_dma); if ((unsigned int )tmp___0 != 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", ha->host_no, "qla4xxx_get_dhcp_ip_address"); } else { } dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)init_fw_cb, init_fw_cb_dma, (struct dma_attrs *)0); return (1); } else { } qla4xxx_update_local_ip(ha, init_fw_cb); dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)init_fw_cb, init_fw_cb_dma, (struct dma_attrs *)0); return (0); } } int qla4xxx_get_firmware_state(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 105U; tmp = qla4xxx_mailbox_command(ha, 8, 4, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ status %04X\n", ha->host_no, "qla4xxx_get_firmware_state", mbox_sts[0]); } else { } return (1); } else { } ha->firmware_state = mbox_sts[1]; ha->board_id = mbox_sts[2]; ha->addl_fw_state = mbox_sts[3]; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s firmware_state=0x%x\n", ha->host_no, "qla4xxx_get_firmware_state", ha->firmware_state); } else { } return (0); } } int qla4xxx_get_firmware_status(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 31U; tmp = qla4xxx_mailbox_command(ha, 8, 3, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ status %04X\n", ha->host_no, "qla4xxx_get_firmware_status", mbox_sts[0]); } else { } return (1); } else { } ha->iocb_hiwat = (uint16_t )mbox_sts[2]; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: firmware IOCBs available = %d\n", "qla4xxx_get_firmware_status", (int )ha->iocb_hiwat); } else { } if ((unsigned int )ha->iocb_hiwat > 4U) { ha->iocb_hiwat = (unsigned int )ha->iocb_hiwat + 65532U; } else { } if ((unsigned int )ha->iocb_hiwat == 0U) { ha->iocb_hiwat = 256U; if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Setting IOCB\'s to = %d\n", "qla4xxx_get_firmware_status", (int )ha->iocb_hiwat); } else { } } else { } return (0); } } int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha , uint16_t fw_ddb_index , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint32_t *num_valid_ddb_entries , uint32_t *next_ddb_index , uint32_t *fw_ddb_device_state , uint32_t *conn_err_detail , uint16_t *tcp_source_port_num , uint16_t *connection_id ) { int status ; uint16_t options ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { status = 1; if ((unsigned int )fw_ddb_index > 511U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: ddb [%d] out of range.\n", ha->host_no, "qla4xxx_get_fwddb_entry", (int )fw_ddb_index); } else { } goto exit_get_fwddb; } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { memset((void *)fw_ddb_entry, 0, 512UL); } else { } mbox_cmd[0] = 100U; 
mbox_cmd[1] = (unsigned int )fw_ddb_index; mbox_cmd[2] = (unsigned int )fw_ddb_entry_dma; mbox_cmd[3] = (unsigned int )(fw_ddb_entry_dma >> 32ULL); mbox_cmd[4] = 512U; tmp = qla4xxx_mailbox_command(ha, 8, 7, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp == 1) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed with status 0x%04X\n", ha->host_no, "qla4xxx_get_fwddb_entry", mbox_sts[0]); } else { } goto exit_get_fwddb; } else { } if ((uint32_t )fw_ddb_index != mbox_sts[1]) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: ddb mismatch [%d] != [%d].\n", ha->host_no, "qla4xxx_get_fwddb_entry", (int )fw_ddb_index, mbox_sts[1]); } else { } goto exit_get_fwddb; } else { } if ((unsigned long )fw_ddb_entry != (unsigned long )((struct dev_db_entry *)0)) { options = fw_ddb_entry->options; if (((int )options & 256) != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DDB[%d] MB0 %04x Tot %d Next %d State %04x ConnErr %08x %pI6 :%04d \"%s\"\n", "qla4xxx_get_fwddb_entry", (int )fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], (uint8_t *)(& fw_ddb_entry->ip_addr), (int )fw_ddb_entry->port, (uint8_t *)(& fw_ddb_entry->iscsi_name)); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DDB[%d] MB0 %04x Tot %d Next %d State %04x ConnErr %08x %pI4 :%04d \"%s\"\n", "qla4xxx_get_fwddb_entry", (int )fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], (uint8_t *)(& fw_ddb_entry->ip_addr), (int )fw_ddb_entry->port, (uint8_t *)(& fw_ddb_entry->iscsi_name)); } } else { } if ((unsigned long )num_valid_ddb_entries != (unsigned long )((uint32_t *)0U)) { *num_valid_ddb_entries = mbox_sts[2]; } else { } if ((unsigned long )next_ddb_index != (unsigned long )((uint32_t *)0U)) { *next_ddb_index = mbox_sts[3]; } else { } if ((unsigned long )fw_ddb_device_state != (unsigned long )((uint32_t *)0U)) { *fw_ddb_device_state = mbox_sts[4]; } else { } if ((unsigned long )conn_err_detail != (unsigned long )((uint32_t *)0U)) { *conn_err_detail = mbox_sts[5]; } else { } if ((unsigned long )tcp_source_port_num != (unsigned long )((uint16_t *)0U)) { *tcp_source_port_num = (unsigned short )(mbox_sts[6] >> 16); } else { } if ((unsigned long )connection_id != (unsigned long )((uint16_t *)0U)) { *connection_id = (unsigned int )((uint16_t )mbox_sts[6]) & 255U; } else { } status = 0; exit_get_fwddb: ; return (status); } } int qla4xxx_conn_open(struct scsi_qla_host *ha , uint16_t fw_ddb_index ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 116U; mbox_cmd[1] = (uint32_t )fw_ddb_index; status = qla4xxx_mailbox_command(ha, 8, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n", "qla4xxx_conn_open", status, mbox_sts[0], mbox_sts[1]); } else { } return (status); } } int qla4xxx_set_ddb_entry(struct scsi_qla_host *ha , uint16_t fw_ddb_index , dma_addr_t fw_ddb_entry_dma , uint32_t *mbx_sts ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 99U; mbox_cmd[1] = (unsigned int )fw_ddb_index; mbox_cmd[2] = (unsigned int )fw_ddb_entry_dma; mbox_cmd[3] = (unsigned int )(fw_ddb_entry_dma >> 32ULL); mbox_cmd[4] = 512U; 
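/* 99U is the set-DDB-entry counterpart of the 100U get command above (the symbolic
 * mailbox macro is not preserved in this generated source); mbox[1..4] again carry the
 * DDB index, DMA address and 512-byte entry size, and mbox_sts[0] is copied back to the
 * caller through *mbx_sts when a status pointer is supplied. */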
status = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if ((unsigned long )mbx_sts != (unsigned long )((uint32_t *)0U)) { *mbx_sts = mbox_sts[0]; } else { } if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n", ha->host_no, "qla4xxx_set_ddb_entry", status, mbox_sts[0], mbox_sts[4]); } else { } return (status); } } int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , int options ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 86U; mbox_cmd[1] = (uint32_t )ddb_entry->fw_ddb_index; mbox_cmd[3] = (uint32_t )options; status = qla4xxx_mailbox_command(ha, 8, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT failed sts %04X %04X", "qla4xxx_session_logout_ddb", mbox_sts[0], mbox_sts[1]); } else { } if (mbox_sts[0] == 16389U && mbox_sts[1] == 9U) { set_bit(0L, (unsigned long volatile *)(& ddb_entry->flags)); } else { } } else { } return (status); } } void qla4xxx_get_crash_record(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct crash_record *crash_record ; dma_addr_t crash_record_dma ; uint32_t crash_record_size ; int tmp ; void *tmp___0 ; int tmp___1 ; { crash_record = (struct crash_record *)0; crash_record_dma = 0ULL; crash_record_size = 0U; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 118U; tmp = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: OLD_ERROR: Unable to retrieve size!\n", ha->host_no, "qla4xxx_get_crash_record"); } else { } goto exit_get_crash_record; } else { } crash_record_size = mbox_sts[4]; if (crash_record_size == 0U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: OLD_ERROR: Crash record size is 0!\n", ha->host_no, "qla4xxx_get_crash_record"); } else { } goto exit_get_crash_record; } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )crash_record_size, & crash_record_dma, 208U, (struct dma_attrs *)0); crash_record = (struct crash_record *)tmp___0; if ((unsigned long )crash_record == (unsigned long )((struct crash_record *)0)) { goto exit_get_crash_record; } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 118U; mbox_cmd[2] = (unsigned int )crash_record_dma; mbox_cmd[3] = (unsigned int )(crash_record_dma >> 32ULL); mbox_cmd[4] = crash_record_size; tmp___1 = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp___1 != 0) { } else { } exit_get_crash_record: ; if ((unsigned long )crash_record != (unsigned long )((struct crash_record *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )crash_record_size, (void *)crash_record, crash_record_dma, (struct dma_attrs *)0); } else { } return; } } void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct conn_event_log_entry *event_log ; dma_addr_t event_log_dma ; uint32_t event_log_size ; uint32_t num_valid_entries ; uint32_t oldest_entry ; uint32_t max_event_log_entries ; uint8_t i ; int tmp ; void *tmp___0 ; int tmp___1 ; { event_log = (struct conn_event_log_entry *)0; event_log_dma = 0ULL; 
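/* Two-pass retrieval: mailbox command 119U is first issued without a buffer to learn
 * the event-log size (mbox_sts[4]), a coherent DMA buffer of that size is allocated and
 * the command reissued with the buffer address.  Entries are 20 bytes each; when
 * ql4xextended_error_logging == 3 they are dumped in order, starting at oldest_entry if
 * the log has wrapped (num_valid_entries > max_event_log_entries). */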
event_log_size = 0U; oldest_entry = 0U; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 119U; tmp = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { goto exit_get_event_log; } else { } event_log_size = mbox_sts[4]; if (event_log_size == 0U) { goto exit_get_event_log; } else { } tmp___0 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )event_log_size, & event_log_dma, 208U, (struct dma_attrs *)0); event_log = (struct conn_event_log_entry *)tmp___0; if ((unsigned long )event_log == (unsigned long )((struct conn_event_log_entry *)0)) { goto exit_get_event_log; } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 119U; mbox_cmd[2] = (unsigned int )event_log_dma; mbox_cmd[3] = (unsigned int )(event_log_dma >> 32ULL); tmp___1 = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp___1 != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: OLD_ERROR: Unable to retrieve event log!\n", ha->host_no, "qla4xxx_get_conn_event_log"); } else { } goto exit_get_event_log; } else { } num_valid_entries = mbox_sts[1]; max_event_log_entries = event_log_size / 20U; if (num_valid_entries > max_event_log_entries) { oldest_entry = num_valid_entries % max_event_log_entries; } else { } if (ql4xextended_error_logging == 3) { if (oldest_entry == 0U) { i = 0U; goto ldv_63556; ldv_63555: qla4xxx_dump_buffer((void *)event_log + (unsigned long )i * 20UL, 20U); i = (uint8_t )((int )i + 1); ldv_63556: ; if ((uint32_t )i < num_valid_entries) { goto ldv_63555; } else { } } else { i = (uint8_t )oldest_entry; goto ldv_63559; ldv_63558: qla4xxx_dump_buffer((void *)event_log + (unsigned long )i * 20UL, 20U); i = (uint8_t )((int )i + 1); ldv_63559: ; if ((uint32_t )i < max_event_log_entries) { goto ldv_63558; } else { } i = 0U; goto ldv_63562; ldv_63561: qla4xxx_dump_buffer((void *)event_log + (unsigned long )i * 20UL, 20U); i = (uint8_t )((int )i + 1); ldv_63562: ; if ((uint32_t )i < oldest_entry) { goto ldv_63561; } else { } } } else { } exit_get_event_log: ; if ((unsigned long )event_log != (unsigned long )((struct conn_event_log_entry *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )event_log_size, (void *)event_log, event_log_dma, (struct dma_attrs *)0); } else { } return; } } int qla4xxx_abort_task(struct scsi_qla_host *ha , struct srb *srb ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct scsi_cmnd *cmd ; int status ; unsigned long flags ; uint32_t index ; { cmd = srb->cmd; status = 0; flags = 0UL; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); ldv_spin_lock(); index = (uint32_t )((long )cmd->host_scribble); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (index == 1024U) { return (status); } else { } mbox_cmd[0] = 21U; mbox_cmd[1] = (uint32_t )(srb->ddb)->fw_ddb_index; mbox_cmd[2] = index; mbox_cmd[5] = 1U; qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (mbox_sts[0] != 16384U) { status = 1; if (ql4xextended_error_logging == 2) { printk("\fscsi%ld:%d:%llu: abort task FAILED: mbx0=%04X, mb1=%04X, mb2=%04X, mb3=%04X, mb4=%04X\n", ha->host_no, (cmd->device)->id, (cmd->device)->lun, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]); } else { } } else { } return (status); } } int qla4xxx_reset_lun(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , uint64_t lun ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; uint32_t 
scsi_lun[2U] ; int status ; { status = 0; if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%llu: lun reset issued\n", ha->host_no, (int )ddb_entry->fw_ddb_index, lun); } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); int_to_scsilun(lun, (struct scsi_lun *)(& scsi_lun)); mbox_cmd[0] = 22U; mbox_cmd[1] = (uint32_t )ddb_entry->fw_ddb_index; mbox_cmd[2] = scsi_lun[0]; mbox_cmd[3] = scsi_lun[1]; mbox_cmd[5] = 1U; qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (mbox_sts[0] != 16384U && mbox_sts[0] != 16389U) { status = 1; } else { } return (status); } } int qla4xxx_reset_target(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { status = 0; if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d: target reset issued\n", ha->host_no, (int )ddb_entry->fw_ddb_index); } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 23U; mbox_cmd[1] = (uint32_t )ddb_entry->fw_ddb_index; mbox_cmd[5] = 1U; qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (mbox_sts[0] != 16384U && mbox_sts[0] != 16389U) { status = 1; } else { } return (status); } } int qla4xxx_get_flash(struct scsi_qla_host *ha , dma_addr_t dma_addr , uint32_t offset , uint32_t len ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 38U; mbox_cmd[1] = (unsigned int )dma_addr; mbox_cmd[2] = (unsigned int )(dma_addr >> 32ULL); mbox_cmd[3] = offset; mbox_cmd[4] = len; tmp = qla4xxx_mailbox_command(ha, 8, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ status %04X %04X, offset %08x, len %08x\n", ha->host_no, "qla4xxx_get_flash", mbox_sts[0], mbox_sts[1], offset, len); } else { } return (1); } else { } return (0); } } int qla4xxx_about_firmware(struct scsi_qla_host *ha ) { struct about_fw_info *about_fw ; dma_addr_t about_fw_dma ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; void *tmp ; char *tmp___0 ; char *tmp___1 ; { about_fw = (struct about_fw_info *)0; status = 1; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 256UL, & about_fw_dma, 208U, (struct dma_attrs *)0); about_fw = (struct about_fw_info *)tmp; if ((unsigned long )about_fw == (unsigned long )((struct about_fw_info *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to alloc memory for about_fw\n", "qla4xxx_about_firmware"); } else { } return (status); } else { } memset((void *)about_fw, 0, 256UL); memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 9U; mbox_cmd[2] = (unsigned int )about_fw_dma; mbox_cmd[3] = (unsigned int )(about_fw_dma >> 32ULL); mbox_cmd[4] = 256U; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_ABOUT_FW failed w/ status %04X\n", "qla4xxx_about_firmware", mbox_sts[0]); } else { } goto exit_about_fw; } else { } ha->fw_info.fw_major = about_fw->fw_major; ha->fw_info.fw_minor = about_fw->fw_minor; ha->fw_info.fw_patch = about_fw->fw_patch; ha->fw_info.fw_build = about_fw->fw_build; memcpy((void *)(& 
ha->fw_info.fw_build_date), (void const *)(& about_fw->fw_build_date), 16UL); memcpy((void *)(& ha->fw_info.fw_build_time), (void const *)(& about_fw->fw_build_time), 16UL); tmp___0 = skip_spaces((char const *)(& about_fw->fw_build_user)); strcpy((char *)(& ha->fw_info.fw_build_user), (char const *)tmp___0); ha->fw_info.fw_load_source = about_fw->fw_load_source; ha->fw_info.iscsi_major = about_fw->iscsi_major; ha->fw_info.iscsi_minor = about_fw->iscsi_minor; ha->fw_info.bootload_major = about_fw->bootload_major; ha->fw_info.bootload_minor = about_fw->bootload_minor; ha->fw_info.bootload_patch = about_fw->bootload_patch; ha->fw_info.bootload_build = about_fw->bootload_build; tmp___1 = skip_spaces((char const *)(& about_fw->extended_timestamp)); strcpy((char *)(& ha->fw_info.extended_timestamp), (char const *)tmp___1); ha->fw_uptime_secs = mbox_sts[5]; ha->fw_uptime_msecs = mbox_sts[6]; status = 0; exit_about_fw: dma_free_attrs(& (ha->pdev)->dev, 256UL, (void *)about_fw, about_fw_dma, (struct dma_attrs *)0); return (status); } } int qla4xxx_get_default_ddb(struct scsi_qla_host *ha , uint32_t options , dma_addr_t dma_addr ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 107U; mbox_cmd[1] = options; mbox_cmd[2] = (unsigned int )dma_addr; mbox_cmd[3] = (unsigned int )(dma_addr >> 32ULL); tmp = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: failed status %04X\n", ha->host_no, "qla4xxx_get_default_ddb", mbox_sts[0]); } else { } return (1); } else { } return (0); } } int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha , uint32_t ddb_index , uint32_t *mbx_sts ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 98U; mbox_cmd[1] = ddb_index; status = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4xxx_req_ddb_entry", mbox_sts[0]); } else { } } else { } *mbx_sts = mbox_sts[0]; return (status); } } int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha , uint32_t ddb_index ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 49U; mbox_cmd[1] = ddb_index; status = qla4xxx_mailbox_command(ha, 2, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4xxx_clear_ddb_entry", mbox_sts[0]); } else { } } else { } return (status); } } int qla4xxx_set_flash(struct scsi_qla_host *ha , dma_addr_t dma_addr , uint32_t offset , uint32_t length , uint32_t options ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 37U; mbox_cmd[1] = (unsigned int )dma_addr; mbox_cmd[2] = (unsigned int )(dma_addr >> 32ULL); mbox_cmd[3] = offset; mbox_cmd[4] = length; mbox_cmd[5] = options; status = qla4xxx_mailbox_command(ha, 6, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", 
(struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_WRITE_FLASH failed w/ status %04X, mbx1 %04X\n", "qla4xxx_set_flash", mbox_sts[0], mbox_sts[1]); } else { } } else { } return (status); } } int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint16_t ddb_index ) { uint32_t dev_db_start_offset ; uint32_t dev_db_end_offset ; int status ; int tmp ; { dev_db_start_offset = 83886080U; status = 1; memset((void *)fw_ddb_entry, 0, 512UL); dev_db_start_offset = (uint32_t )ddb_index * 512U + dev_db_start_offset; dev_db_end_offset = 83918847U; if (dev_db_start_offset > dev_db_end_offset) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s:Invalid DDB index %d", "qla4xxx_bootdb_by_index", (int )ddb_index); } else { } goto exit_bootdb_failed; } else { } tmp = qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 512U); if (tmp != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Get Flashfailed\n", ha->host_no, "qla4xxx_bootdb_by_index"); goto exit_bootdb_failed; } else { } if ((unsigned int )fw_ddb_entry->cookie == 36916U) { status = 0; } else { } exit_bootdb_failed: ; return (status); } } int qla4xxx_flashdb_by_index(struct scsi_qla_host *ha , struct dev_db_entry *fw_ddb_entry , dma_addr_t fw_ddb_entry_dma , uint16_t ddb_index ) { uint32_t dev_db_start_offset ; uint32_t dev_db_end_offset ; int status ; int tmp ; int tmp___0 ; { status = 1; memset((void *)fw_ddb_entry, 0, 512UL); tmp = is_qla40XX(ha); if (tmp != 0) { dev_db_start_offset = 83886080U; dev_db_end_offset = 83918847U; } else { dev_db_start_offset = (ha->hw.flt_region_ddb << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { dev_db_start_offset = ha->hw.flt_ddb_size / 2U + dev_db_start_offset; } else { } dev_db_end_offset = ha->hw.flt_ddb_size / 2U + dev_db_start_offset; } dev_db_start_offset = (uint32_t )ddb_index * 512U + dev_db_start_offset; if (dev_db_start_offset > dev_db_end_offset) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s:Invalid DDB index %d", "qla4xxx_flashdb_by_index", (int )ddb_index); } else { } goto exit_fdb_failed; } else { } tmp___0 = qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, 512U); if (tmp___0 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: Get Flash failed\n", ha->host_no, "qla4xxx_flashdb_by_index"); goto exit_fdb_failed; } else { } if ((unsigned int )fw_ddb_entry->cookie == 36916U) { status = 0; } else { } exit_fdb_failed: ; return (status); } } int qla4xxx_get_chap(struct scsi_qla_host *ha , char *username , char *password , uint16_t idx ) { int ret ; int rval ; uint32_t offset ; uint32_t chap_size ; struct ql4_chap_table *chap_table ; dma_addr_t chap_dma ; void *tmp ; int tmp___0 ; { ret = 0; rval = 1; offset = 0U; tmp = ldv_dma_pool_alloc_180(ha->chap_dma_pool, 208U, & chap_dma); chap_table = (struct ql4_chap_table *)tmp; if ((unsigned long )chap_table == (unsigned long )((struct ql4_chap_table *)0)) { return (-12); } else { } chap_size = 364U; memset((void *)chap_table, 0, (size_t )chap_size); tmp___0 = is_qla40XX(ha); if (tmp___0 != 0) { offset = (uint32_t )idx * chap_size | 100663296U; } else { offset = (ha->hw.flt_region_chap << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { offset = ha->hw.flt_chap_size / 2U + offset; } else { } offset = (uint32_t )idx * chap_size + offset; } rval = qla4xxx_get_flash(ha, 
chap_dma, offset, chap_size); if (rval != 0) { ret = -22; goto exit_get_chap; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Chap Cookie: x%x\n", (int )chap_table->cookie); } else { } if ((unsigned int )chap_table->cookie != 16530U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No valid chap entry found\n"); goto exit_get_chap; } else { } strlcpy(password, (char const *)(& chap_table->secret), 100UL); strlcpy(username, (char const *)(& chap_table->name), 256UL); chap_table->cookie = 16530U; exit_get_chap: dma_pool_free(ha->chap_dma_pool, (void *)chap_table, chap_dma); return (ret); } } int qla4xxx_set_chap(struct scsi_qla_host *ha , char *username , char *password , uint16_t idx , int bidi ) { int ret ; int rval ; uint32_t offset ; struct ql4_chap_table *chap_table ; uint32_t chap_size ; dma_addr_t chap_dma ; void *tmp ; size_t tmp___0 ; int tmp___1 ; { ret = 0; rval = 1; offset = 0U; chap_size = 0U; tmp = ldv_dma_pool_alloc_181(ha->chap_dma_pool, 208U, & chap_dma); chap_table = (struct ql4_chap_table *)tmp; if ((unsigned long )chap_table == (unsigned long )((struct ql4_chap_table *)0)) { ret = -12; goto exit_set_chap; } else { } memset((void *)chap_table, 0, 364UL); if (bidi != 0) { chap_table->flags = (uint8_t )((unsigned int )chap_table->flags | 64U); } else { chap_table->flags = (uint8_t )((unsigned int )chap_table->flags | 128U); } tmp___0 = strlen((char const *)password); chap_table->secret_len = (uint8_t )tmp___0; strncpy((char *)(& chap_table->secret), (char const *)password, 99UL); strncpy((char *)(& chap_table->name), (char const *)username, 255UL); chap_table->cookie = 16530U; tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { chap_size = 46592U; offset = 100663296U; } else { chap_size = ha->hw.flt_chap_size / 2U; offset = (ha->hw.flt_region_chap << 2) + 2382364672U; if ((unsigned int )ha->port_num == 1U) { offset = offset + chap_size; } else { } } offset = (uint32_t )idx * 364U + offset; rval = qla4xxx_set_flash(ha, chap_dma, offset, 364U, 3U); if (rval == 0 && (unsigned long )ha->chap_list != (unsigned long )((uint8_t *)0U)) { memcpy((void *)ha->chap_list + (unsigned long )idx, (void const *)chap_table, 364UL); } else { } dma_pool_free(ha->chap_dma_pool, (void *)chap_table, chap_dma); if (rval != 0) { ret = -22; } else { } exit_set_chap: ; return (ret); } } int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha , char *username , char *password , uint16_t chap_index ) { int rval ; struct ql4_chap_table *chap_table ; int max_chap_entries ; int tmp ; { rval = 1; chap_table = (struct ql4_chap_table *)0; if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Do not have CHAP table cache\n"); rval = 1; goto exit_uni_chap; } else { } if ((unsigned long )username == (unsigned long )((char *)0) || (unsigned long )password == (unsigned long )((char *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "No memory for username & secret\n"); rval = 1; goto exit_uni_chap; } else { } tmp = is_qla80XX(ha); if (tmp != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((int )chap_index > max_chap_entries) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Invalid Chap index\n"); rval = 1; goto exit_uni_chap; } else { } mutex_lock_nested(& ha->chap_sem, 0U); chap_table = (struct ql4_chap_table *)ha->chap_list + (unsigned long )chap_index; if ((unsigned int 
)chap_table->cookie != 16530U) { rval = 1; goto exit_unlock_uni_chap; } else { } if ((int )((signed char )chap_table->flags) >= 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unidirectional entry not set\n"); rval = 1; goto exit_unlock_uni_chap; } else { } strlcpy(password, (char const *)(& chap_table->secret), 100UL); strlcpy(username, (char const *)(& chap_table->name), 256UL); rval = 0; exit_unlock_uni_chap: mutex_unlock(& ha->chap_sem); exit_uni_chap: ; return (rval); } } int qla4xxx_get_chap_index(struct scsi_qla_host *ha , char *username , char *password , int bidi , uint16_t *chap_index ) { int i ; int rval ; int free_index ; int found_index ; int max_chap_entries ; struct ql4_chap_table *chap_table ; int tmp ; int tmp___0 ; int tmp___1 ; { free_index = -1; found_index = 0; max_chap_entries = 0; tmp = is_qla80XX(ha); if (tmp != 0) { max_chap_entries = (int )(ha->hw.flt_chap_size / 728U); } else { max_chap_entries = 128; } if ((unsigned long )ha->chap_list == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Do not have CHAP table cache\n"); return (1); } else { } if ((unsigned long )username == (unsigned long )((char *)0) || (unsigned long )password == (unsigned long )((char *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Do not have username and psw\n"); return (1); } else { } mutex_lock_nested(& ha->chap_sem, 0U); i = 0; goto ldv_63721; ldv_63720: chap_table = (struct ql4_chap_table *)ha->chap_list + (unsigned long )i; if ((unsigned int )chap_table->cookie != 16530U) { if (i > 3 && free_index == -1) { free_index = i; } else { } goto ldv_63718; } else { } if (bidi != 0) { if ((int )((signed char )chap_table->flags) < 0) { goto ldv_63718; } else { } } else if (((int )chap_table->flags & 64) != 0) { goto ldv_63718; } else { } tmp___0 = strncmp((char const *)(& chap_table->secret), (char const *)password, 100UL); if (tmp___0 == 0) { tmp___1 = strncmp((char const *)(& chap_table->name), (char const *)username, 256UL); if (tmp___1 == 0) { *chap_index = (uint16_t )i; found_index = 1; goto ldv_63719; } else { } } else { } ldv_63718: i = i + 1; ldv_63721: ; if (i < max_chap_entries) { goto ldv_63720; } else { } ldv_63719: ; if (found_index == 0 && free_index != -1) { rval = qla4xxx_set_chap(ha, username, password, (int )((uint16_t )free_index), bidi); if (rval == 0) { *chap_index = (uint16_t )free_index; found_index = 1; } else { } } else { } mutex_unlock(& ha->chap_sem); if (found_index != 0) { return (0); } else { } return (1); } } int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha , uint16_t fw_ddb_index , uint16_t connection_id , uint16_t option ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 86U; mbox_cmd[1] = (uint32_t )fw_ddb_index; mbox_cmd[2] = (uint32_t )connection_id; mbox_cmd[3] = (uint32_t )option; status = qla4xxx_mailbox_command(ha, 4, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_CONN_CLOSE option %04x failed w/ status %04X %04X\n", "qla4xxx_conn_close_sess_logout", (int )option, mbox_sts[0], mbox_sts[1]); } else { } } else { } return (status); } } static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha , uint32_t ext_tmo ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& 
mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); ext_tmo = ext_tmo & 15U; mbox_cmd[0] = 258U; mbox_cmd[1] = (ha->idc_info.request_desc & 4294963455U) | (ext_tmo << 8); mbox_cmd[2] = ha->idc_info.info1; mbox_cmd[3] = ha->idc_info.info2; mbox_cmd[4] = ha->idc_info.info3; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed status %04X\n", ha->host_no, "qla4_84xx_extend_idc_tmo", mbox_sts[0]); } else { } return (1); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC timeout extended by %d secs\n", "qla4_84xx_extend_idc_tmo", ext_tmo); } return (0); } } int qla4xxx_disable_acb(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; unsigned long tmp ; int tmp___0 ; int tmp___1 ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 138U; status = qla4xxx_mailbox_command(ha, 8, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_DISABLE_ACB failed w/ status %04X %04X %04X", "qla4xxx_disable_acb", mbox_sts[0], mbox_sts[1], mbox_sts[2]); } else { } } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { tmp___1 = constant_test_bit(23L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___1 != 0) { if (mbox_sts[0] != 16384U) { qla4_84xx_extend_idc_tmo(ha, 8U); tmp = wait_for_completion_timeout(& ha->disable_acb_comp, 2000UL); if (tmp == 0UL) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Disable ACB Completion not received\n", "qla4xxx_disable_acb"); } else { } } else { } } else { } } else { } } return (status); } } int qla4xxx_get_acb(struct scsi_qla_host *ha , dma_addr_t acb_dma , uint32_t acb_type , uint32_t len ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 137U; mbox_cmd[1] = acb_type; mbox_cmd[2] = (unsigned int )acb_dma; mbox_cmd[3] = (unsigned int )(acb_dma >> 32ULL); mbox_cmd[4] = len; status = qla4xxx_mailbox_command(ha, 5, 5, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_GET_ACB failed w/ status %04X\n", "qla4xxx_get_acb", mbox_sts[0]); } else { } } else { } return (status); } } int qla4xxx_set_acb(struct scsi_qla_host *ha , uint32_t *mbox_cmd , uint32_t *mbox_sts , dma_addr_t acb_dma ) { int status ; { status = 0; memset((void *)mbox_cmd, 0, 32UL); memset((void *)mbox_sts, 0, 32UL); *mbox_cmd = 136U; *(mbox_cmd + 1UL) = 0U; *(mbox_cmd + 2UL) = (unsigned int )acb_dma; *(mbox_cmd + 3UL) = (unsigned int )(acb_dma >> 32ULL); *(mbox_cmd + 4UL) = 768U; status = qla4xxx_mailbox_command(ha, 5, 5, mbox_cmd, mbox_sts); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_SET_ACB failed w/ status %04X\n", "qla4xxx_set_acb", *mbox_sts); } else { } } else { } return (status); } } int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , struct iscsi_cls_conn *cls_conn , uint32_t *mbx_sts ) { struct dev_db_entry *fw_ddb_entry ; struct iscsi_conn *conn ; struct 
iscsi_session *sess ; struct qla_conn *qla_conn ; struct sockaddr *dst_addr ; dma_addr_t fw_ddb_entry_dma ; int status ; int rval ; struct sockaddr_in *addr ; struct sockaddr_in6 *addr6 ; char *ip ; uint16_t iscsi_opts ; uint32_t options ; uint16_t idx ; uint16_t *ptid ; void *tmp ; size_t _min1 ; size_t tmp___0 ; unsigned long _min2 ; __u16 tmp___1 ; __u16 tmp___2 ; size_t tmp___3 ; size_t tmp___4 ; size_t tmp___5 ; size_t tmp___6 ; { status = 0; rval = 0; iscsi_opts = 0U; options = 0U; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 512UL, & fw_ddb_entry_dma, 208U, (struct dma_attrs *)0); fw_ddb_entry = (struct dev_db_entry *)tmp; if ((unsigned long )fw_ddb_entry == (unsigned long )((struct dev_db_entry *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate dma buffer.\n", "qla4xxx_set_param_ddbentry"); } else { } rval = -12; goto exit_set_param_no_free; } else { } conn = (struct iscsi_conn *)cls_conn->dd_data; qla_conn = (struct qla_conn *)conn->dd_data; sess = conn->session; dst_addr = (struct sockaddr *)(& (qla_conn->qla_ep)->dst_addr); if ((unsigned int )dst_addr->sa_family == 10U) { options = options | 1U; } else { } status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); if (status == 1) { rval = -22; goto exit_set_param; } else { } ptid = (uint16_t *)(& fw_ddb_entry->isid) + 1U; *ptid = (unsigned short )(ddb_entry->sess)->target_id; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "ISID [%02x%02x%02x%02x%02x%02x]\n", (int )fw_ddb_entry->isid[5], (int )fw_ddb_entry->isid[4], (int )fw_ddb_entry->isid[3], (int )fw_ddb_entry->isid[2], (int )fw_ddb_entry->isid[1], (int )fw_ddb_entry->isid[0]); } else { } iscsi_opts = fw_ddb_entry->iscsi_options; memset((void *)(& fw_ddb_entry->iscsi_alias), 0, 32UL); memset((void *)(& fw_ddb_entry->iscsi_name), 0, 224UL); if ((unsigned long )sess->targetname != (unsigned long )((char *)0)) { tmp___0 = strlen((char const *)sess->targetname); _min1 = tmp___0; _min2 = 224UL; memcpy((void *)(& fw_ddb_entry->iscsi_name), (void const *)sess->targetname, _min1 < _min2 ? 
_min1 : _min2); } else { } memset((void *)(& fw_ddb_entry->ip_addr), 0, 16UL); memset((void *)(& fw_ddb_entry->tgt_addr), 0, 32UL); fw_ddb_entry->options = 66U; if ((unsigned int )dst_addr->sa_family == 2U) { addr = (struct sockaddr_in *)dst_addr; ip = (char *)(& addr->sin_addr); memcpy((void *)(& fw_ddb_entry->ip_addr), (void const *)ip, 4UL); tmp___1 = __fswab16((int )addr->sin_port); fw_ddb_entry->port = tmp___1; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Destination Address [%pI4]: index [%d]\n", "qla4xxx_set_param_ddbentry", (uint8_t *)(& fw_ddb_entry->ip_addr), (int )ddb_entry->fw_ddb_index); } else { } } else if ((unsigned int )dst_addr->sa_family == 10U) { addr6 = (struct sockaddr_in6 *)dst_addr; ip = (char *)(& addr6->sin6_addr); memcpy((void *)(& fw_ddb_entry->ip_addr), (void const *)ip, 16UL); tmp___2 = __fswab16((int )addr6->sin6_port); fw_ddb_entry->port = tmp___2; fw_ddb_entry->options = (uint16_t )((unsigned int )fw_ddb_entry->options | 256U); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Destination Address [%pI6]: index [%d]\n", "qla4xxx_set_param_ddbentry", (uint8_t *)(& fw_ddb_entry->ip_addr), (int )ddb_entry->fw_ddb_index); } else { } } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed to get IP Address\n", "qla4xxx_set_param_ddbentry"); rval = -22; goto exit_set_param; } if ((unsigned long )sess->username != (unsigned long )((char *)0) && (unsigned long )sess->password != (unsigned long )((char *)0)) { tmp___3 = strlen((char const *)sess->username); if (tmp___3 != 0UL) { tmp___4 = strlen((char const *)sess->password); if (tmp___4 != 0UL) { iscsi_opts = (uint16_t )((unsigned int )iscsi_opts | 128U); rval = qla4xxx_get_chap_index(ha, sess->username, sess->password, 0, & idx); if (rval != 0) { goto exit_set_param; } else { } fw_ddb_entry->chap_tbl_idx = idx; } else { } } else { } } else { } if ((unsigned long )sess->username_in != (unsigned long )((char *)0) && (unsigned long )sess->password_in != (unsigned long )((char *)0)) { tmp___5 = strlen((char const *)sess->username_in); if (tmp___5 != 0UL) { tmp___6 = strlen((char const *)sess->password_in); if (tmp___6 != 0UL) { iscsi_opts = (uint16_t )((unsigned int )iscsi_opts | 16U); rval = qla4xxx_get_chap_index(ha, sess->username_in, sess->password_in, 1, & idx); if (rval != 0) { goto exit_set_param; } else { } } else { } } else { } } else { } if (sess->initial_r2t_en != 0) { iscsi_opts = (uint16_t )((unsigned int )iscsi_opts | 1024U); } else { } if (sess->imm_data_en != 0) { iscsi_opts = (uint16_t )((unsigned int )iscsi_opts | 2048U); } else { } fw_ddb_entry->iscsi_options = iscsi_opts; if (conn->max_recv_dlength != 0U) { fw_ddb_entry->iscsi_max_rcv_data_seg_len = (unsigned short )(conn->max_recv_dlength / 512U); } else { } if ((unsigned int )sess->max_r2t != 0U) { fw_ddb_entry->iscsi_max_outsnd_r2t = sess->max_r2t; } else { } if (sess->first_burst != 0U) { fw_ddb_entry->iscsi_first_burst_len = (unsigned short )(sess->first_burst / 512U); } else { } if (sess->max_burst != 0U) { fw_ddb_entry->iscsi_max_burst_len = (unsigned short )(sess->max_burst / 512U); } else { } if (sess->time2wait != 0) { fw_ddb_entry->iscsi_def_time2wait = (unsigned short )sess->time2wait; } else { } if (sess->time2retain != 0) { fw_ddb_entry->iscsi_def_time2retain = (unsigned short )sess->time2retain; } else { } status = qla4xxx_set_ddb_entry(ha, (int )ddb_entry->fw_ddb_index, fw_ddb_entry_dma, 
mbx_sts); if (status != 0) { rval = -22; } else { } exit_set_param: dma_free_attrs(& (ha->pdev)->dev, 512UL, (void *)fw_ddb_entry, fw_ddb_entry_dma, (struct dma_attrs *)0); exit_set_param_no_free: ; return (rval); } } int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha , uint16_t fw_ddb_index , uint16_t stats_size , dma_addr_t stats_dma ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 30U; mbox_cmd[1] = (uint32_t )fw_ddb_index; mbox_cmd[2] = (unsigned int )stats_dma; mbox_cmd[3] = (unsigned int )(stats_dma >> 32ULL); mbox_cmd[4] = (uint32_t )stats_size; status = qla4xxx_mailbox_command(ha, 5, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_GET_MANAGEMENT_DATA failed w/ status %04X\n", "qla4xxx_get_mgmt_data", mbox_sts[0]); } else { } } else { } return (status); } } int qla4xxx_get_ip_state(struct scsi_qla_host *ha , uint32_t acb_idx , uint32_t ip_idx , uint32_t *sts ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 145U; mbox_cmd[1] = acb_idx; mbox_cmd[2] = ip_idx; status = qla4xxx_mailbox_command(ha, 3, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_GET_IP_ADDR_STATE failed w/ status %04X\n", "qla4xxx_get_ip_state", mbox_sts[0]); } else { } } else { } memcpy((void *)sts, (void const *)(& mbox_sts), 32UL); return (status); } } int qla4xxx_get_nvram(struct scsi_qla_host *ha , dma_addr_t nvram_dma , uint32_t offset , uint32_t size ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 120U; mbox_cmd[1] = (unsigned int )nvram_dma; mbox_cmd[2] = (unsigned int )(nvram_dma >> 32ULL); mbox_cmd[3] = offset; mbox_cmd[4] = size; status = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed status %04X\n", ha->host_no, "qla4xxx_get_nvram", mbox_sts[0]); } else { } } else { } return (status); } } int qla4xxx_set_nvram(struct scsi_qla_host *ha , dma_addr_t nvram_dma , uint32_t offset , uint32_t size ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 121U; mbox_cmd[1] = (unsigned int )nvram_dma; mbox_cmd[2] = (unsigned int )(nvram_dma >> 32ULL); mbox_cmd[3] = offset; mbox_cmd[4] = size; status = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed status %04X\n", ha->host_no, "qla4xxx_set_nvram", mbox_sts[0]); } else { } } else { } return (status); } } int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha , uint32_t region , uint32_t field0 , uint32_t field1 ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { status = 0; memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 
32UL); mbox_cmd[0] = 135U; mbox_cmd[3] = region; mbox_cmd[4] = field0; mbox_cmd[5] = field1; status = qla4xxx_mailbox_command(ha, 8, 3, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: failed status %04X\n", ha->host_no, "qla4xxx_restore_factory_defaults", mbox_sts[0]); } else { } } else { } return (status); } } int qla4_8xxx_set_param(struct scsi_qla_host *ha , int param ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; uint32_t status ; int tmp ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 89U; if (param == 512) { mbox_cmd[1] = 512U; strncpy((char *)(& mbox_cmd) + 2U, "5.04.00-k6", 23UL); } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: invalid parameter 0x%x\n", "qla4_8xxx_set_param", param); status = 1U; goto exit_set_param; } tmp = qla4xxx_mailbox_command(ha, 8, 2, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); status = (uint32_t )tmp; if (status == 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4_8xxx_set_param", mbox_sts[0]); } else { } exit_set_param: ; return ((int )status); } } int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 257U; mbox_cmd[1] = ha->idc_info.request_desc; mbox_cmd[2] = ha->idc_info.info1; mbox_cmd[3] = ha->idc_info.info2; mbox_cmd[4] = ha->idc_info.info3; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4_83xx_post_idc_ack", mbox_sts[0]); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC ACK posted\n", "qla4_83xx_post_idc_ack"); } return (status); } } int qla4_84xx_config_acb(struct scsi_qla_host *ha , int acb_config ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct addr_ctrl_blk *acb ; uint32_t acb_len ; int rval ; dma_addr_t acb_dma ; void *tmp ; void *tmp___0 ; { acb = (struct addr_ctrl_blk *)0; acb_len = 768U; rval = 0; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 768UL, & acb_dma, 208U, (struct dma_attrs *)0); acb = (struct addr_ctrl_blk *)tmp; if ((unsigned long )acb == (unsigned long )((struct addr_ctrl_blk *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to alloc acb\n", "qla4_84xx_config_acb"); rval = 1; goto exit_config_acb; } else { } memset((void *)acb, 0, (size_t )acb_len); switch (acb_config) { case 0: rval = qla4xxx_get_acb(ha, acb_dma, 0U, acb_len); if (rval != 0) { goto exit_free_acb; } else { } rval = qla4xxx_disable_acb(ha); if (rval != 0) { goto exit_free_acb; } else { } if ((unsigned long )ha->saved_acb == (unsigned long )((struct addr_ctrl_blk *)0)) { tmp___0 = kzalloc((size_t )acb_len, 208U); ha->saved_acb = (struct addr_ctrl_blk *)tmp___0; } else { } if ((unsigned long )ha->saved_acb == (unsigned long )((struct addr_ctrl_blk *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to alloc acb\n", "qla4_84xx_config_acb"); rval = 1; goto exit_free_acb; } else { } memcpy((void *)ha->saved_acb, (void const *)acb, (size_t )acb_len); goto ldv_63872; case 1: ; if ((unsigned long )ha->saved_acb == (unsigned long )((struct addr_ctrl_blk *)0)) { dev_printk("\v", (struct 
device const *)(& (ha->pdev)->dev), "%s: Can\'t set ACB, Saved ACB not available\n", "qla4_84xx_config_acb"); rval = 1; goto exit_free_acb; } else { } memcpy((void *)acb, (void const *)ha->saved_acb, (size_t )acb_len); rval = qla4xxx_set_acb(ha, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts), acb_dma); if (rval != 0) { goto exit_free_acb; } else { } goto ldv_63872; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid ACB Configuration\n", "qla4_84xx_config_acb"); } ldv_63872: ; exit_free_acb: dma_free_attrs(& (ha->pdev)->dev, 768UL, (void *)acb, acb_dma, (struct dma_attrs *)0); exit_config_acb: ; if (acb_config == 1 && (unsigned long )ha->saved_acb != (unsigned long )((struct addr_ctrl_blk *)0)) { kfree((void const *)ha->saved_acb); ha->saved_acb = (struct addr_ctrl_blk *)0; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s %s\n", "qla4_84xx_config_acb", rval == 0 ? (char *)"SUCCEEDED" : (char *)"FAILED"); } else { } return (rval); } } int qla4_83xx_get_port_config(struct scsi_qla_host *ha , uint32_t *config ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 291U; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status == 0) { *config = mbox_sts[1]; } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4_83xx_get_port_config", mbox_sts[0]); } return (status); } } int qla4_83xx_set_port_config(struct scsi_qla_host *ha , uint32_t *config ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 290U; mbox_cmd[1] = *config; status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed status %04X\n", "qla4_83xx_set_port_config", mbox_sts[0]); } else { } return (status); } } bool ldv_queue_work_on_151(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_152(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_153(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_154(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_155(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = 
queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_161(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_167(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_169(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_171(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_172(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_173(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_174(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_175(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_176(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_177(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_178(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_179(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } void *ldv_dma_pool_alloc_180(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } void *ldv_dma_pool_alloc_181(struct dma_pool *ldv_func_arg1 , gfp_t flags , dma_addr_t *ldv_func_arg3 ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; bool ldv_queue_work_on_202(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_204(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_203(int ldv_func_arg1 , struct workqueue_struct 
*ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_206(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_205(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_212(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_229(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; extern struct scatterlist *sg_next(struct scatterlist * ) ; struct sk_buff *ldv_skb_clone_220(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_228(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_222(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_218(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_226(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_227(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_223(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_224(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_225(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_230(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; extern int scsi_dma_map(struct scsi_cmnd * ) ; __inline static struct scatterlist *scsi_sglist(struct scsi_cmnd *cmd ) { { return (cmd->sdb.table.sgl); } } __inline static unsigned int scsi_bufflen(struct scsi_cmnd *cmd ) { { return (cmd->sdb.length); } } static int qla4xxx_space_in_req_ring(struct scsi_qla_host *ha , uint16_t req_cnt ) { uint16_t cnt ; { if ((int )req_cnt + 2 >= (int )ha->req_q_count) { cnt = (*((ha->isp_ops)->rd_shdw_req_q_out))(ha); if ((int )ha->request_in < (int )cnt) { ha->req_q_count = (int )cnt - (int )ha->request_in; } else { ha->req_q_count = (unsigned int )((int )cnt - (int )ha->request_in) + 1024U; } } else { } if ((int )req_cnt + 2 < (int )ha->req_q_count) { return (1); } else { return (0); } } } static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha ) { { if ((unsigned int )ha->request_in == 1023U) { ha->request_in = 0U; ha->request_ptr = ha->request_ring; } else { ha->request_in = (uint16_t )((int )ha->request_in + 1); ha->request_ptr = ha->request_ptr + 1; } return; } } static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha , struct queue_entry **queue_entry ) { uint16_t req_cnt ; int tmp ; { req_cnt = 1U; tmp = qla4xxx_space_in_req_ring(ha, (int )req_cnt); if (tmp != 0) { *queue_entry = ha->request_ptr; memset((void *)*queue_entry, 0, 64UL); qla4xxx_advance_req_ring_ptr(ha); ha->req_q_count = (int )ha->req_q_count - (int )req_cnt; return (0); } else { } return (1); } } int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha , struct ddb_entry *ddb_entry , uint64_t lun , uint16_t mrkr_mod ) { struct qla4_marker_entry *marker_entry ; unsigned long flags ; uint8_t status ; int tmp ; { flags = 0UL; status = 0U; ldv_spin_lock(); tmp = qla4xxx_get_req_pkt(ha, (struct queue_entry **)(& marker_entry)); if (tmp != 0) { status = 1U; goto exit_send_marker; } else { } marker_entry->hdr.entryType = 4U; 
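/* Descriptive note (added): the fields below build a marker IOCB: entry type 4U
 * (presumably ET_MARKER in the original driver headers), a single entry, the firmware
 * DDB index as target, the caller-supplied modifier, and the LUN encoded through
 * int_to_scsilun(). The inline "sfence" is how the verifier rendered the driver's
 * write barrier (wmb()); it orders the IOCB writes before the request-queue doorbell
 * is rung via (*ha->isp_ops->queue_iocb)(ha). The symbolic names are assumptions. */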
marker_entry->hdr.entryCount = 1U; marker_entry->target = ddb_entry->fw_ddb_index; marker_entry->modifier = mrkr_mod; int_to_scsilun(lun, & marker_entry->lun); __asm__ volatile ("sfence": : : "memory"); (*((ha->isp_ops)->queue_iocb))(ha); exit_send_marker: spin_unlock_irqrestore(& ha->hardware_lock, flags); return ((int )status); } } static struct continuation_t1_entry *qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha ) { struct continuation_t1_entry *cont_entry ; { cont_entry = (struct continuation_t1_entry *)ha->request_ptr; qla4xxx_advance_req_ring_ptr(ha); cont_entry->hdr.entryType = 10U; cont_entry->hdr.entryCount = 1U; cont_entry->hdr.systemDefined = (unsigned char )ha->request_in; return (cont_entry); } } static uint16_t qla4xxx_calc_request_entries(uint16_t dsds ) { uint16_t iocbs ; { iocbs = 1U; if ((unsigned int )dsds > 1U) { iocbs = (int )((uint16_t )(((int )dsds + -1) / 5)) + (int )iocbs; if (((int )dsds + -1) % 5 != 0) { iocbs = (uint16_t )((int )iocbs + 1); } else { } } else { } return (iocbs); } } static void qla4xxx_build_scsi_iocbs(struct srb *srb , struct command_t3_entry *cmd_entry , uint16_t tot_dsds ) { struct scsi_qla_host *ha ; uint16_t avail_dsds ; struct data_seg_a64 *cur_dsd ; struct scsi_cmnd *cmd ; struct scatterlist *sg ; int i ; unsigned int tmp ; dma_addr_t sle_dma ; struct continuation_t1_entry *cont_entry ; { cmd = srb->cmd; ha = srb->ha; tmp = scsi_bufflen(cmd); if (tmp == 0U || (unsigned int )cmd->sc_data_direction == 3U) { cmd_entry->ttlByteCnt = 0U; return; } else { } avail_dsds = 1U; cur_dsd = (struct data_seg_a64 *)(& cmd_entry->dataseg); i = 0; sg = scsi_sglist(cmd); goto ldv_63339; ldv_63338: ; if ((unsigned int )avail_dsds == 0U) { cont_entry = qla4xxx_alloc_cont_entry(ha); cur_dsd = (struct data_seg_a64 *)(& cont_entry->dataseg); avail_dsds = 5U; } else { } sle_dma = sg->dma_address; cur_dsd->base.addrLow = (unsigned int )sle_dma; cur_dsd->base.addrHigh = (unsigned int )(sle_dma >> 32ULL); cur_dsd->count = sg->dma_length; avail_dsds = (uint16_t )((int )avail_dsds - 1); cur_dsd = cur_dsd + 1; i = i + 1; sg = sg_next(sg); ldv_63339: ; if ((int )tot_dsds > i) { goto ldv_63338; } else { } return; } } void qla4_83xx_queue_iocb(struct scsi_qla_host *ha ) { { writel((unsigned int )ha->request_in, (void volatile *)(& (ha->qla4_83xx_reg)->req_q_in)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->req_q_in)); return; } } void qla4_83xx_complete_iocb(struct scsi_qla_host *ha ) { { writel((unsigned int )ha->response_out, (void volatile *)(& (ha->qla4_83xx_reg)->rsp_q_out)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->rsp_q_out)); return; } } void qla4_82xx_queue_iocb(struct scsi_qla_host *ha ) { uint32_t dbval ; { dbval = 0U; dbval = (uint32_t )(((int )ha->func_num << 5) | 20); dbval = (uint32_t )((int )ha->request_in << 16) | dbval; qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, (u32 )ha->request_in); return; } } void qla4_82xx_complete_iocb(struct scsi_qla_host *ha ) { { writel((unsigned int )ha->response_out, (void volatile *)(& (ha->qla4_82xx_reg)->rsp_q_out)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->rsp_q_out)); return; } } void qla4xxx_queue_iocb(struct scsi_qla_host *ha ) { { writel((unsigned int )ha->request_in, (void volatile *)(& (ha->reg)->req_q_in)); readl((void const volatile *)(& (ha->reg)->req_q_in)); return; } } void qla4xxx_complete_iocb(struct scsi_qla_host *ha ) { { writel((unsigned int )ha->response_out, (void volatile *)(& (ha->reg)->rsp_q_out)); readl((void const volatile *)(& (ha->reg)->rsp_q_out)); return; } } int 
qla4xxx_send_command_to_isp(struct scsi_qla_host *ha , struct srb *srb ) { struct scsi_cmnd *cmd ; struct ddb_entry *ddb_entry ; struct command_t3_entry *cmd_entry ; int nseg ; uint16_t tot_dsds ; uint16_t req_cnt ; unsigned long flags ; uint32_t index ; int tmp ; int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; { cmd = srb->cmd; ddb_entry = srb->ddb; tot_dsds = 0U; ldv_spin_lock(); index = (unsigned int )(cmd->request)->tag; tmp = constant_test_bit(0L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Adapter OFFLINE! Do not issue command.\n", ha->host_no, "qla4xxx_send_command_to_isp"); } else { } goto queuing_error; } else { } nseg = scsi_dma_map(cmd); if (nseg < 0) { goto queuing_error; } else { } tot_dsds = (uint16_t )nseg; req_cnt = qla4xxx_calc_request_entries((int )tot_dsds); tmp___0 = qla4xxx_space_in_req_ring(ha, (int )req_cnt); if (tmp___0 == 0) { goto queuing_error; } else { } if ((int )ha->iocb_cnt + (int )req_cnt >= (int )ha->iocb_hiwat) { goto queuing_error; } else { } cmd_entry = (struct command_t3_entry *)ha->request_ptr; memset((void *)cmd_entry, 0, 64UL); cmd_entry->hdr.entryType = 25U; cmd_entry->handle = index; cmd_entry->target = ddb_entry->fw_ddb_index; int_to_scsilun((cmd->device)->lun, & cmd_entry->lun); cmd_entry->ttlByteCnt = scsi_bufflen(cmd); memcpy((void *)(& cmd_entry->cdb), (void const *)cmd->cmnd, (size_t )cmd->cmd_len); cmd_entry->dataSegCnt = tot_dsds; cmd_entry->hdr.entryCount = (uint8_t )req_cnt; cmd_entry->control_flags = 0U; tmp___2 = scsi_bufflen(cmd); if (tmp___2 != 0U) { if ((unsigned int )cmd->sc_data_direction == 1U) { cmd_entry->control_flags = 32U; } else if ((unsigned int )cmd->sc_data_direction == 2U) { cmd_entry->control_flags = 64U; } else { } tmp___1 = scsi_bufflen(cmd); ha->bytes_xfered = ha->bytes_xfered + tmp___1; if ((ha->bytes_xfered & 4293918720U) != 0U) { ha->total_mbytes_xferred = ha->total_mbytes_xferred + (uint64_t )(ha->bytes_xfered >> 20); ha->bytes_xfered = ha->bytes_xfered & 1048575U; } else { } } else { } cmd_entry->control_flags = (uint8_t )((unsigned int )cmd_entry->control_flags | 1U); qla4xxx_advance_req_ring_ptr(ha); qla4xxx_build_scsi_iocbs(srb, cmd_entry, (int )tot_dsds); __asm__ volatile ("sfence": : : "memory"); (srb->cmd)->host_scribble = (unsigned char *)((unsigned long )index); srb->state = 3U; srb->flags = (uint16_t )((unsigned int )srb->flags | 8U); ha->iocb_cnt = (int )ha->iocb_cnt + (int )req_cnt; srb->iocb_cnt = req_cnt; ha->req_q_count = (int )ha->req_q_count - (int )req_cnt; (*((ha->isp_ops)->queue_iocb))(ha); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (0); queuing_error: ; if ((unsigned int )tot_dsds != 0U) { scsi_dma_unmap(cmd); } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } int qla4xxx_send_passthru0(struct iscsi_task *task ) { struct passthru0 *passthru_iocb ; struct iscsi_session *sess ; struct ddb_entry *ddb_entry ; struct scsi_qla_host *ha ; struct ql4_task_data *task_data ; uint16_t ctrl_flags ; unsigned long flags ; int ret ; int tmp ; { sess = (task->conn)->session; ddb_entry = (struct ddb_entry *)sess->dd_data; ha = ddb_entry->ha; task_data = (struct ql4_task_data *)task->dd_data; ctrl_flags = 0U; ret = 1; ldv_spin_lock(); task_data->iocb_req_cnt = 1U; tmp = qla4xxx_space_in_req_ring(ha, (int )task_data->iocb_req_cnt); if (tmp == 0) { goto queuing_error; } else { } passthru_iocb = (struct passthru0 *)ha->request_ptr; memset((void *)passthru_iocb, 0, 64UL); 
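/* Descriptive note (added): the statements below fill a PASSTHRU0 IOCB: entry type 58U
 * (0x3A, presumably ET_PASSTHRU0), handle taken from task->itt, the DDB index as target,
 * and a 30-second timeout. When a request PDU is present, the task data is copied 48
 * bytes past the start of req_buffer (the first 48 bytes apparently holding the iSCSI
 * basic header segment in the original driver) and described by out_dsd; the response
 * buffer, if any, is described by in_dsd. The control-flag constants 512U and 4352U
 * (0x1100) are assumed to correspond to PT_FLAG_SEND_BUFFER and
 * PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE in the original sources; those names do
 * not appear in this generated file. */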
passthru_iocb->hdr.entryType = 58U; passthru_iocb->hdr.systemDefined = 1U; passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt; passthru_iocb->handle = (uint32_t )task->itt; passthru_iocb->target = ddb_entry->fw_ddb_index; passthru_iocb->timeout = 30U; if (task_data->req_len != 0U) { memcpy(task_data->req_buffer + 48U, (void const *)task->data, (size_t )task->data_count); ctrl_flags = (uint16_t )((unsigned int )ctrl_flags | 512U); passthru_iocb->out_dsd.base.addrLow = (unsigned int )task_data->req_dma; passthru_iocb->out_dsd.base.addrHigh = (unsigned int )(task_data->req_dma >> 32ULL); passthru_iocb->out_dsd.count = task->data_count + 48U; } else { } if (task_data->resp_len != 0U) { passthru_iocb->in_dsd.base.addrLow = (unsigned int )task_data->resp_dma; passthru_iocb->in_dsd.base.addrHigh = (unsigned int )(task_data->resp_dma >> 32ULL); passthru_iocb->in_dsd.count = task_data->resp_len; } else { } ctrl_flags = (uint16_t )((unsigned int )ctrl_flags | 4352U); passthru_iocb->control_flags = ctrl_flags; qla4xxx_advance_req_ring_ptr(ha); __asm__ volatile ("sfence": : : "memory"); ha->iocb_cnt = (int )ha->iocb_cnt + (int )((uint16_t )task_data->iocb_req_cnt); ha->req_q_count = (int )ha->req_q_count - (int )((uint16_t )task_data->iocb_req_cnt); (*((ha->isp_ops)->queue_iocb))(ha); ret = 0; queuing_error: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (ret); } } static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha ) { struct mrb *mrb ; void *tmp ; { tmp = kzalloc(32UL, 208U); mrb = (struct mrb *)tmp; if ((unsigned long )mrb == (unsigned long )((struct mrb *)0)) { return (mrb); } else { } mrb->ha = ha; return (mrb); } } static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha , struct mrb *mrb , uint32_t *in_mbox ) { int rval ; uint32_t i ; unsigned long flags ; uint32_t index ; { rval = 0; index = 0U; ldv_spin_lock(); rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **)(& mrb->mbox)); if (rval != 0) { goto exit_mbox_iocb; } else { } index = ha->mrb_index; i = 0U; goto ldv_63402; ldv_63401: index = index + 1U; if (index == 128U) { index = 1U; } else { } if ((unsigned long )ha->active_mrb_array[index] == (unsigned long )((struct mrb *)0)) { ha->mrb_index = index; goto ldv_63400; } else { } i = i + 1U; ldv_63402: ; if (i <= 127U) { goto ldv_63401; } else { } ldv_63400: mrb->iocb_cnt = 1U; ha->active_mrb_array[index] = mrb; (mrb->mbox)->handle = index; (mrb->mbox)->hdr.entryType = 56U; (mrb->mbox)->hdr.entryCount = (uint8_t )mrb->iocb_cnt; memcpy((void *)(& (mrb->mbox)->in_mbox), (void const *)in_mbox, 32UL); mrb->mbox_cmd = *in_mbox; __asm__ volatile ("sfence": : : "memory"); ha->iocb_cnt = (int )ha->iocb_cnt + (int )mrb->iocb_cnt; (*((ha->isp_ops)->queue_iocb))(ha); exit_mbox_iocb: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (rval); } } int qla4xxx_ping_iocb(struct scsi_qla_host *ha , uint32_t options , uint32_t payload_size , uint32_t pid , uint8_t *ipaddr ) { uint32_t in_mbox[8U] ; struct mrb *mrb ; int rval ; { mrb = (struct mrb *)0; rval = 0; memset((void *)(& in_mbox), 0, 32UL); mrb = qla4xxx_get_new_mrb(ha); if ((unsigned long )mrb == (unsigned long )((struct mrb *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: fail to get new mrb\n", "qla4xxx_ping_iocb"); } else { } rval = 1; goto exit_ping; } else { } in_mbox[0] = 11U; in_mbox[1] = options; memcpy((void *)(& in_mbox) + 2U, (void const *)ipaddr, 4UL); memcpy((void *)(& in_mbox) + 3U, (void const *)ipaddr + 4U, 4UL); memcpy((void *)(& 
in_mbox) + 4U, (void const *)ipaddr + 8U, 4UL); memcpy((void *)(& in_mbox) + 5U, (void const *)ipaddr + 12U, 4UL); in_mbox[6] = payload_size; mrb->pid = pid; rval = qla4xxx_send_mbox_iocb(ha, mrb, (uint32_t *)(& in_mbox)); if (rval != 0) { goto exit_ping; } else { } return (rval); exit_ping: kfree((void const *)mrb); return (rval); } } bool ldv_queue_work_on_202(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_203(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_204(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_205(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_206(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_212(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_218(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_220(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_222(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_223(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_224(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_225(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_226(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int 
)((long )tmp)); } } int ldv_pskb_expand_head_227(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_228(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_229(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_230(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static void ldv_spin_lock_241(spinlock_t *lock ) { { _raw_spin_lock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_lock(spinlock_t *lock ) ; __inline static void ldv_spin_unlock_245(spinlock_t *lock ) { { _raw_spin_unlock(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) ; __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; bool ldv_queue_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_253(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_252(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_255(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_254(struct workqueue_struct *ldv_func_arg1 ) ; __inline static bool queue_work___0(struct workqueue_struct *wq , struct work_struct *work ) { bool tmp ; { tmp = ldv_queue_work_on_251(8192, wq, work); return (tmp); } } void *ldv_kmem_cache_alloc_261(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_278(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; int reg_check_1(irqreturn_t (*handler)(int , void * ) ) ; void choose_interrupt_1(void) ; void disable_suitable_irq_1(int line , void *data ) ; int ldv_irq_1(int state , int line , void *data ) ; void activate_suitable_irq_1(int line , void *data ) ; extern void pci_disable_msi(struct pci_dev * ) ; extern int pci_enable_msi_range(struct pci_dev * , int , int ) ; __inline static int pci_enable_msi_exact(struct pci_dev *dev , int nvec ) { int rc ; int tmp ; { tmp = pci_enable_msi_range(dev, nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } } extern int request_threaded_irq(unsigned int , irqreturn_t (*)(int , void * ) , irqreturn_t (*)(int , void * ) , unsigned long , char const * , void * ) ; __inline static int request_irq(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { int tmp ; { tmp = request_threaded_irq(irq, handler, (irqreturn_t (*)(int , void * ))0, flags, name, dev); return (tmp); } } __inline 
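/* LDV environment model: the ldv_request_irq_*() wrappers declared below
 * call request_irq() and, on success, record the (line, handler, data)
 * triple via activate_suitable_irq_1() so the verifier can later fire the
 * handler nondeterministically through choose_interrupt_1()/ldv_irq_1(). */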
static int ldv_request_irq_280(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; __inline static int ldv_request_irq_281(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; extern void free_irq(unsigned int , void * ) ; void ldv_free_irq_282(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; void ldv_free_irq_283(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; struct sk_buff *ldv_skb_clone_269(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_277(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_271(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_267(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_275(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_276(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_272(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_273(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_274(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_279(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void scsi_set_resid(struct scsi_cmnd *cmd , int resid ) { { cmd->sdb.resid = resid; return; } } extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn * , itt_t ) ; __inline static void *isp_port_error_status(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return ((void *)(tmp != 0 ? & (ha->reg)->u2.isp4010.port_err_status : & (ha->reg)->u2.isp4022.__annonCompField129.p0.port_err_status)); } } void qla4xxx_process_response_queue(struct scsi_qla_host *ha ) ; void qla4xxx_dump_registers(struct scsi_qla_host *ha ) ; int qla4_8xxx_enable_msix(struct scsi_qla_host *ha ) ; void qla4_8xxx_disable_msix(struct scsi_qla_host *ha ) ; irqreturn_t qla4_8xxx_msi_handler(int irq , void *dev_id ) ; irqreturn_t qla4_8xxx_default_intr_handler(int irq , void *dev_id ) ; irqreturn_t qla4_8xxx_msix_rsp_q(int irq , void *dev_id ) ; static void qla4xxx_copy_sense(struct scsi_qla_host *ha , struct status_entry *sts_entry , struct srb *srb ) { struct scsi_cmnd *cmd ; uint16_t sense_len ; uint16_t __min1 ; uint16_t __min2 ; uint16_t __min1___0 ; uint16_t __min2___0 ; { cmd = srb->cmd; memset((void *)cmd->sense_buffer, 0, 96UL); sense_len = sts_entry->senseDataByteCnt; if ((unsigned int )sense_len == 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%d:%llu: %s: sense len 0\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_copy_sense"); } else { } ha->status_srb = (struct srb *)0; return; } else { } __min1 = sense_len; __min2 = 96U; sense_len = (uint16_t )((int )__min1 < (int )__min2 ? __min1 : __min2); srb->req_sense_ptr = cmd->sense_buffer; srb->req_sense_len = sense_len; __min1___0 = sense_len; __min2___0 = 32U; sense_len = (uint16_t )((int )__min1___0 < (int )__min2___0 ? 
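/* Two-stage clamp of the sense data length, expanded from min_t():
 * roughly sense_len = min(senseDataByteCnt, 96) to fit the sense buffer,
 * then the amount copied from this status entry is further capped at 32
 * bytes; any remainder is pulled in later through status-continuation
 * entries (see qla4xxx_status_cont_entry, which copies up to 60 bytes
 * per continuation). */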
__min1___0 : __min2___0); memcpy((void *)cmd->sense_buffer, (void const *)(& sts_entry->senseData), (size_t )sense_len); if (ql4xextended_error_logging == 2) { printk("\016scsi%ld:%d:%d:%llu: %s: sense key = %x, ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_copy_sense", (int )sts_entry->senseData[2] & 15, (int )sts_entry->senseData[7], (int )sts_entry->senseData[12], (int )sts_entry->senseData[13]); } else { } srb->flags = (uint16_t )((unsigned int )srb->flags | 16U); srb->req_sense_ptr = srb->req_sense_ptr + (unsigned long )sense_len; srb->req_sense_len = (int )srb->req_sense_len - (int )sense_len; if ((unsigned int )srb->req_sense_len != 0U) { ha->status_srb = srb; } else { ha->status_srb = (struct srb *)0; } return; } } static void qla4xxx_status_cont_entry(struct scsi_qla_host *ha , struct status_cont_entry *sts_cont ) { struct srb *srb ; struct scsi_cmnd *cmd ; uint16_t sense_len ; uint16_t __min1 ; uint16_t __min2 ; { srb = ha->status_srb; if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { return; } else { } cmd = srb->cmd; if ((unsigned long )cmd == (unsigned long )((struct scsi_cmnd *)0)) { if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: %s: Cmd already returned back to OS srb=%p srb->state:%d\n", ha->host_no, "qla4xxx_status_cont_entry", srb, (int )srb->state); } else { } ha->status_srb = (struct srb *)0; return; } else { } __min1 = srb->req_sense_len; __min2 = 60U; sense_len = (uint16_t )((int )__min1 < (int )__min2 ? __min1 : __min2); memcpy((void *)srb->req_sense_ptr, (void const *)(& sts_cont->ext_sense_data), (size_t )sense_len); srb->req_sense_ptr = srb->req_sense_ptr + (unsigned long )sense_len; srb->req_sense_len = (int )srb->req_sense_len - (int )sense_len; if ((unsigned int )srb->req_sense_len == 0U) { kref_put(& srb->srb_ref, & qla4xxx_srb_compl); ha->status_srb = (struct srb *)0; } else { } return; } } static void qla4xxx_status_entry(struct scsi_qla_host *ha , struct status_entry *sts_entry ) { uint8_t scsi_status ; struct scsi_cmnd *cmd ; struct srb *srb ; struct ddb_entry *ddb_entry ; uint32_t residual ; int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; int tmp___2 ; unsigned int tmp___3 ; unsigned int tmp___4 ; unsigned int tmp___5 ; int tmp___6 ; { srb = qla4xxx_del_from_active_array(ha, sts_entry->handle); if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s invalid status entry: handle=0x%0x, srb=%p\n", "qla4xxx_status_entry", sts_entry->handle, srb); tmp = is_qla80XX(ha); if (tmp != 0) { set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); } else { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } return; } else { } cmd = srb->cmd; if ((unsigned long )cmd == (unsigned long )((struct scsi_cmnd *)0)) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Command already returned back to OS pkt->handle=%d srb=%p srb->state:%d\n", ha->host_no, "qla4xxx_status_entry", sts_entry->handle, srb, (int )srb->state); } else { } dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Command is NULL: already returned to OS (srb=%p)\n", srb); return; } else { } ddb_entry = srb->ddb; if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { cmd->result = 65536; goto status_entry_exit; } else { } residual = sts_entry->residualByteCnt; scsi_status = sts_entry->scsiStatus; switch ((int )sts_entry->completionStatus) { case 0: ; if (((int 
)sts_entry->iscsiFlags & 4) != 0) { cmd->result = 458752; goto ldv_63365; } else { } if (((int )sts_entry->iscsiFlags & 2) != 0) { scsi_set_resid(cmd, (int )residual); if ((unsigned int )scsi_status == 0U) { tmp___1 = scsi_bufflen(cmd); if (tmp___1 - residual < cmd->underflow) { cmd->result = 458752; if (ql4xextended_error_logging == 2) { tmp___0 = scsi_bufflen(cmd); printk("scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun0, xferlen = 0x%x, residual = 0x%x\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry", tmp___0, residual); } else { } goto ldv_63365; } else { } } else { } } else { } cmd->result = (int )scsi_status; if ((unsigned int )scsi_status != 2U) { goto ldv_63365; } else { } qla4xxx_copy_sense(ha, sts_entry, srb); goto ldv_63365; case 1: cmd->result = 458752; goto ldv_63365; case 4: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%d:%llu: %s: Device RESET occurred\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry"); } else { } cmd->result = 524288; goto ldv_63365; case 5: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%d:%llu: %s: Abort occurred\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry"); } else { } cmd->result = 524288; goto ldv_63365; case 6: ; if (ql4xextended_error_logging == 2) { printk("\016scsi%ld:%d:%d:%llu: Timeout\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun); } else { } cmd->result = 917504; tmp___2 = iscsi_is_session_online(ddb_entry->sess); if (tmp___2 != 0) { qla4xxx_mark_device_missing(ddb_entry->sess); } else { } goto ldv_63365; case 21: ; case 7: ; if (((int )sts_entry->iscsiFlags & 4) != 0 || (unsigned int )sts_entry->completionStatus == 7U) { if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%d:%llu: %s: Data overrun\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry"); } else { } cmd->result = 458752; goto ldv_63365; } else { } scsi_set_resid(cmd, (int )residual); if (((int )sts_entry->iscsiFlags & 2) != 0) { if ((unsigned int )scsi_status == 0U) { tmp___4 = scsi_bufflen(cmd); if (tmp___4 - residual < cmd->underflow) { if (ql4xextended_error_logging == 2) { tmp___3 = scsi_bufflen(cmd); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%d:%llu: %s: Mid-layer Data underrun, xferlen = 0x%x,residual = 0x%x\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry", tmp___3, residual); } else { } cmd->result = 458752; goto ldv_63365; } else { } } else { } } else if ((unsigned int )scsi_status != 40U && (unsigned int )scsi_status != 8U) { if (ql4xextended_error_logging == 2) { tmp___5 = scsi_bufflen(cmd); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld:%d:%d:%llu: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry", residual, tmp___5); } else { } cmd->result = (int )scsi_status | 458752; goto check_scsi_status; } else { } cmd->result = (int )scsi_status; check_scsi_status: ; if ((unsigned int )scsi_status == 2U) { qla4xxx_copy_sense(ha, sts_entry, srb); } else { } goto ldv_63365; case 41: ; case 40: ; if (ql4xextended_error_logging == 2) { printk("\016scsi%ld:%d:%d:%llu: SCS_DEVICE state: 0x%x\n", ha->host_no, (cmd->device)->channel, (cmd->device)->id, (cmd->device)->lun, (int 
)sts_entry->completionStatus); } else { } tmp___6 = iscsi_is_session_online(ddb_entry->sess); if (tmp___6 != 0) { qla4xxx_mark_device_missing(ddb_entry->sess); } else { } cmd->result = 917504; goto ldv_63365; case 28: cmd->result = (int )sts_entry->scsiStatus; if (ql4xextended_error_logging == 2) { printk("scsi%ld:%d:%llu: %s: QUEUE FULL detected compl=%02x, scsi=%02x, state=%02x, iFlags=%02x, iResp=%02x\n", ha->host_no, (cmd->device)->id, (cmd->device)->lun, "qla4xxx_status_entry", (int )sts_entry->completionStatus, (int )sts_entry->scsiStatus, (int )sts_entry->state_flags, (int )sts_entry->iscsiFlags, (int )sts_entry->iscsiResponse); } else { } goto ldv_63365; default: cmd->result = 458752; goto ldv_63365; } ldv_63365: ; status_entry_exit: srb->cc_stat = (uint16_t )sts_entry->completionStatus; if ((unsigned long )ha->status_srb == (unsigned long )((struct srb *)0)) { kref_put(& srb->srb_ref, & qla4xxx_srb_compl); } else { } return; } } static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha , struct passthru_status *sts_entry ) { struct iscsi_task *task ; struct ddb_entry *ddb_entry ; struct ql4_task_data *task_data ; struct iscsi_cls_conn *cls_conn ; struct iscsi_conn *conn ; itt_t itt ; uint32_t fw_ddb_index ; { itt = sts_entry->handle; fw_ddb_index = (unsigned int )sts_entry->target; ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); if ((unsigned long )ddb_entry == (unsigned long )((struct ddb_entry *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid target index = 0x%x\n", "qla4xxx_passthru_status_entry", (int )sts_entry->target); return; } else { } cls_conn = ddb_entry->conn; conn = (struct iscsi_conn *)cls_conn->dd_data; spin_lock(& (conn->session)->back_lock); task = iscsi_itt_to_task(conn, itt); spin_unlock(& (conn->session)->back_lock); if ((unsigned long )task == (unsigned long )((struct iscsi_task *)0)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Task is NULL\n", "qla4xxx_passthru_status_entry"); return; } else { } task_data = (struct ql4_task_data *)task->dd_data; memcpy((void *)(& task_data->sts), (void const *)sts_entry, 64UL); ha->iocb_cnt = (int )ha->iocb_cnt - (int )((uint16_t )task_data->iocb_req_cnt); queue_work___0(ha->task_wq, & task_data->task_work); return; } } static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha , uint32_t index ) { struct mrb *mrb ; { mrb = (struct mrb *)0; if (index > 127U) { return (mrb); } else { } mrb = ha->active_mrb_array[index]; ha->active_mrb_array[index] = (struct mrb *)0; if ((unsigned long )mrb == (unsigned long )((struct mrb *)0)) { return (mrb); } else { } ha->iocb_cnt = (int )ha->iocb_cnt - (int )mrb->iocb_cnt; return (mrb); } } static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha , struct mbox_status_iocb *mbox_sts_entry ) { struct mrb *mrb ; uint32_t status ; uint32_t data_size ; { mrb = qla4xxx_del_mrb_from_active_array(ha, mbox_sts_entry->handle); if ((unsigned long )mrb == (unsigned long )((struct mrb *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: mrb[%d] is null\n", "qla4xxx_mbox_status_entry", mbox_sts_entry->handle); return; } else { } switch (mrb->mbox_cmd) { case 11U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox_cmd = 0x%x, mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n", "qla4xxx_mbox_status_entry", mrb->mbox_cmd, mbox_sts_entry->out_mbox[0], mbox_sts_entry->out_mbox[6]); } else { } if (mbox_sts_entry->out_mbox[0] == 
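/* 16384 (0x4000) is presumably the firmware's command-complete mailbox
 * status in the original driver; on success the ping status is reported
 * as 0, otherwise mailbox word 6 carries the failure status before the
 * ping event work is posted. */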
16384U) { status = 0U; } else { status = mbox_sts_entry->out_mbox[6]; } data_size = 32U; qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size, (uint8_t *)(& mbox_sts_entry->out_mbox)); goto ldv_63403; default: ; if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: invalid mbox_cmd = 0x%x\n", "qla4xxx_mbox_status_entry", mrb->mbox_cmd); } else { } } ldv_63403: kfree((void const *)mrb); return; } } void qla4xxx_process_response_queue(struct scsi_qla_host *ha ) { uint32_t count ; struct srb *srb ; struct status_entry *sts_entry ; { count = 0U; srb = (struct srb *)0; goto ldv_63423; ldv_63422: sts_entry = (struct status_entry *)ha->response_ptr; count = count + 1U; if ((unsigned int )ha->response_out == 63U) { ha->response_out = 0U; ha->response_ptr = ha->response_ring; } else { ha->response_out = (uint16_t )((int )ha->response_out + 1); ha->response_ptr = ha->response_ptr + 1; } switch ((int )sts_entry->hdr.entryType) { case 3: qla4xxx_status_entry(ha, sts_entry); goto ldv_63412; case 60: ; if ((unsigned int )sts_entry->hdr.systemDefined == 1U) { qla4xxx_passthru_status_entry(ha, (struct passthru_status *)sts_entry); } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid status received\n", "qla4xxx_process_response_queue"); } goto ldv_63412; case 16: qla4xxx_status_cont_entry(ha, (struct status_cont_entry *)sts_entry); goto ldv_63412; case 25: srb = qla4xxx_del_from_active_array(ha, sts_entry->handle); if ((unsigned long )srb == (unsigned long )((struct srb *)0)) { goto exit_prq_invalid_handle; } else { } if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: FW device queue full, srb %p\n", ha->host_no, "qla4xxx_process_response_queue", srb); } else { } (srb->cmd)->result = 131072; kref_put(& srb->srb_ref, & qla4xxx_srb_compl); goto ldv_63412; case 10: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Continuation entry - ignoring\n", ha->host_no, "qla4xxx_process_response_queue"); } else { } goto ldv_63412; case 57: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox status IOCB\n", "qla4xxx_process_response_queue"); } else { } qla4xxx_mbox_status_entry(ha, (struct mbox_status_iocb *)sts_entry); goto ldv_63412; default: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Invalid entry %x in response queue \n", ha->host_no, "qla4xxx_process_response_queue", (int )sts_entry->hdr.entryType); } else { } goto exit_prq_error; } ldv_63412: ((struct response *)sts_entry)->signature = 3735936685U; __asm__ volatile ("sfence": : : "memory"); ldv_63423: ; if ((ha->response_ptr)->signature != 3735936685U) { goto ldv_63422; } else { } (*((ha->isp_ops)->complete_iocb))(ha); return; exit_prq_invalid_handle: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n", ha->host_no, "qla4xxx_process_response_queue", srb, (int )sts_entry->hdr.entryType, (int )sts_entry->completionStatus); } else { } exit_prq_error: (*((ha->isp_ops)->complete_iocb))(ha); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); return; } } static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha ) { int rval ; int tmp ; int tmp___0 ; { rval = 1; tmp = is_qla8032(ha); if (tmp != 0) { goto _L; } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { _L: /* CIL Label */ if ((ha->idc_info.info2 & 4U) != 0U || (ha->idc_info.info2 & 8U) != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct 
device const *)(& (ha->pdev)->dev), "%s: Loopback diagnostics in progress\n", "qla4_83xx_loopback_in_progress"); } else { } rval = 1; } else { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Loopback diagnostics not in progress\n", "qla4_83xx_loopback_in_progress"); } else { } rval = 0; } } else { } } return (rval); } } static void qla4xxx_update_ipaddr_state(struct scsi_qla_host *ha , uint32_t ipaddr_idx , uint32_t ipaddr_fw_state ) { uint8_t ipaddr_state ; uint8_t ip_idx ; { ip_idx = (unsigned int )((uint8_t )ipaddr_idx) & 15U; ipaddr_state = qla4xxx_set_ipaddr_state((int )((unsigned char )ipaddr_fw_state)); switch ((int )ip_idx) { case 0: ha->ip_config.ipv4_addr_state = ipaddr_state; goto ldv_63438; case 1: ha->ip_config.ipv6_link_local_state = ipaddr_state; goto ldv_63438; case 2: ha->ip_config.ipv6_addr0_state = ipaddr_state; goto ldv_63438; case 3: ha->ip_config.ipv6_addr1_state = ipaddr_state; goto ldv_63438; default: dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid IPADDR index %d\n", "qla4xxx_update_ipaddr_state", (int )ip_idx); } ldv_63438: ; return; } } static void qla4xxx_default_router_changed(struct scsi_qla_host *ha , uint32_t *mbox_sts ) { { memcpy((void *)(& ha->ip_config.ipv6_default_router_addr.in6_u.u6_addr32), (void const *)mbox_sts + 2U, 4UL); memcpy((void *)(& ha->ip_config.ipv6_default_router_addr.in6_u.u6_addr32) + 1U, (void const *)mbox_sts + 3U, 4UL); memcpy((void *)(& ha->ip_config.ipv6_default_router_addr.in6_u.u6_addr32) + 2U, (void const *)mbox_sts + 4U, 4UL); memcpy((void *)(& ha->ip_config.ipv6_default_router_addr.in6_u.u6_addr32) + 3U, (void const *)mbox_sts + 5U, 4UL); return; } } static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha , uint32_t mbox_status ) { int i ; uint32_t mbox_sts[8U] ; __le32 *mailbox_out ; uint32_t opcode ; int tmp ; int tmp___0 ; int tmp___1 ; unsigned int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; { opcode = 0U; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { mailbox_out = (__le32 *)(& (ha->qla4_83xx_reg)->mailbox_out); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { mailbox_out = (__le32 *)(& (ha->qla4_83xx_reg)->mailbox_out); } else { tmp = is_qla8022(ha); if (tmp != 0) { mailbox_out = (__le32 *)(& (ha->qla4_82xx_reg)->mailbox_out); } else { mailbox_out = (__le32 *)(& (ha->reg)->mailbox); } } } if ((mbox_status == 7U || mbox_status == 4096U) || mbox_status >> 12 == 4U) { ha->mbox_status[0] = mbox_status; tmp___4 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->flags)); if (tmp___4 != 0) { i = 0; goto ldv_63457; ldv_63456: tmp___2 = readl((void const volatile *)mailbox_out + (unsigned long )i); ha->mbox_status[i] = tmp___2; i = i + 1; ldv_63457: ; if ((int )ha->mbox_status_count > i) { goto ldv_63456; } else { } set_bit(3L, (unsigned long volatile *)(& ha->flags)); tmp___3 = constant_test_bit(18L, (unsigned long const volatile *)(& ha->flags)); if (tmp___3 != 0) { complete(& ha->mbx_intr_comp); } else { } } else { } } else if (mbox_status >> 12 == 8U) { i = 0; goto ldv_63460; ldv_63459: mbox_sts[i] = readl((void const volatile *)mailbox_out + (unsigned long )i); i = i + 1; ldv_63460: ; if (i <= 7) { goto ldv_63459; } else { } if (ha->aen_log.count <= 511) { i = 0; goto ldv_63463; ldv_63462: 
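/* AEN bookkeeping: while fewer than 512 entries have been logged, all
 * eight mailbox registers of this AEN are appended to ha->aen_log before
 * the status code is dispatched in the switch below. */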
ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] = mbox_sts[i]; i = i + 1; ldv_63463: ; if (i <= 7) { goto ldv_63462; } else { } ha->aen_log.count = ha->aen_log.count + 1; } else { } switch (mbox_status) { case 32770U: dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: System Err\n", "qla4xxx_isr_decode_mailbox"); qla4xxx_dump_registers(ha); tmp___5 = is_qla8022(ha); if (tmp___5 != 0 && ql4xdontresethba != 0) { goto _L; } else { tmp___6 = is_qla8032(ha); if (tmp___6 != 0) { goto _L___0; } else { tmp___7 = is_qla8042(ha); if (tmp___7 != 0) { _L___0: /* CIL Label */ tmp___8 = qla4_83xx_idc_dontreset(ha); if (tmp___8 != 0) { _L: /* CIL Label */ if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s:Don\'t Reset HBA\n", ha->host_no, "qla4xxx_isr_decode_mailbox"); } else { } } else { set_bit(7L, (unsigned long volatile *)(& ha->flags)); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } } else { set_bit(7L, (unsigned long volatile *)(& ha->flags)); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } } } goto ldv_63467; case 32771U: ; case 32772U: ; case 32794U: ; case 32796U: ; case 32797U: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN %04x, OLD_ERROR Status, Reset HA\n", ha->host_no, mbox_status); } else { } tmp___9 = is_qla80XX(ha); if (tmp___9 != 0) { set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); } else { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } goto ldv_63467; case 32784U: set_bit(8L, (unsigned long volatile *)(& ha->flags)); tmp___10 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp___10 != 0) { set_bit(18L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: LINK UP\n", "qla4xxx_isr_decode_mailbox"); qla4xxx_post_aen_work(ha, 1, 32U, (uint8_t *)(& mbox_sts)); tmp___11 = is_qla8032(ha); if (tmp___11 != 0) { goto _L___1; } else { tmp___12 = is_qla8042(ha); if (tmp___12 != 0) { _L___1: /* CIL Label */ if (ha->notify_link_up_comp != 0) { complete(& ha->link_up_comp); } else { } } else { } } goto ldv_63467; case 32785U: clear_bit(8L, (unsigned long volatile *)(& ha->flags)); tmp___13 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp___13 != 0) { set_bit(18L, (unsigned long volatile *)(& ha->dpc_flags)); qla4xxx_wake_dpc(ha); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: LINK DOWN\n", "qla4xxx_isr_decode_mailbox"); qla4xxx_post_aen_work(ha, 2, 32U, (uint8_t *)(& mbox_sts)); goto ldv_63467; case 32793U: ha->seconds_since_last_heartbeat = 0U; goto ldv_63467; case 32799U: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN %04x DHCP LEASE ACQUIRED\n", ha->host_no, mbox_status); } else { } set_bit(15L, (unsigned long volatile *)(& ha->dpc_flags)); goto ldv_63467; case 32773U: ; case 32774U: ; case 32789U: ; case 32802U: ; case 32807U: ; case 32805U: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN %04x\n", ha->host_no, mbox_status); } else { } goto ldv_63467; case 32809U: printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[2], mbox_sts[3]); qla4xxx_update_ipaddr_state(ha, mbox_sts[5], mbox_sts[3]); if (mbox_sts[3] == 5U && (mbox_sts[2] == 3U || mbox_sts[2] == 2U)) { set_bit(15L, (unsigned long volatile *)(& ha->dpc_flags)); } else if (mbox_sts[3] == 2U && mbox_sts[2] == 5U) { tmp___14 = is_qla80XX(ha); if (tmp___14 != 0) { set_bit(4L, (unsigned long volatile *)(& ha->dpc_flags)); } 
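/* Above: when the ACB falls from VALID (5) back to ACQUIRING (2), a reset
 * is scheduled -- ISP80xx-family adapters set dpc_flags bit 4 and legacy
 * ISP4xxx parts set bit 1 (these appear to correspond to
 * DPC_RESET_HA_FW_CONTEXT and DPC_RESET_HA in the original driver). */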
else { set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); } } else if (mbox_sts[3] == 6U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ACB in disabling state\n", ha->host_no, "qla4xxx_isr_decode_mailbox"); } else if (mbox_sts[3] == 0U) { complete(& ha->disable_acb_comp); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: %s: ACB state unconfigured\n", ha->host_no, "qla4xxx_isr_decode_mailbox"); } else { } goto ldv_63467; case 32811U: ; case 32812U: ; case 32813U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x\n", ha->host_no, mbox_status); } else { } goto ldv_63467; case 32814U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, IPv6 OLD_ERROR, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3}=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } goto ldv_63467; case 32795U: ; case 32792U: ; if (ql4xextended_error_logging == 2) { printk("\016scsi%ld: AEN %04x, mbox_sts[1]=%04x, mbox_sts[2]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2]); } else { } goto ldv_63467; case 32790U: ; case 32791U: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, mbox_sts[2]=%04x, mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3]); } else { } goto ldv_63467; case 32788U: ; if ((unsigned int )ha->aen_q_count != 0U) { ha->aen_q_count = (uint16_t )((int )ha->aen_q_count - 1); i = 0; goto ldv_63494; ldv_63493: ha->aen_q[(int )ha->aen_in].mbox_sts[i] = mbox_sts[i]; i = i + 1; ldv_63494: ; if (i <= 7) { goto ldv_63493; } else { } if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN[%d] %04x queued mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x mb5:0x%x\n", ha->host_no, (int )ha->aen_in, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } ha->aen_in = (uint16_t )((int )ha->aen_in + 1); if ((unsigned int )ha->aen_in == 512U) { ha->aen_in = 0U; } else { } set_bit(9L, (unsigned long volatile *)(& ha->dpc_flags)); } else { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: aen %04x, queue overflowed! 
AEN LOST!!\n", ha->host_no, "qla4xxx_isr_decode_mailbox", mbox_sts[0]); } else { } if (ql4xextended_error_logging == 2) { printk("scsi%ld: DUMP AEN QUEUE\n", ha->host_no); } else { } i = 0; goto ldv_63497; ldv_63496: ; if (ql4xextended_error_logging == 2) { printk("AEN[%d] %04x %04x %04x %04x\n", i, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3]); } else { } i = i + 1; ldv_63497: ; if (i <= 511) { goto ldv_63496; } else { } } goto ldv_63467; case 33072U: ; if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: AEN %04x Transceiver inserted\n", ha->host_no, mbox_sts[0]); } else { } goto ldv_63467; case 33073U: ; if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: AEN %04x Transceiver removed\n", ha->host_no, mbox_sts[0]); } else { } goto ldv_63467; case 33025U: tmp___15 = is_qla8032(ha); if (tmp___15 != 0) { goto _L___2; } else { tmp___16 = is_qla8042(ha); if (tmp___16 != 0) { _L___2: /* CIL Label */ if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]); } else { } opcode = mbox_sts[1] >> 16; if (opcode == 290U || opcode == 288U) { set_bit(23L, (unsigned long volatile *)(& ha->dpc_flags)); ha->idc_info.request_desc = mbox_sts[1]; ha->idc_info.info1 = mbox_sts[2]; ha->idc_info.info2 = mbox_sts[3]; ha->idc_info.info3 = mbox_sts[4]; qla4xxx_wake_dpc(ha); } else { } } else { } } goto ldv_63467; case 33024U: tmp___18 = is_qla8032(ha); if (tmp___18 != 0) { goto _L___3; } else { tmp___19 = is_qla8042(ha); if (tmp___19 != 0) { _L___3: /* CIL Label */ if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi:%ld: AEN %04x IDC Complete notification\n", ha->host_no, mbox_sts[0]); } else { } opcode = mbox_sts[1] >> 16; if (ha->notify_idc_comp != 0) { complete(& ha->idc_comp); } else { } if (opcode == 290U || opcode == 288U) { ha->idc_info.info2 = mbox_sts[3]; } else { } tmp___17 = qla4_83xx_loopback_in_progress(ha); if (tmp___17 != 0) { set_bit(9L, (unsigned long volatile *)(& ha->flags)); } else { clear_bit(9L, (unsigned long volatile *)(& ha->flags)); if ((unsigned long )ha->saved_acb != (unsigned long )((struct addr_ctrl_blk *)0)) { set_bit(24L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } } qla4xxx_wake_dpc(ha); } else { } } goto ldv_63467; case 32810U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x Received IPv6 default router changed notification\n", ha->host_no, mbox_sts[0]); } else { } qla4xxx_default_router_changed(ha, (uint32_t *)(& mbox_sts)); goto ldv_63467; case 33026U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, 
mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n", ha->host_no, mbox_sts[0]); } else { } ha->idc_extend_tmo = (int )mbox_sts[1]; goto ldv_63467; case 32817U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[3]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[3]); } else { } goto ldv_63467; case 32822U: ; if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } goto ldv_63467; case 33040U: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n", ha->host_no, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5]); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi%ld: AEN %04x Received DCBX configuration changed notification\n", ha->host_no, mbox_sts[0]); } else { } goto ldv_63467; default: ; if (ql4xextended_error_logging == 2) { printk("\fscsi%ld: AEN %04x UNKNOWN\n", ha->host_no, mbox_sts[0]); } else { } goto ldv_63467; } ldv_63467: ; } else { if (ql4xextended_error_logging == 2) { printk("scsi%ld: Unknown mailbox status %08X\n", ha->host_no, mbox_status); } else { } ha->mbox_status[0] = mbox_status; } return; } } void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) { unsigned int tmp ; { if (intr_status != 0U) { tmp = readl((void const volatile *)(& (ha->qla4_83xx_reg)->mailbox_out)); qla4xxx_isr_decode_mailbox(ha, tmp); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); } else { qla4xxx_process_response_queue(ha); } writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->mb_int_mask)); return; } } void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) { int tmp ; unsigned int tmp___0 ; { if ((intr_status & 2U) != 0U) { tmp = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { qla4xxx_process_response_queue(ha); } else { } } else { } if ((int )intr_status & 1) { tmp___0 = readl((void const volatile *)(& (ha->qla4_82xx_reg)->mailbox_out)); qla4xxx_isr_decode_mailbox(ha, tmp___0); } else { } writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->host_int)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); return; } } void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha , uint32_t intr_status ) { unsigned int tmp ; uint32_t tmp___0 ; { if ((intr_status & 16U) != 0U) { qla4xxx_process_response_queue(ha); } else { } if ((intr_status & 32U) != 0U) { tmp = readl((void const volatile *)(& (ha->reg)->mailbox)); qla4xxx_isr_decode_mailbox(ha, tmp); tmp___0 = set_rmask(32U); writel(tmp___0, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } else { } return; } } static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha , uint8_t reqs_count ) { int tmp ; 
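/* Helper for the ISP82xx interrupt paths: an interrupt only counts as
 * spurious when no work was claimed (reqs_count == 0); on ISP8022 it
 * clears host_int and, when legacy INTx is in use (flags bit 15), rewrites
 * the legacy interrupt target mask before bumping ha->spurious_int_count. */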
int tmp___0 ; { if ((unsigned int )reqs_count != 0U) { return; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Spurious Interrupt\n"); } else { } tmp___0 = is_qla8022(ha); if (tmp___0 != 0) { writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->host_int)); tmp = constant_test_bit(15L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } else { } } else { } ha->spurious_int_count = ha->spurious_int_count + 1U; return; } } irqreturn_t qla4xxx_intr_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; uint32_t intr_status ; unsigned long flags ; uint8_t reqs_count ; uint16_t tmp ; void *tmp___0 ; unsigned int tmp___1 ; uint32_t tmp___2 ; unsigned int tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; int tmp___6 ; { flags = 0UL; reqs_count = 0U; ha = (struct scsi_qla_host *)dev_id; if ((unsigned long )ha == (unsigned long )((struct scsi_qla_host *)0)) { if (ql4xextended_error_logging == 2) { printk("\016qla4xxx: Interrupt with NULL host ptr\n"); } else { } return (0); } else { } ldv_spin_lock(); ha->isr_count = ha->isr_count + 1ULL; ldv_63534: tmp = (*((ha->isp_ops)->rd_shdw_rsp_q_in))(ha); if ((int )tmp != (int )ha->response_out) { intr_status = 16U; } else { intr_status = readl((void const volatile *)(& (ha->reg)->ctrl_status)); } if ((intr_status & 16440U) == 0U) { if ((unsigned int )reqs_count == 0U) { ha->spurious_int_count = ha->spurious_int_count + 1U; } else { } goto ldv_63533; } else { } if ((intr_status & 16384U) != 0U) { if (ql4xextended_error_logging == 2) { tmp___0 = isp_port_error_status(ha); tmp___1 = readl((void const volatile *)tmp___0); printk("\016scsi%ld: Fatal Error, Status 0x%04x\n", ha->host_no, tmp___1); } else { } tmp___3 = readl((void const volatile *)(& (ha->reg)->ctrl_status)); if ((tmp___3 & 8U) == 0U) { tmp___2 = set_rmask(32768U); writel(tmp___2, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); } else { } tmp___4 = set_rmask(16384U); writel(tmp___4, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); __qla4xxx_disable_intrs(ha); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); goto ldv_63533; } else if ((intr_status & 8U) != 0U) { clear_bit(0L, (unsigned long volatile *)(& ha->flags)); __qla4xxx_disable_intrs(ha); tmp___5 = set_rmask(8U); writel(tmp___5, (void volatile *)(& (ha->reg)->ctrl_status)); readl((void const volatile *)(& (ha->reg)->ctrl_status)); tmp___6 = constant_test_bit(12L, (unsigned long const volatile *)(& ha->flags)); if (tmp___6 == 0) { set_bit(5L, (unsigned long volatile *)(& ha->dpc_flags)); } else { } goto ldv_63533; } else if ((intr_status & 56U) != 0U) { (*((ha->isp_ops)->interrupt_service_routine))(ha, intr_status); ha->total_io_count = ha->total_io_count + 1ULL; reqs_count = (uint8_t )((int )reqs_count + 1); if ((unsigned int )reqs_count == 1U) { goto ldv_63533; } else { } } else { } goto ldv_63534; ldv_63533: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla4_82xx_intr_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; uint32_t intr_status ; uint32_t status ; unsigned long flags ; uint8_t reqs_count ; int tmp ; long tmp___0 ; unsigned int tmp___1 ; { ha = (struct scsi_qla_host *)dev_id; flags = 0UL; reqs_count = 0U; tmp = pci_channel_offline(ha->pdev); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return 
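/* If the PCI channel is offline (wrapped in ldv__builtin_expect as the
 * unlikely case) the handler bails out immediately, still reporting
 * IRQ_HANDLED (1) without touching the hardware. */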
(1); } else { } ha->isr_count = ha->isr_count + 1ULL; status = qla4_82xx_rd_32(ha, 101777664UL); if ((ha->nx_legacy_intr.int_vec_bit & status) == 0U) { return (0); } else { } status = qla4_82xx_rd_32(ha, 101785708UL); if ((status & 768U) != 512U) { return (0); } else { } qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_status_reg, 4294967295U); qla4_82xx_rd_32(ha, 101777664UL); qla4_82xx_rd_32(ha, 101777664UL); ldv_spin_lock(); ldv_63545: tmp___1 = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); if ((tmp___1 & 1U) == 0U) { qla4_82xx_spurious_interrupt(ha, (int )reqs_count); goto ldv_63544; } else { } intr_status = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_status)); if ((intr_status & 3U) == 0U) { qla4_82xx_spurious_interrupt(ha, (int )reqs_count); goto ldv_63544; } else { } (*((ha->isp_ops)->interrupt_service_routine))(ha, intr_status); qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); reqs_count = (uint8_t )((int )reqs_count + 1); if ((unsigned int )reqs_count == 1U) { goto ldv_63544; } else { } goto ldv_63545; ldv_63544: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla4_83xx_intr_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; uint32_t leg_int_ptr ; unsigned long flags ; { ha = (struct scsi_qla_host *)dev_id; leg_int_ptr = 0U; flags = 0UL; ha->isr_count = ha->isr_count + 1ULL; leg_int_ptr = readl((void const volatile *)(& (ha->qla4_83xx_reg)->leg_int_ptr)); if ((int )leg_int_ptr >= 0) { return (0); } else { } if ((leg_int_ptr & 983040U) != ha->pf_bit) { return (0); } else { } writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->leg_int_trig)); ldv_63554: leg_int_ptr = readl((void const volatile *)(& (ha->qla4_83xx_reg)->leg_int_ptr)); if ((leg_int_ptr & 983040U) != ha->pf_bit) { goto ldv_63553; } else { } if ((leg_int_ptr & 1073741824U) != 0U) { goto ldv_63554; } else { } ldv_63553: ldv_spin_lock(); leg_int_ptr = readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); (*((ha->isp_ops)->interrupt_service_routine))(ha, leg_int_ptr); spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla4_8xxx_msi_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; irqreturn_t tmp ; { ha = (struct scsi_qla_host *)dev_id; if ((unsigned long )ha == (unsigned long )((struct scsi_qla_host *)0)) { if (ql4xextended_error_logging == 2) { printk("\016qla4xxx: MSIX: Interrupt with NULL host ptr\n"); } else { } return (0); } else { } ha->isr_count = ha->isr_count + 1ULL; qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_status_reg, 4294967295U); qla4_82xx_rd_32(ha, 101777664UL); qla4_82xx_rd_32(ha, 101777664UL); tmp = qla4_8xxx_default_intr_handler(irq, dev_id); return (tmp); } } static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; unsigned long flags ; uint32_t ival ; unsigned int tmp ; { ha = (struct scsi_qla_host *)dev_id; ival = 0U; ldv_spin_lock(); ival = readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); if (ival == 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: It is a spurious mailbox interrupt!\n", "qla4_83xx_mailbox_intr_handler"); ival = readl((void const volatile *)(& (ha->qla4_83xx_reg)->mb_int_mask)); ival = ival & 4294967291U; writel(ival, (void volatile *)(& (ha->qla4_83xx_reg)->mb_int_mask)); goto exit; } else { } tmp = readl((void const volatile *)(& (ha->qla4_83xx_reg)->mailbox_out)); qla4xxx_isr_decode_mailbox(ha, tmp); writel(0U, (void volatile *)(& 
(ha->qla4_83xx_reg)->risc_intr)); ival = readl((void const volatile *)(& (ha->qla4_83xx_reg)->mb_int_mask)); ival = ival & 4294967291U; writel(ival, (void volatile *)(& (ha->qla4_83xx_reg)->mb_int_mask)); ha->isr_count = ha->isr_count + 1ULL; exit: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } irqreturn_t qla4_8xxx_default_intr_handler(int irq , void *dev_id ) { struct scsi_qla_host *ha ; unsigned long flags ; uint32_t intr_status ; uint8_t reqs_count ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; { ha = (struct scsi_qla_host *)dev_id; reqs_count = 0U; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { qla4_83xx_mailbox_intr_handler(irq, dev_id); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { qla4_83xx_mailbox_intr_handler(irq, dev_id); } else { ldv_spin_lock(); ldv_63578: tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); if ((tmp & 1U) == 0U) { qla4_82xx_spurious_interrupt(ha, (int )reqs_count); goto ldv_63577; } else { } intr_status = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_status)); if ((intr_status & 3U) == 0U) { qla4_82xx_spurious_interrupt(ha, (int )reqs_count); goto ldv_63577; } else { } (*((ha->isp_ops)->interrupt_service_routine))(ha, intr_status); reqs_count = (uint8_t )((int )reqs_count + 1); if ((unsigned int )reqs_count == 1U) { goto ldv_63577; } else { } goto ldv_63578; ldv_63577: ha->isr_count = ha->isr_count + 1ULL; spin_unlock_irqrestore(& ha->hardware_lock, flags); } } return (1); } } irqreturn_t qla4_8xxx_msix_rsp_q(int irq , void *dev_id ) { struct scsi_qla_host *ha ; unsigned long flags ; int intr_status ; uint32_t ival ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; { ha = (struct scsi_qla_host *)dev_id; ival = 0U; ldv_spin_lock(); tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { goto _L; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { _L: /* CIL Label */ ival = readl((void const volatile *)(& (ha->qla4_83xx_reg)->iocb_int_mask)); if (ival == 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: It is a spurious iocb interrupt!\n", "qla4_8xxx_msix_rsp_q"); goto exit_msix_rsp_q; } else { } qla4xxx_process_response_queue(ha); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->iocb_int_mask)); } else { tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_status)); intr_status = (int )tmp; if ((intr_status & 2) != 0) { qla4xxx_process_response_queue(ha); writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->host_int)); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: spurious iocb interrupt...\n", "qla4_8xxx_msix_rsp_q"); goto exit_msix_rsp_q; } } } ha->isr_count = ha->isr_count + 1ULL; exit_msix_rsp_q: spin_unlock_irqrestore(& ha->hardware_lock, flags); return (1); } } void qla4xxx_process_aen(struct scsi_qla_host *ha , uint8_t process_aen ) { uint32_t mbox_sts[8U] ; struct aen *aen ; int i ; unsigned long flags ; { ldv_spin_lock(); goto ldv_63606; ldv_63605: aen = (struct aen *)(& ha->aen_q) + (unsigned long )ha->aen_out; i = 0; goto ldv_63598; ldv_63597: mbox_sts[i] = aen->mbox_sts[i]; i = i + 1; ldv_63598: ; if (i <= 7) { goto ldv_63597; } else { } ha->aen_q_count = (uint16_t )((int )ha->aen_q_count + 1); ha->aen_out = (uint16_t )((int )ha->aen_out + 1); if ((unsigned int )ha->aen_out == 512U) { ha->aen_out = 0U; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if (ql4xextended_error_logging == 2) { printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x mbx3=0x%08x mbx4=0x%08x\n", ha->host_no, (unsigned int )ha->aen_out 
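/* The index printed is the slot just consumed: aen_out has already been
 * advanced, so it is aen_out - 1, wrapping back to 511 when aen_out rolled
 * over to 0 (the AEN ring holds 512 entries here). */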
!= 0U ? (int )ha->aen_out + -1 : 511, mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4]); } else { } switch (mbox_sts[0]) { case 32788U: ; switch ((int )process_aen) { case 1: ; if (ql4xextended_error_logging == 2) { printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x FLUSHED!\n", ha->host_no, (int )ha->aen_out, mbox_sts[0], mbox_sts[2], mbox_sts[3]); } else { } goto ldv_63602; case 0: ; default: ; if (mbox_sts[1] == 1U) { qla4xxx_process_ddb_changed(ha, mbox_sts[2], mbox_sts[3], mbox_sts[4]); } else { } goto ldv_63602; } ldv_63602: ; } ldv_spin_lock(); ldv_63606: ; if ((int )ha->aen_out != (int )ha->aen_in) { goto ldv_63605; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); return; } } int qla4xxx_request_irqs(struct scsi_qla_host *ha ) { int ret ; int rval ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { ret = 0; rval = 1; tmp = is_qla40XX(ha); if (tmp != 0) { goto try_intx; } else { } if (ql4xenablemsix == 2) { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n", "qla4xxx_request_irqs", (int )(ha->pdev)->device); goto try_intx; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n", "qla4xxx_request_irqs", (int )(ha->pdev)->device); goto try_intx; } else { } } goto try_msi; } else { } if (ql4xenablemsix == 0 || ql4xenablemsix != 1) { goto try_intx; } else { } ret = qla4_8xxx_enable_msix(ha); if (ret == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "MSI-X: Enabled (0x%X).\n", (int )ha->revision_id); } else { } goto irq_attached; } else { tmp___2 = is_qla8032(ha); if (tmp___2 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n", "qla4xxx_request_irqs", (int )(ha->pdev)->device, ret); goto try_intx; } else { tmp___3 = is_qla8042(ha); if (tmp___3 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: ISP%04x: MSI-X: Falling back-to INTx mode. 
ret = %d\n", "qla4xxx_request_irqs", (int )(ha->pdev)->device, ret); goto try_intx; } else { } } } dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "MSI-X: Falling back-to MSI mode -- %d.\n", ret); try_msi: ret = pci_enable_msi_exact(ha->pdev, 1); if (ret == 0) { ret = ldv_request_irq_280((ha->pdev)->irq, & qla4_8xxx_msi_handler, 0UL, "qla4xxx", (void *)ha); if (ret == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "MSI: Enabled.\n"); } else { } set_bit(16L, (unsigned long volatile *)(& ha->flags)); goto irq_attached; } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "MSI: Failed to reserve interrupt %d already in use.\n", (ha->pdev)->irq); pci_disable_msi(ha->pdev); } } else { } try_intx: tmp___4 = is_qla8022(ha); if (tmp___4 != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: ISP82xx Legacy interrupt not supported\n", "qla4xxx_request_irqs"); goto irq_not_attached; } else { } ret = ldv_request_irq_281((ha->pdev)->irq, (ha->isp_ops)->intr_handler, 128UL, "qla4xxx", (void *)ha); if (ret == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "INTx: Enabled.\n"); } else { } set_bit(15L, (unsigned long volatile *)(& ha->flags)); goto irq_attached; } else { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "INTx: Failed to reserve interrupt %d already in use.\n", (ha->pdev)->irq); goto irq_not_attached; } irq_attached: set_bit(10L, (unsigned long volatile *)(& ha->flags)); (ha->host)->irq = (ha->pdev)->irq; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: irq %d attached\n", "qla4xxx_request_irqs", (ha->pdev)->irq); rval = 0; irq_not_attached: ; return (rval); } } void qla4xxx_free_irqs(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___2 = test_and_clear_bit(10L, (unsigned long volatile *)(& ha->flags)); if (tmp___2 != 0) { tmp___1 = constant_test_bit(17L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { qla4_8xxx_disable_msix(ha); } else { tmp___0 = test_and_clear_bit(16L, (unsigned long volatile *)(& ha->flags)); if (tmp___0 != 0) { ldv_free_irq_282((ha->pdev)->irq, (void *)ha); pci_disable_msi(ha->pdev); } else { tmp = test_and_clear_bit(15L, (unsigned long volatile *)(& ha->flags)); if (tmp != 0) { ldv_free_irq_283((ha->pdev)->irq, (void *)ha); } else { } } } } else { } return; } } int reg_check_1(irqreturn_t (*handler)(int , void * ) ) { { if ((unsigned long )handler == (unsigned long )(& qla4_8xxx_msi_handler)) { return (1); } else { } return (0); } } void choose_interrupt_1(void) { int tmp ; { tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_0, ldv_irq_line_1_0, ldv_irq_data_1_0); goto ldv_63630; case 1: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_1, ldv_irq_line_1_1, ldv_irq_data_1_1); goto ldv_63630; case 2: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_2, ldv_irq_line_1_2, ldv_irq_data_1_2); goto ldv_63630; case 3: ldv_irq_1_0 = ldv_irq_1(ldv_irq_1_3, ldv_irq_line_1_3, ldv_irq_data_1_3); goto ldv_63630; default: ldv_stop(); } ldv_63630: ; return; } } void disable_suitable_irq_1(int line , void *data ) { { if (ldv_irq_1_0 != 0 && line == ldv_irq_line_1_0) { ldv_irq_1_0 = 0; return; } else { } if (ldv_irq_1_1 != 0 && line == ldv_irq_line_1_1) { ldv_irq_1_1 = 0; return; } else { } if (ldv_irq_1_2 != 0 && line == ldv_irq_line_1_2) { ldv_irq_1_2 = 0; return; } else { } if (ldv_irq_1_3 != 0 && line == ldv_irq_line_1_3) { 
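/* Fourth and last slot of the LDV interrupt model: the harness tracks at
 * most four registered IRQ lines and clears the matching slot when
 * free_irq() is modelled via ldv_free_irq_282/283. */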
ldv_irq_1_3 = 0; return; } else { } return; } } int ldv_irq_1(int state , int line , void *data ) { irqreturn_t irq_retval ; int tmp ; int tmp___0 ; { tmp = __VERIFIER_nondet_int(); irq_retval = (irqreturn_t )tmp; if (state != 0) { tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ; if (state == 1) { LDV_IN_INTERRUPT = 2; irq_retval = qla4_8xxx_msi_handler(line, data); LDV_IN_INTERRUPT = 1; return (state); } else { } goto ldv_63646; default: ldv_stop(); } ldv_63646: ; } else { } return (state); } } void activate_suitable_irq_1(int line , void *data ) { { if (ldv_irq_1_0 == 0) { ldv_irq_line_1_0 = line; ldv_irq_data_1_0 = data; ldv_irq_1_0 = 1; return; } else { } if (ldv_irq_1_1 == 0) { ldv_irq_line_1_1 = line; ldv_irq_data_1_1 = data; ldv_irq_1_1 = 1; return; } else { } if (ldv_irq_1_2 == 0) { ldv_irq_line_1_2 = line; ldv_irq_data_1_2 = data; ldv_irq_1_2 = 1; return; } else { } if (ldv_irq_1_3 == 0) { ldv_irq_line_1_3 = line; ldv_irq_data_1_3 = data; ldv_irq_1_3 = 1; return; } else { } return; } } __inline static void spin_lock(spinlock_t *lock ) { { ldv_spin_lock(); ldv_spin_lock_241(lock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { ldv_spin_unlock(); ldv_spin_unlock_245(lock); return; } } bool ldv_queue_work_on_251(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_252(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_253(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_254(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_255(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_261(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_267(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_269(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_271(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); 
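/*
 * As with the other ldv_* allocation wrappers in this block, the stub only
 * checks the GFP flags against the atomic-context model via
 * ldv_check_alloc_flags() and then returns an unconstrained pointer from
 * ldv_undef_ptr(); skb_copy() itself is never invoked, so the verifier can
 * explore both the NULL and non-NULL return paths.
 */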
return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_272(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_273(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_274(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_275(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_276(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_277(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_278(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_279(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } __inline static int ldv_request_irq_280(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___7 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_1(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_1((int )irq, dev); } else { } return (ldv_func_res); } } __inline static int ldv_request_irq_281(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) { ldv_func_ret_type___8 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = request_irq(irq, handler, flags, name, dev); ldv_func_res = tmp; tmp___0 = reg_check_1(handler); if (tmp___0 != 0 && ldv_func_res == 0) { activate_suitable_irq_1((int )irq, dev); } else { } return (ldv_func_res); } } void ldv_free_irq_282(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_1((int )ldv_func_arg1, ldv_func_arg2); return; } } void ldv_free_irq_283(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_1((int )ldv_func_arg1, ldv_func_arg2); return; } } __inline static long ldv__builtin_expect(long exp , long c ) ; extern int __printk_ratelimit(char const * ) ; extern void ___might_sleep(char const * , int , int ) ; extern int snprintf(char * , size_t , char const * , ...) 
; __inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } } extern int __preempt_count ; __inline static int preempt_count(void) { int pfo_ret__ ; { switch (4UL) { case 1UL: __asm__ ("movb %%gs:%1,%0": "=q" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 2UL: __asm__ ("movw %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 4UL: __asm__ ("movl %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; case 8UL: __asm__ ("movq %%gs:%1,%0": "=r" (pfo_ret__): "m" (__preempt_count)); goto ldv_6106; default: __bad_percpu_size(); } ldv_6106: ; return (pfo_ret__ & 2147483647); } } extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; extern void _raw_read_lock(rwlock_t * ) ; extern unsigned long _raw_write_lock_irqsave(rwlock_t * ) ; extern void _raw_read_unlock(rwlock_t * ) ; extern void _raw_write_unlock_irqrestore(rwlock_t * , unsigned long ) ; __inline static void ldv_spin_lock_irq_301(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->__annonCompField18.rlock); return; } } __inline static void spin_lock_irq(spinlock_t *lock ) ; __inline static void ldv_spin_unlock_irq_304(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->__annonCompField18.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) ; __inline static u64 get_jiffies_64(void) { { return ((u64 )jiffies); } } extern unsigned int jiffies_to_msecs(unsigned long const ) ; extern unsigned long __msecs_to_jiffies(unsigned int const ) ; __inline static unsigned long msecs_to_jiffies(unsigned int const m ) { unsigned long tmp___0 ; { tmp___0 = __msecs_to_jiffies(m); return (tmp___0); } } bool ldv_queue_work_on_308(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_310(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_309(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_312(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_311(struct workqueue_struct *ldv_func_arg1 ) ; __inline static unsigned char readb(void const volatile *addr ) { unsigned char ret ; { __asm__ volatile ("movb %1,%0": "=q" (ret): "m" (*((unsigned char volatile *)addr)): "memory"); return (ret); } } __inline static void writeb(unsigned char val , void volatile *addr ) { { __asm__ volatile ("movb %0,%1": : "q" (val), "m" (*((unsigned char volatile *)addr)): "memory"); return; } } __inline static void writew(unsigned short val , void volatile *addr ) { { __asm__ volatile ("movw %0,%1": : "r" (val), "m" (*((unsigned short volatile *)addr)): "memory"); return; } } __inline static unsigned long readq(void const volatile *addr ) { unsigned long ret ; { __asm__ volatile ("movq %1,%0": "=r" (ret): "m" (*((unsigned long volatile *)addr)): "memory"); return (ret); } } __inline static void writeq(unsigned long val , void volatile *addr ) { { __asm__ volatile ("movq %0,%1": : "r" (val), "m" (*((unsigned long volatile *)addr)): "memory"); return; } } extern int kobject_uevent_env(struct kobject * , enum kobject_action , char ** ) ; extern void schedule(void) ; extern int _cond_resched(void) ; void 
*ldv_kmem_cache_alloc_318(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_335(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) ; extern void __const_udelay(unsigned long ) ; extern void _dev_info(struct device const * , char const * , ...) ; extern int pcie_capability_read_word(struct pci_dev * , int , u16 * ) ; extern void pci_disable_msix(struct pci_dev * ) ; extern int pci_enable_msix_range(struct pci_dev * , struct msix_entry * , int , int ) ; __inline static int pci_enable_msix_exact(struct pci_dev *dev , struct msix_entry *entries , int nvec ) { int rc ; int tmp ; { tmp = pci_enable_msix_range(dev, entries, nvec, nvec); rc = tmp; if (rc < 0) { return (rc); } else { } return (0); } } __inline static int ldv_request_irq_280(unsigned int irq , irqreturn_t (*handler)(int , void * ) , unsigned long flags , char const *name , void *dev ) ; void ldv_free_irq_337(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) ; struct sk_buff *ldv_skb_clone_326(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_334(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_328(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_324(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_332(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_333(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_329(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_330(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_331(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_336(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static int const MD_MIU_TEST_AGT_RDDATA[4U] = { 1090519208, 1090519212, 1090519224, 1090519228}; int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha ) ; void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha ) ; int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha , ulong *off ) ; int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha , u64 off , void *data , int size ) ; int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha , u64 off , void *data , int size ) ; int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) ; void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha ) ; void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha ) ; int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) ; void qla4_83xx_get_idc_param(struct scsi_qla_host *ha ) ; void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha ) ; void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha ) ; int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha ) ; void qla4_8xxx_get_minidump(struct scsi_qla_host *ha ) ; int qla4_8xxx_intr_disable(struct scsi_qla_host *ha ) ; int qla4_8xxx_intr_enable(struct scsi_qla_host *ha ) ; int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha , uint64_t addr , uint32_t *data , uint32_t count ) ; __inline static void *qla4_8xxx_pci_base_offsetfset(struct scsi_qla_host *ha , unsigned long 
off ) { { if (ha->first_page_group_end > off && ha->first_page_group_start <= off) { return ((void *)(ha->nx_pcibase + off)); } else { } return ((void *)0); } } static unsigned long crb_addr_xform[60U] ; static int qla4_8xxx_crb_table_initialized ; static void qla4_82xx_crb_addr_transform_setup(void) { { crb_addr_xform[49] = 1078984704UL; crb_addr_xform[40] = 1097859072UL; crb_addr_xform[5] = 218103808UL; crb_addr_xform[11] = 238026752UL; crb_addr_xform[10] = 236978176UL; crb_addr_xform[9] = 235929600UL; crb_addr_xform[8] = 234881024UL; crb_addr_xform[16] = 1883242496UL; crb_addr_xform[15] = 1882193920UL; crb_addr_xform[14] = 1881145344UL; crb_addr_xform[13] = 1880096768UL; crb_addr_xform[48] = 1894776832UL; crb_addr_xform[47] = 148897792UL; crb_addr_xform[46] = 147849216UL; crb_addr_xform[45] = 1891631104UL; crb_addr_xform[44] = 1890582528UL; crb_addr_xform[43] = 1889533952UL; crb_addr_xform[42] = 143654912UL; crb_addr_xform[53] = 142606336UL; crb_addr_xform[51] = 1108344832UL; crb_addr_xform[29] = 1090519040UL; crb_addr_xform[7] = 241172480UL; crb_addr_xform[12] = 1879048192UL; crb_addr_xform[22] = 876609536UL; crb_addr_xform[21] = 877658112UL; crb_addr_xform[20] = 875560960UL; crb_addr_xform[19] = 874512384UL; crb_addr_xform[18] = 873463808UL; crb_addr_xform[17] = 872415232UL; crb_addr_xform[28] = 1010827264UL; crb_addr_xform[27] = 1011875840UL; crb_addr_xform[26] = 1009778688UL; crb_addr_xform[25] = 1008730112UL; crb_addr_xform[24] = 1007681536UL; crb_addr_xform[23] = 1006632960UL; crb_addr_xform[1] = 1999634432UL; crb_addr_xform[0] = 698351616UL; crb_addr_xform[6] = 454033408UL; crb_addr_xform[50] = 1107296256UL; crb_addr_xform[31] = 219152384UL; crb_addr_xform[2] = 693108736UL; crb_addr_xform[3] = 709885952UL; crb_addr_xform[37] = 209715200UL; crb_addr_xform[36] = 208666624UL; crb_addr_xform[35] = 207618048UL; crb_addr_xform[34] = 1096810496UL; crb_addr_xform[39] = 1972371456UL; crb_addr_xform[38] = 1971322880UL; crb_addr_xform[58] = 1904214016UL; crb_addr_xform[56] = 1080033280UL; crb_addr_xform[59] = 428867584UL; qla4_8xxx_crb_table_initialized = 1; return; } } static struct crb_128M_2M_block_map crb_128M_2M_map[64U] = { {{{0U, 0U, 0U, 0U}}}, {{{1U, 1048576U, 1056768U, 1179648U}, {1U, 1114112U, 1179648U, 1245184U}, {1U, 1179648U, 1187840U, 1196032U}, {1U, 1245184U, 1253376U, 1204224U}, {1U, 1310720U, 1318912U, 1212416U}, {1U, 1376256U, 1384448U, 1220608U}, {1U, 1441792U, 1507328U, 1114112U}, {1U, 1507328U, 1515520U, 1236992U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 1966080U, 1968128U, 1187840U}, {0U, 0U, 0U, 0U}}}, {{{1U, 2097152U, 2162688U, 1572864U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 4194304U, 4198400U, 1478656U}}}, {{{1U, 5242880U, 5308416U, 1310720U}}}, {{{1U, 6291456U, 6356992U, 1835008U}}}, {{{1U, 7340032U, 7356416U, 1802240U}}}, {{{1U, 8388608U, 8396800U, 1507328U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 9371648U, 9379840U, 1515520U}}}, {{{1U, 9437184U, 9445376U, 1523712U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 10420224U, 10428416U, 1531904U}}}, {{{0U, 10485760U, 10493952U, 
1540096U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 11468800U, 11476992U, 1548288U}}}, {{{0U, 11534336U, 11542528U, 1556480U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {1U, 12517376U, 12525568U, 1564672U}}}, {{{1U, 12582912U, 12599296U, 1916928U}}}, {{{1U, 13631488U, 13647872U, 1720320U}}}, {{{1U, 14680064U, 14696448U, 1703936U}}}, {{{1U, 15728640U, 15732736U, 1458176U}}}, {{{0U, 16777216U, 16793600U, 1736704U}}}, {{{1U, 17825792U, 17829888U, 1441792U}}}, {{{1U, 18874368U, 18878464U, 1445888U}}}, {{{1U, 19922944U, 19927040U, 1449984U}}}, {{{1U, 20971520U, 20975616U, 1454080U}}}, {{{1U, 22020096U, 22024192U, 1462272U}}}, {{{1U, 23068672U, 23072768U, 1466368U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 30408704U, 30474240U, 1638400U}}}, {{{1U, 31457280U, 31461376U, 1482752U}}}, {{{1U, 32505856U, 32571392U, 1376256U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 34603008U, 34611200U, 1179648U}, {1U, 34668544U, 34734080U, 1245184U}, {1U, 34734080U, 34742272U, 1196032U}, {1U, 34799616U, 34807808U, 1204224U}, {1U, 34865152U, 34873344U, 1212416U}, {1U, 34930688U, 34938880U, 1220608U}, {1U, 34996224U, 35061760U, 1114112U}, {1U, 35061760U, 35069952U, 1236992U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}, {0U, 0U, 0U, 0U}}}, {{{1U, 35651584U, 35667968U, 1769472U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 41943040U, 41959424U, 1720320U}}}, {{{1U, 42991616U, 42995712U, 1486848U}}}, {{{1U, 44040192U, 44041216U, 1754112U}}}, {{{1U, 45088768U, 45089792U, 1755136U}}}, {{{1U, 46137344U, 46138368U, 1756160U}}}, {{{1U, 47185920U, 47186944U, 1757184U}}}, {{{1U, 48234496U, 48235520U, 1758208U}}}, {{{1U, 49283072U, 49284096U, 1759232U}}}, {{{1U, 50331648U, 50332672U, 1760256U}}}, {{{0U, 51380224U, 51396608U, 1736704U}}}, {{{1U, 52428800U, 52445184U, 1916928U}}}, {{{1U, 53477376U, 53493760U, 1703936U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 55574528U, 55575552U, 1753088U}}}, {{{1U, 56623104U, 56624128U, 1761280U}}}, {{{1U, 57671680U, 57672704U, 1762304U}}}, {{{1U, 58720256U, 58736640U, 1900544U}}}, {{{1U, 59768832U, 59785216U, 1785856U}}}, {{{1U, 60817408U, 60833792U, 1933312U}}}, {{{0U, 0U, 0U, 0U}}}, {{{0U, 0U, 0U, 0U}}}, {{{1U, 63963136U, 63979520U, 1949696U}}}, {{{1U, 65011712U, 65015808U, 1470464U}}}, {{{1U, 66060288U, 66064384U, 1474560U}}}}; static unsigned int qla4_82xx_crb_hub_agt[64U] = { 0U, 1907U, 661U, 677U, 0U, 208U, 433U, 230U, 224U, 225U, 226U, 227U, 1056U, 1047U, 1057U, 843U, 1029U, 832U, 833U, 834U, 835U, 837U, 836U, 960U, 961U, 962U, 963U, 0U, 964U, 1040U, 0U, 209U, 0U, 1907U, 1046U, 0U, 0U, 0U, 0U, 0U, 1047U, 0U, 137U, 1802U, 1803U, 1804U, 141U, 142U, 1807U, 1029U, 1056U, 1057U, 0U, 136U, 145U, 1810U, 1030U, 0U, 1816U, 409U, 425U, 0U, 838U, 0U}; static char *qdev_state[8U] = { (char *)"Unknown", (char *)"Cold", (char *)"Initializing", (char *)"Ready", (char *)"Need Reset", (char *)"Need Quiescent", (char *)"Failed", (char *)"Quiescent"}; 
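/*
 * The static tables above feed the ISP82xx register-window code that
 * follows: crb_addr_xform[] and crb_128M_2M_map[] translate legacy 128MB
 * CRB addresses into offsets within the 2MB BAR mapping (ha->nx_pcibase),
 * qla4_82xx_crb_hub_agt[] supplies the hub/agent selector used by
 * qla4_82xx_pci_set_crbwindow_2M(), roughly
 *     crb_win = (crb_hub_agt[(off >> 20) & 0x3f] << 20) | (off & 0xf0000);
 * and qdev_state[] is a name table used in log messages.  The bare decimal
 * constants in the functions below (for example 1245280 == 0x130060 and
 * 1966080 == 0x1e0000) appear to be the CRB window and indirect-access
 * offsets from the original ql4_nx.h macros, expanded by CIL.
 */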
static void qla4_82xx_pci_set_crbwindow_2M(struct scsi_qla_host *ha , ulong *off ) { u32 win_read ; { ha->crb_win = (qla4_82xx_crb_hub_agt[(*off >> 20) & 63UL] << 20) | ((uint32_t )*off & 983040U); writel(ha->crb_win, (void volatile *)(ha->nx_pcibase + 1245280UL)); win_read = readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); if (ha->crb_win != win_read) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Written crbwin (0x%x) != Read crbwin (0x%x), off=0x%lx\n", "qla4_82xx_pci_set_crbwindow_2M", ha->crb_win, win_read, *off); } else { } } else { } *off = (ulong )((((unsigned long long )*off & 65535ULL) + (unsigned long long )ha->nx_pcibase) + 1966080ULL); return; } } void qla4_82xx_wr_32(struct scsi_qla_host *ha , ulong off , u32 data ) { unsigned long flags ; int rv ; long tmp ; { flags = 0UL; rv = qla4_82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_nx.c"), "i" (385), "i" (12UL)); ldv_63336: ; goto ldv_63336; } else { } if (rv == 1) { flags = _raw_write_lock_irqsave(& ha->hw_lock); qla4_82xx_crb_win_lock(ha); qla4_82xx_pci_set_crbwindow_2M(ha, & off); } else { } writel(data, (void volatile *)off); if (rv == 1) { qla4_82xx_crb_win_unlock(ha); _raw_write_unlock_irqrestore(& ha->hw_lock, flags); } else { } return; } } uint32_t qla4_82xx_rd_32(struct scsi_qla_host *ha , ulong off ) { unsigned long flags ; int rv ; u32 data ; long tmp ; { flags = 0UL; rv = qla4_82xx_pci_get_crb_addr_2M(ha, & off); tmp = ldv__builtin_expect(rv == -1, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_nx.c"), "i" (409), "i" (12UL)); ldv_63350: ; goto ldv_63350; } else { } if (rv == 1) { flags = _raw_write_lock_irqsave(& ha->hw_lock); qla4_82xx_crb_win_lock(ha); qla4_82xx_pci_set_crbwindow_2M(ha, & off); } else { } data = readl((void const volatile *)off); if (rv == 1) { qla4_82xx_crb_win_unlock(ha); _raw_write_unlock_irqrestore(& ha->hw_lock, flags); } else { } return (data); } } int qla4_82xx_md_rd_32(struct scsi_qla_host *ha , uint32_t off , uint32_t *data ) { uint32_t win_read ; uint32_t off_value ; int rval ; { rval = 0; off_value = off & 4294901760U; writel(off_value, (void volatile *)(ha->nx_pcibase + 1245280UL)); win_read = readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); if (win_read != off_value) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", "qla4_82xx_md_rd_32", off_value, win_read, off); } else { } rval = 1; } else { off_value = off & 65535U; *data = readl((void const volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } return (rval); } } int qla4_82xx_md_wr_32(struct scsi_qla_host *ha , uint32_t off , uint32_t data ) { uint32_t win_read ; uint32_t off_value ; int rval ; { rval = 0; 
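/*
 * Indirect minidump-style write, mirroring qla4_82xx_md_rd_32() above: the
 * upper 16 bits of 'off' select a 64KB window by writing the register at
 * nx_pcibase + 0x130060, the value is read back to confirm the window
 * actually moved, and only then is 'data' written through the indirect
 * region at nx_pcibase + 0x1e0000 + (off & 0xffff).  A readback mismatch
 * leaves rval set to 1.
 */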
off_value = off & 4294901760U; writel(off_value, (void volatile *)(ha->nx_pcibase + 1245280UL)); win_read = readl((void const volatile *)(ha->nx_pcibase + 1245280UL)); if (win_read != off_value) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Written (0x%x) != Read (0x%x), off=0x%x\n", "qla4_82xx_md_wr_32", off_value, win_read, off); } else { } rval = 1; } else { off_value = off & 65535U; writel(data, (void volatile *)(((unsigned long )off_value + ha->nx_pcibase) + 1966080UL)); } return (rval); } } int qla4_82xx_crb_win_lock(struct scsi_qla_host *ha ) { int i ; int done ; int timeout ; uint32_t tmp ; int tmp___0 ; { done = 0; timeout = 0; goto ldv_63386; ldv_63385: tmp = qla4_82xx_rd_32(ha, 101826616UL); done = (int )tmp; if (done == 1) { goto ldv_63381; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; tmp___0 = preempt_count(); if (((unsigned long )tmp___0 & 2096896UL) == 0UL) { schedule(); } else { i = 0; goto ldv_63383; ldv_63382: cpu_relax(); i = i + 1; ldv_63383: ; if (i <= 19) { goto ldv_63382; } else { } } ldv_63386: ; if (done == 0) { goto ldv_63385; } else { } ldv_63381: qla4_82xx_wr_32(ha, 136323364UL, (u32 )ha->func_num); return (0); } } void qla4_82xx_crb_win_unlock(struct scsi_qla_host *ha ) { { qla4_82xx_rd_32(ha, 101826620UL); return; } } int qla4_82xx_idc_lock(struct scsi_qla_host *ha ) { int i ; int done ; int timeout ; uint32_t tmp ; int tmp___0 ; { done = 0; timeout = 0; goto ldv_63401; ldv_63400: tmp = qla4_82xx_rd_32(ha, 101826600UL); done = (int )tmp; if (done == 1) { goto ldv_63396; } else { } if (timeout > 99999999) { return (-1); } else { } timeout = timeout + 1; tmp___0 = preempt_count(); if (((unsigned long )tmp___0 & 2096896UL) == 0UL) { schedule(); } else { i = 0; goto ldv_63398; ldv_63397: cpu_relax(); i = i + 1; ldv_63398: ; if (i <= 19) { goto ldv_63397; } else { } } ldv_63401: ; if (done == 0) { goto ldv_63400; } else { } ldv_63396: ; return (0); } } void qla4_82xx_idc_unlock(struct scsi_qla_host *ha ) { { qla4_82xx_rd_32(ha, 101826604UL); return; } } int qla4_82xx_pci_get_crb_addr_2M(struct scsi_qla_host *ha , ulong *off ) { struct crb_128M_2M_sub_block_map *m ; { if (*off > 167772159UL) { return (-1); } else { } if (*off > 75497471UL && *off <= 75499519UL) { *off = (*off + ha->nx_pcibase) - 74450944UL; return (0); } else { } if (*off <= 100663295UL) { return (-1); } else { } *off = *off - 100663296UL; m = (struct crb_128M_2M_sub_block_map *)(& crb_128M_2M_map[(*off >> 20) & 63UL].sub_block) + ((*off >> 16) & 15UL); if ((m->valid != 0U && (ulong )m->start_128M <= *off) && (ulong )m->end_128M > *off) { *off = ((*off + (ulong )m->start_2M) - (ulong )m->start_128M) + ha->nx_pcibase; return (0); } else { } return (1); } } static unsigned long qla4_82xx_pci_mem_bound_check(struct scsi_qla_host *ha , unsigned long long addr , int size ) { { if ((addr > 268435455ULL || ((unsigned long long )size + addr) - 1ULL > 268435455ULL) || (((size != 1 && size != 2) && size != 4) && size != 8)) { return (0UL); } else { } return (1UL); } } static int qla4_82xx_pci_set_window_warning_count ; static unsigned long qla4_82xx_pci_set_window(struct scsi_qla_host *ha , unsigned long long addr ) { int window ; u32 win_read ; unsigned int temp1 ; int tmp ; { if (addr <= 268435455ULL) { window = (int )((unsigned int )((addr & 33292288ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla4_82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); 
win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); if (win_read << 17 != (u32 )window) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Written MNwin (0x%x) != Read MNwin (0x%x)\n", "qla4_82xx_pci_set_window", window, win_read); } else { } addr = addr & 262143ULL; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { if ((addr & 1046528ULL) == 1046528ULL) { printk("%s: QM access not handled.\n", "qla4_82xx_pci_set_window"); addr = 0xffffffffffffffffULL; } else { } window = (int )((unsigned int )((addr & 33488896ULL) >> 1) | ((unsigned int )(addr >> 25) & 1023U)); ha->ddr_mn_window = (uint32_t )window; qla4_82xx_wr_32(ha, ha->mn_win_crb | 100663296UL, (u32 )window); win_read = qla4_82xx_rd_32(ha, ha->mn_win_crb | 100663296UL); temp1 = ((unsigned int )(window << 7) & 65535U) | ((unsigned int )window >> 17); if (win_read != temp1) { printk("%s: Written OCMwin (0x%x) != Read OCMwin (0x%x)\n", "qla4_82xx_pci_set_window", temp1, win_read); } else { } addr = (addr & 262143ULL) + 786432ULL; } else if (addr <= 12952010751ULL && addr > 12884901887ULL) { window = (int )addr & 268173312; ha->qdr_sn_window = window; qla4_82xx_wr_32(ha, ha->ms_win_crb | 100663296UL, (u32 )window); win_read = qla4_82xx_rd_32(ha, ha->ms_win_crb | 100663296UL); if ((u32 )window != win_read) { printk("%s: Written MSwin (0x%x) != Read MSwin (0x%x)\n", "qla4_82xx_pci_set_window", window, win_read); } else { } addr = (addr & 262143ULL) + 67108864ULL; } else { tmp = qla4_82xx_pci_set_window_warning_count; qla4_82xx_pci_set_window_warning_count = qla4_82xx_pci_set_window_warning_count + 1; if (tmp <= 7 || ((unsigned int )qla4_82xx_pci_set_window_warning_count & 63U) == 0U) { printk("%s: Warning:%s Unknown address range!\n", "qla4_82xx_pci_set_window", (char *)"qla4xxx"); } else { } addr = 0xffffffffffffffffULL; } return ((unsigned long )addr); } } static int qla4_82xx_pci_is_same_window(struct scsi_qla_host *ha , unsigned long long addr ) { int window ; unsigned long long qdr_max ; { qdr_max = 12952010751ULL; if (addr <= 268435455ULL) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_nx.c"), "i" (692), "i" (12UL)); ldv_63430: ; goto ldv_63430; } else if (addr <= 8590983167ULL && addr > 8589934591ULL) { return (1); } else if (addr <= 8595177471ULL && addr > 8594128895ULL) { return (1); } else if (addr <= qdr_max && addr > 12884901887ULL) { window = (int )((addr - 12884901888ULL) >> 22) & 63; if (ha->qdr_sn_window == window) { return (1); } else { } } else { } return (0); } } static int qla4_82xx_pci_mem_read_direct(struct scsi_qla_host *ha , u64 off , void *data , int size ) { unsigned long flags ; void *addr ; int ret ; u64 start ; void *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; unsigned long tmp ; int tmp___0 ; unsigned long tmp___1 ; { ret = 0; mem_ptr = (void *)0; flags = _raw_write_lock_irqsave(& ha->hw_lock); tmp = qla4_82xx_pci_set_window(ha, off); start = (u64 )tmp; if (start == 0xffffffffffffffffULL) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); printk("\v%s out of bound pci memory access. 
offset is 0x%llx\n", (char *)"qla4xxx", off); return (-1); } else { tmp___0 = qla4_82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___0 == 0) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); printk("\v%s out of bound pci memory access. offset is 0x%llx\n", (char *)"qla4xxx", off); return (-1); } else { } } addr = qla4_8xxx_pci_base_offsetfset(ha, (unsigned long )start); if ((unsigned long )addr == (unsigned long )((void *)0)) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { mem_ptr = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); } else { mem_ptr = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); } if ((unsigned long )mem_ptr == (unsigned long )((void *)0)) { *((u8 *)data) = 0U; return (-1); } else { } addr = mem_ptr; addr = addr + (start & 4095ULL); flags = _raw_write_lock_irqsave(& ha->hw_lock); } else { } switch (size) { case 1: *((u8 *)data) = readb((void const volatile *)addr); goto ldv_63457; case 2: *((u16 *)data) = readw((void const volatile *)addr); goto ldv_63457; case 4: *((u32 *)data) = readl((void const volatile *)addr); goto ldv_63457; case 8: tmp___1 = readq((void const volatile *)addr); *((u64 *)data) = (u64 )tmp___1; goto ldv_63457; default: ret = -1; goto ldv_63457; } ldv_63457: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); if ((unsigned long )mem_ptr != (unsigned long )((void *)0)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static int qla4_82xx_pci_mem_write_direct(struct scsi_qla_host *ha , u64 off , void *data , int size ) { unsigned long flags ; void *addr ; int ret ; u64 start ; void *mem_ptr ; unsigned long mem_base ; unsigned long mem_page ; unsigned long tmp ; int tmp___0 ; { ret = 0; mem_ptr = (void *)0; flags = _raw_write_lock_irqsave(& ha->hw_lock); tmp = qla4_82xx_pci_set_window(ha, off); start = (u64 )tmp; if (start == 0xffffffffffffffffULL) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); printk("\v%s out of bound pci memory access. offset is 0x%llx\n", (char *)"qla4xxx", off); return (-1); } else { tmp___0 = qla4_82xx_pci_is_same_window(ha, ((u64 )size + off) - 1ULL); if (tmp___0 == 0) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); printk("\v%s out of bound pci memory access. 
offset is 0x%llx\n", (char *)"qla4xxx", off); return (-1); } else { } } addr = qla4_8xxx_pci_base_offsetfset(ha, (unsigned long )start); if ((unsigned long )addr == (unsigned long )((void *)0)) { _raw_write_unlock_irqrestore(& ha->hw_lock, flags); mem_base = (unsigned long )(ha->pdev)->resource[0].start; mem_page = (unsigned long )start & 0xfffffffffffff000UL; if (((((u64 )size + start) - 1ULL) & 0xfffffffffffff000ULL) != (unsigned long long )mem_page) { mem_ptr = ioremap((resource_size_t )(mem_base + mem_page), 8192UL); } else { mem_ptr = ioremap((resource_size_t )(mem_base + mem_page), 4096UL); } if ((unsigned long )mem_ptr == (unsigned long )((void *)0)) { return (-1); } else { } addr = mem_ptr; addr = addr + (start & 4095ULL); flags = _raw_write_lock_irqsave(& ha->hw_lock); } else { } switch (size) { case 1: writeb((int )*((u8 *)data), (void volatile *)addr); goto ldv_63491; case 2: writew((int )*((u16 *)data), (void volatile *)addr); goto ldv_63491; case 4: writel(*((u32 *)data), (void volatile *)addr); goto ldv_63491; case 8: writeq((unsigned long )*((u64 *)data), (void volatile *)addr); goto ldv_63491; default: ret = -1; goto ldv_63491; } ldv_63491: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); if ((unsigned long )mem_ptr != (unsigned long )((void *)0)) { iounmap((void volatile *)mem_ptr); } else { } return (ret); } } static unsigned long qla4_82xx_decode_crb_addr(unsigned long addr ) { int i ; unsigned long base_addr ; unsigned long offset ; unsigned long pci_base ; { if (qla4_8xxx_crb_table_initialized == 0) { qla4_82xx_crb_addr_transform_setup(); } else { } pci_base = 4294967295UL; base_addr = addr & 4293918720UL; offset = addr & 1048575UL; i = 0; goto ldv_63508; ldv_63507: ; if (crb_addr_xform[i] == base_addr) { pci_base = (unsigned long )(i << 20); goto ldv_63506; } else { } i = i + 1; ldv_63508: ; if (i <= 59) { goto ldv_63507; } else { } ldv_63506: ; if (pci_base == 4294967295UL) { return (pci_base); } else { return (pci_base + offset); } } } static long rom_max_timeout = 100L; static long qla4_82xx_rom_lock_timeout = 100L; static int qla4_82xx_rom_lock(struct scsi_qla_host *ha ) { int i ; int done ; int timeout ; uint32_t tmp ; int tmp___0 ; { done = 0; timeout = 0; goto ldv_63522; ldv_63521: tmp = qla4_82xx_rd_32(ha, 101826576UL); done = (int )tmp; if (done == 1) { goto ldv_63517; } else { } if ((long )timeout >= qla4_82xx_rom_lock_timeout) { return (-1); } else { } timeout = timeout + 1; tmp___0 = preempt_count(); if (((unsigned long )tmp___0 & 2096896UL) == 0UL) { schedule(); } else { i = 0; goto ldv_63519; ldv_63518: cpu_relax(); i = i + 1; ldv_63519: ; if (i <= 19) { goto ldv_63518; } else { } } ldv_63522: ; if (done == 0) { goto ldv_63521; } else { } ldv_63517: qla4_82xx_wr_32(ha, 136323328UL, 222393152U); return (0); } } static void qla4_82xx_rom_unlock(struct scsi_qla_host *ha ) { { qla4_82xx_rd_32(ha, 101826580UL); return; } } static int qla4_82xx_wait_rom_done(struct scsi_qla_host *ha ) { long timeout ; long done ; uint32_t tmp ; { timeout = 0L; done = 0L; goto ldv_63532; ldv_63531: tmp = qla4_82xx_rd_32(ha, 154140676UL); done = (long )tmp; done = done & 2L; timeout = timeout + 1L; if (timeout >= rom_max_timeout) { printk("%s: Timeout reached waiting for rom done", (char *)"qla4xxx"); return (-1); } else { } ldv_63532: ; if (done == 0L) { goto ldv_63531; } else { } return (0); } } static int qla4_82xx_do_rom_fast_read(struct scsi_qla_host *ha , int addr , int *valp ) { int tmp ; uint32_t tmp___0 ; { qla4_82xx_wr_32(ha, 154206216UL, (u32 )addr); 
qla4_82xx_wr_32(ha, 154206228UL, 0U); qla4_82xx_wr_32(ha, 154206224UL, 3U); qla4_82xx_wr_32(ha, 154206212UL, 11U); tmp = qla4_82xx_wait_rom_done(ha); if (tmp != 0) { printk("%s: Error waiting for rom done\n", (char *)"qla4xxx"); return (-1); } else { } qla4_82xx_wr_32(ha, 154206228UL, 0U); __const_udelay(42950UL); qla4_82xx_wr_32(ha, 154206224UL, 0U); tmp___0 = qla4_82xx_rd_32(ha, 154206232UL); *valp = (int )tmp___0; return (0); } } static int qla4_82xx_rom_fast_read(struct scsi_qla_host *ha , int addr , int *valp ) { int ret ; int loops ; int tmp ; { loops = 0; goto ldv_63547; ldv_63546: __const_udelay(429500UL); loops = loops + 1; ldv_63547: tmp = qla4_82xx_rom_lock(ha); if (tmp != 0 && loops <= 49999) { goto ldv_63546; } else { } if (loops > 49999) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: qla4_82xx_rom_lock failed\n", (char *)"qla4xxx"); return (-1); } else { } ret = qla4_82xx_do_rom_fast_read(ha, addr, valp); qla4_82xx_rom_unlock(ha); return (ret); } } static int qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha , int verbose ) { int addr ; int val ; int i ; struct crb_addr_pair *buf ; unsigned long off ; unsigned int offset ; unsigned int n ; uint32_t tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; int tmp___4 ; int tmp___5 ; unsigned long tmp___6 ; { qla4_82xx_rom_lock(ha); qla4_82xx_wr_32(ha, 153092112UL, 0U); qla4_82xx_wr_32(ha, 153092116UL, 0U); qla4_82xx_wr_32(ha, 153092120UL, 0U); qla4_82xx_wr_32(ha, 153092124UL, 0U); qla4_82xx_wr_32(ha, 153092128UL, 0U); qla4_82xx_wr_32(ha, 153092132UL, 0U); qla4_82xx_wr_32(ha, 106954816UL, 255U); qla4_82xx_wr_32(ha, 107413504UL, 0U); qla4_82xx_wr_32(ha, 107479040UL, 0U); qla4_82xx_wr_32(ha, 107544576UL, 0U); qla4_82xx_wr_32(ha, 107610112UL, 0U); qla4_82xx_wr_32(ha, 107675648UL, 0U); tmp = qla4_82xx_rd_32(ha, 105910272UL); val = (int )tmp; qla4_82xx_wr_32(ha, 105910272UL, (u32 )val & 4294967294U); qla4_82xx_wr_32(ha, 133174016UL, 1U); qla4_82xx_wr_32(ha, 142606336UL, 0U); qla4_82xx_wr_32(ha, 142606344UL, 0U); qla4_82xx_wr_32(ha, 142606352UL, 0U); qla4_82xx_wr_32(ha, 142606360UL, 0U); qla4_82xx_wr_32(ha, 142606592UL, 0U); qla4_82xx_wr_32(ha, 142606848UL, 0U); qla4_82xx_wr_32(ha, 118489148UL, 1U); qla4_82xx_wr_32(ha, 119537724UL, 1U); qla4_82xx_wr_32(ha, 120586300UL, 1U); qla4_82xx_wr_32(ha, 121634876UL, 1U); qla4_82xx_wr_32(ha, 116391996UL, 1U); msleep(5U); tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___0 != 0) { qla4_82xx_wr_32(ha, 154140680UL, 4278190079U); } else { qla4_82xx_wr_32(ha, 154140680UL, 4294967295U); } qla4_82xx_rom_unlock(ha); tmp___1 = qla4_82xx_rom_fast_read(ha, 0, (int *)(& n)); if (tmp___1 != 0 || n != 3405695742U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "[OLD_ERROR] Reading crb_init area: n: %08x\n", n); return (-1); } else { tmp___2 = qla4_82xx_rom_fast_read(ha, 4, (int *)(& n)); if (tmp___2 != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "[OLD_ERROR] Reading crb_init area: n: %08x\n", n); return (-1); } else { } } offset = n & 65535U; n = n >> 16; if (n > 1023U) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: %s:n=0x%x [OLD_ERROR] Card flash not initialized.\n", (char *)"qla4xxx", "qla4_82xx_pinit_from_rom", n); return (-1); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %d CRB init values found in ROM.\n", (char *)"qla4xxx", n); tmp___3 = kzalloc((unsigned long )n * 16UL, 208U); buf = (struct crb_addr_pair___0 *)tmp___3; if ((unsigned long 
)buf == (unsigned long )((struct crb_addr_pair___0 *)0)) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: [OLD_ERROR] Unable to malloc memory.\n", (char *)"qla4xxx"); return (-1); } else { } i = 0; goto ldv_63565; ldv_63564: tmp___4 = qla4_82xx_rom_fast_read(ha, (int )(((unsigned int )(i * 2) + offset) * 4U), & val); if (tmp___4 != 0) { kfree((void const *)buf); return (-1); } else { tmp___5 = qla4_82xx_rom_fast_read(ha, (int )((((unsigned int )(i * 2) + offset) + 1U) * 4U), & addr); if (tmp___5 != 0) { kfree((void const *)buf); return (-1); } else { } } (buf + (unsigned long )i)->addr = (long )addr; (buf + (unsigned long )i)->data = (long )val; i = i + 1; ldv_63565: ; if ((unsigned int )i < n) { goto ldv_63564; } else { } i = 0; goto ldv_63569; ldv_63568: tmp___6 = qla4_82xx_decode_crb_addr((unsigned long )(buf + (unsigned long )i)->addr); off = tmp___6 + 100663296UL; if ((int )off & 1) { if (ql4xextended_error_logging == 2) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Skip CRB init replay for offset = 0x%lx\n", off); } else { } goto ldv_63567; } else { } if (off == 136323580UL) { goto ldv_63567; } else { } if (off == 154140860UL) { goto ldv_63567; } else { } if (off == 154140872UL) { goto ldv_63567; } else { } if (off == 101785664UL) { goto ldv_63567; } else { } if (off == 101785672UL) { goto ldv_63567; } else { } if ((off & 267386880UL) == 161480704UL) { goto ldv_63567; } else { } if ((off & 267386880UL) == 102760448UL) { goto ldv_63567; } else { } if (off == 4294967295UL) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: [OLD_ERROR] Unknown addr: 0x%08lx\n", (char *)"qla4xxx", (buf + (unsigned long )i)->addr); goto ldv_63567; } else { } qla4_82xx_wr_32(ha, off, (u32 )(buf + (unsigned long )i)->data); if (off == 154140680UL) { msleep(1000U); } else { } msleep(1U); ldv_63567: i = i + 1; ldv_63569: ; if ((unsigned int )i < n) { goto ldv_63568; } else { } kfree((void const *)buf); qla4_82xx_wr_32(ha, 122683628UL, 30U); qla4_82xx_wr_32(ha, 122683468UL, 8U); qla4_82xx_wr_32(ha, 123732044UL, 8U); qla4_82xx_wr_32(ha, 118489096UL, 0U); qla4_82xx_wr_32(ha, 118489100UL, 0U); qla4_82xx_wr_32(ha, 119537672UL, 0U); qla4_82xx_wr_32(ha, 119537676UL, 0U); qla4_82xx_wr_32(ha, 120586248UL, 0U); qla4_82xx_wr_32(ha, 120586252UL, 0U); qla4_82xx_wr_32(ha, 121634824UL, 0U); qla4_82xx_wr_32(ha, 121634828UL, 0U); return (0); } } int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha , uint64_t addr , uint32_t *data , uint32_t count ) { int i ; int j ; uint32_t agt_ctrl ; unsigned long flags ; int ret_val ; uint32_t *tmp ; int tmp___0 ; uint32_t *tmp___1 ; int tmp___2 ; uint32_t *tmp___3 ; int tmp___4 ; uint32_t *tmp___5 ; int tmp___6 ; int tmp___7 ; struct ratelimit_state _rs ; int tmp___8 ; { ret_val = 0; if ((addr & 15ULL) != 0ULL) { ret_val = 1; goto exit_ms_mem_write; } else { } flags = _raw_write_lock_irqsave(& ha->hw_lock); ret_val = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519192U, 0U); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: write to AGT_ADDR_HI failed\n", "qla4_8xxx_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } i = 0; goto ldv_63594; ldv_63593: ; if ((addr > 13019119615ULL || addr <= 12884901887ULL) && addr > 268435455ULL) { ret_val = 1; goto exit_ms_mem_write_unlock; } else { } ret_val = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519188U, (uint32_t )addr); tmp = data; data = data + 1; tmp___0 = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519200U, *tmp); ret_val = tmp___0 | 
ret_val; tmp___1 = data; data = data + 1; tmp___2 = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519204U, *tmp___1); ret_val = tmp___2 | ret_val; tmp___3 = data; data = data + 1; tmp___4 = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519216U, *tmp___3); ret_val = tmp___4 | ret_val; tmp___5 = data; data = data + 1; tmp___6 = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519220U, *tmp___5); ret_val = tmp___6 | ret_val; if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: write to AGT_WRDATA failed\n", "qla4_8xxx_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } ret_val = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519184U, 6U); tmp___7 = (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519184U, 7U); ret_val = tmp___7 | ret_val; if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: write to AGT_CTRL failed\n", "qla4_8xxx_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } j = 0; goto ldv_63590; ldv_63589: ret_val = (*((ha->isp_ops)->rd_reg_indirect))(ha, 1090519184U, & agt_ctrl); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to read MD_MIU_TEST_AGT_CTRL\n", "qla4_8xxx_ms_mem_write_128b"); goto exit_ms_mem_write_unlock; } else { } if ((agt_ctrl & 8U) == 0U) { goto ldv_63588; } else { } j = j + 1; ldv_63590: ; if (j <= 999) { goto ldv_63589; } else { } ldv_63588: ; if (j > 999) { _rs.lock.raw_lock.val.counter = 0; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___8 = ___ratelimit(& _rs, "qla4_8xxx_ms_mem_write_128b"); if (tmp___8 != 0) { printk("\v%s: MS memory write failed!\n", "qla4_8xxx_ms_mem_write_128b"); } else { } ret_val = 1; goto exit_ms_mem_write_unlock; } else { } i = i + 1; addr = addr + 16ULL; ldv_63594: ; if ((uint32_t )i < count) { goto ldv_63593; } else { } exit_ms_mem_write_unlock: _raw_write_unlock_irqrestore(& ha->hw_lock, flags); exit_ms_mem_write: ; return (ret_val); } } static int qla4_82xx_load_from_flash(struct scsi_qla_host *ha , uint32_t image_start ) { int i ; int rval ; long size ; long flashaddr ; long memaddr ; u64 data ; u32 high ; u32 low ; int tmp ; int tmp___0 ; { rval = 0; size = 0L; memaddr = (long )ha->hw.flt_region_bootload; flashaddr = memaddr; size = ((long )image_start - flashaddr) / 8L; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: bootldr=0x%lx, fw_image=0x%x\n", ha->host_no, "qla4_82xx_load_from_flash", flashaddr, image_start); } else { } i = 0; goto ldv_63614; ldv_63613: tmp = qla4_82xx_rom_fast_read(ha, (int )flashaddr, (int *)(& low)); if (tmp != 0) { rval = -1; goto exit_load_from_flash; } else { tmp___0 = qla4_82xx_rom_fast_read(ha, (int )((unsigned int )flashaddr + 4U), (int *)(& high)); if (tmp___0 != 0) { rval = -1; goto exit_load_from_flash; } else { } } data = ((unsigned long long )high << 32) | (unsigned long long )low; rval = qla4_82xx_pci_mem_write_2M(ha, (u64 )memaddr, (void *)(& data), 8); if (rval != 0) { goto exit_load_from_flash; } else { } flashaddr = flashaddr + 8L; memaddr = memaddr + 8L; if (((unsigned int )i & 4095U) == 0U) { msleep(1U); } else { } i = i + 1; ldv_63614: ; if ((long )i < size) { goto ldv_63613; } else { } __const_udelay(429500UL); _raw_read_lock(& 
ha->hw_lock); qla4_82xx_wr_32(ha, 118489112UL, 4128U); qla4_82xx_wr_32(ha, 154140680UL, 8388638U); _raw_read_unlock(& ha->hw_lock); exit_load_from_flash: ; return (rval); } } static int qla4_82xx_load_fw(struct scsi_qla_host *ha , uint32_t image_start ) { u32 rst ; int tmp ; int tmp___0 ; { qla4_82xx_wr_32(ha, 136323664UL, 0U); tmp = qla4_82xx_pinit_from_rom(ha, 0); if (tmp != 0) { printk("\f%s: Error during CRB Initialization\n", "qla4_82xx_load_fw"); return (1); } else { } __const_udelay(2147500UL); rst = qla4_82xx_rd_32(ha, 154140680UL); rst = rst & 4026531839U; qla4_82xx_wr_32(ha, 154140680UL, rst); tmp___0 = qla4_82xx_load_from_flash(ha, image_start); if (tmp___0 != 0) { printk("%s: Error trying to load fw from flash!\n", "qla4_82xx_load_fw"); return (1); } else { } return (0); } } int qla4_82xx_pci_mem_read_2M(struct scsi_qla_host *ha , u64 off , void *data , int size ) { int i ; int j ; int k ; int start ; int end ; int loop ; int sz[2U] ; int off0[2U] ; int shift_amount ; uint32_t temp ; uint64_t off8 ; uint64_t val ; uint64_t mem_crb ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; struct ratelimit_state _rs ; int tmp___1 ; { j = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla4_82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla4_82xx_pci_mem_read_direct(ha, off, data, size); return (tmp); } else { } } off8 = off & 4294967280ULL; off0[0] = (int )off & 15; sz[0] = size < 16 - off0[0] ? size : 16 - off0[0]; shift_amount = 4; loop = (((off0[0] + size) + -1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; i = 0; goto ldv_63653; ldv_63652: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = 2U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 3U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_63644; ldv_63643: temp = qla4_82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); if ((temp & 8U) == 0U) { goto ldv_63642; } else { } j = j + 1; ldv_63644: ; if (j <= 999) { goto ldv_63643; } else { } ldv_63642: ; if (j > 999) { _rs.lock.raw_lock.val.counter = 0; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___1 = ___ratelimit(& _rs, "qla4_82xx_pci_mem_read_2M"); if (tmp___1 != 0) { printk("\v%s: failed to read through agent\n", "qla4_82xx_pci_mem_read_2M"); } else { } goto ldv_63648; } else { } start = off0[i] >> 2; end = ((off0[i] + sz[i]) + -1) >> 2; k = start; goto ldv_63650; ldv_63649: temp = qla4_82xx_rd_32(ha, (ulong )((uint64_t )((k + 42) * 4) + mem_crb)); word[i] = word[i] | ((unsigned long long )temp << (k & 1) * 32); k = k + 1; ldv_63650: ; if (k <= end) { goto ldv_63649; } else { } i = i + 1; ldv_63653: ; if (i < loop) { goto ldv_63652; } else { } ldv_63648: ; if (j > 999) { return (-1); } else { } if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> off0[0] * 8) & ~ (0xffffffffffffffffULL << sz[0] * 8)) | ((word[1] & ~ (0xffffffffffffffffULL << sz[1] * 8)) << sz[0] * 8); } switch (size) { case 1: *((uint8_t *)data) = (uint8_t )val; goto 
ldv_63655; case 2: *((uint16_t *)data) = (uint16_t )val; goto ldv_63655; case 4: *((uint32_t *)data) = (uint32_t )val; goto ldv_63655; case 8: *((uint64_t *)data) = val; goto ldv_63655; } ldv_63655: ; return (0); } } int qla4_82xx_pci_mem_write_2M(struct scsi_qla_host *ha , u64 off , void *data , int size ) { int i ; int j ; int ret ; int loop ; int sz[2U] ; int off0 ; int scale ; int shift_amount ; int startword ; uint32_t temp ; uint64_t off8 ; uint64_t mem_crb ; uint64_t tmpw ; uint64_t word[2U] ; int tmp ; unsigned long tmp___0 ; int tmp___1 ; int tmp___2 ; { ret = 0; word[0] = 0ULL; word[1] = 0ULL; if (off > 12884901887ULL && off <= 12952010751ULL) { mem_crb = 131072000ULL; } else { mem_crb = 102760448ULL; tmp___0 = qla4_82xx_pci_mem_bound_check(ha, off, size); if (tmp___0 == 0UL) { tmp = qla4_82xx_pci_mem_write_direct(ha, off, data, size); return (tmp); } else { } } off0 = (int )off & 7; sz[0] = size < 8 - off0 ? size : 8 - off0; sz[1] = size - sz[0]; off8 = off & 4294967280ULL; loop = (int )((unsigned int )((((off & 15ULL) + (u64 )size) - 1ULL) >> 4) + 1U); shift_amount = 4; scale = 2; startword = (int )((off & 15ULL) / 8ULL); i = 0; goto ldv_63680; ldv_63679: tmp___1 = qla4_82xx_pci_mem_read_2M(ha, (uint64_t )(i << shift_amount) + off8, (void *)(& word) + (unsigned long )(i * scale), 8); if (tmp___1 != 0) { return (-1); } else { } i = i + 1; ldv_63680: ; if (i < loop) { goto ldv_63679; } else { } switch (size) { case 1: tmpw = (uint64_t )*((uint8_t *)data); goto ldv_63683; case 2: tmpw = (uint64_t )*((uint16_t *)data); goto ldv_63683; case 4: tmpw = (uint64_t )*((uint32_t *)data); goto ldv_63683; case 8: ; default: tmpw = *((uint64_t *)data); goto ldv_63683; } ldv_63683: ; if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] = word[startword] & ~ (~ (0xffffffffffffffffULL << sz[0] * 8) << off0 * 8); word[startword] = word[startword] | (tmpw << off0 * 8); } if (sz[1] != 0) { word[startword + 1] = word[startword + 1] & ~ (0xffffffffffffffffULL << sz[1] * 8); word[startword + 1] = word[startword + 1] | (tmpw >> sz[0] * 8); } else { } i = 0; goto ldv_63694; ldv_63693: temp = (uint32_t )(i << shift_amount) + (uint32_t )off8; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 148ULL), temp); temp = 0U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 152ULL), temp); temp = (uint32_t )word[i * scale]; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 160ULL), temp); temp = (uint32_t )(word[i * scale] >> 32); qla4_82xx_wr_32(ha, (ulong )(mem_crb + 164ULL), temp); temp = (uint32_t )word[i * scale + 1]; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 176ULL), temp); temp = (uint32_t )(word[i * scale + 1] >> 32); qla4_82xx_wr_32(ha, (ulong )(mem_crb + 180ULL), temp); temp = 6U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); temp = 7U; qla4_82xx_wr_32(ha, (ulong )(mem_crb + 144ULL), temp); j = 0; goto ldv_63690; ldv_63689: temp = qla4_82xx_rd_32(ha, (ulong )(mem_crb + 144ULL)); if ((temp & 8U) == 0U) { goto ldv_63688; } else { } j = j + 1; ldv_63690: ; if (j <= 999) { goto ldv_63689; } else { } ldv_63688: ; if (j > 999) { tmp___2 = __printk_ratelimit("qla4_82xx_pci_mem_write_2M"); if (tmp___2 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to read through agent\n", "qla4_82xx_pci_mem_write_2M"); } else { } ret = -1; goto ldv_63692; } else { } i = i + 1; ldv_63694: ; if (i < loop) { goto ldv_63693; } else { } ldv_63692: ; return (ret); } } static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha , int pegtune_val ) { u32 val ; int retries ; struct task_struct *tmp ; long volatile 
__ret ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; uint32_t tmp___4 ; { val = 0U; retries = 60; if (pegtune_val == 0) { ldv_63709: val = qla4_82xx_rd_32(ha, 136323664UL); if (val == 65281U || val == 61455U) { return (0); } else { } tmp = get_current(); tmp->task_state_change = 0UL; __ret = 2L; switch (8UL) { case 1UL: tmp___0 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___0->state): : "memory", "cc"); goto ldv_63703; case 2UL: tmp___1 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___1->state): : "memory", "cc"); goto ldv_63703; case 4UL: tmp___2 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_63703; case 8UL: tmp___3 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___3->state): : "memory", "cc"); goto ldv_63703; default: __xchg_wrong_size(); } ldv_63703: schedule_timeout(500L); retries = retries - 1; if (retries != 0) { goto ldv_63709; } else { } if (retries == 0) { tmp___4 = qla4_82xx_rd_32(ha, 154140764UL); pegtune_val = (int )tmp___4; printk("\f%s: init failed, pegtune_val = %x\n", "qla4_82xx_cmdpeg_ready", pegtune_val); return (-1); } else { } } else { } return (0); } } static int qla4_82xx_rcvpeg_ready(struct scsi_qla_host *ha ) { uint32_t state ; int loops ; { state = 0U; loops = 0; _raw_read_lock(& ha->hw_lock); state = qla4_82xx_rd_32(ha, 136323900UL); _raw_read_unlock(& ha->hw_lock); goto ldv_63718; ldv_63717: __const_udelay(429500UL); _raw_read_lock(& ha->hw_lock); state = qla4_82xx_rd_32(ha, 136323900UL); _raw_read_unlock(& ha->hw_lock); loops = loops + 1; ldv_63718: ; if (state != 65281U && loops <= 29999) { goto ldv_63717; } else { } if (loops > 29999) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Receive Peg initialization not complete: 0x%x.\n", state); } else { } return (1); } else { } return (0); } } void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha ) { uint32_t drv_active ; int tmp ; int tmp___0 ; int tmp___1 ; { tmp = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { drv_active = (uint32_t )(1 << (int )ha->func_num) | drv_active; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { drv_active = (uint32_t )(1 << (int )ha->func_num) | drv_active; } else { drv_active = (uint32_t )(1 << (int )ha->func_num * 4) | drv_active; } } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_active: 0x%08x\n", "qla4_8xxx_set_drv_active", ha->host_no, drv_active); qla4_8xxx_wr_direct(ha, 3U, drv_active); return; } } void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha ) { uint32_t drv_active ; int tmp ; int tmp___0 ; int tmp___1 ; { tmp = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { drv_active = (uint32_t )(~ (1 << (int )ha->func_num)) & drv_active; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { drv_active = (uint32_t )(~ (1 << (int )ha->func_num)) & drv_active; } else { drv_active = (uint32_t )(~ (1 << (int )ha->func_num * 4)) & drv_active; } } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_active: 0x%08x\n", "qla4_8xxx_clear_drv_active", ha->host_no, drv_active); qla4_8xxx_wr_direct(ha, 3U, drv_active); return; } } void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha ) { uint32_t drv_state ; int tmp ; int 
tmp___0 ; int tmp___1 ; { tmp = qla4_8xxx_rd_direct(ha, 5U); drv_state = (uint32_t )tmp; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { drv_state = (uint32_t )(1 << (int )ha->func_num) | drv_state; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { drv_state = (uint32_t )(1 << (int )ha->func_num) | drv_state; } else { drv_state = (uint32_t )(1 << (int )ha->func_num * 4) | drv_state; } } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_state: 0x%08x\n", "qla4_8xxx_set_rst_ready", ha->host_no, drv_state); qla4_8xxx_wr_direct(ha, 5U, drv_state); return; } } void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha ) { uint32_t drv_state ; int tmp ; int tmp___0 ; int tmp___1 ; { tmp = qla4_8xxx_rd_direct(ha, 5U); drv_state = (uint32_t )tmp; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { drv_state = (uint32_t )(~ (1 << (int )ha->func_num)) & drv_state; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { drv_state = (uint32_t )(~ (1 << (int )ha->func_num)) & drv_state; } else { drv_state = (uint32_t )(~ (1 << (int )ha->func_num * 4)) & drv_state; } } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_state: 0x%08x\n", "qla4_8xxx_clear_rst_ready", ha->host_no, drv_state); qla4_8xxx_wr_direct(ha, 5U, drv_state); return; } } __inline static void qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha ) { uint32_t qsnt_state ; int tmp ; int tmp___0 ; int tmp___1 ; { tmp = qla4_8xxx_rd_direct(ha, 5U); qsnt_state = (uint32_t )tmp; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { qsnt_state = (uint32_t )(1 << (int )ha->func_num) | qsnt_state; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { qsnt_state = (uint32_t )(1 << (int )ha->func_num) | qsnt_state; } else { qsnt_state = (uint32_t )(2 << (int )ha->func_num * 4) | qsnt_state; } } qla4_8xxx_wr_direct(ha, 5U, qsnt_state); return; } } static int qla4_82xx_start_firmware(struct scsi_qla_host *ha , uint32_t image_start ) { uint16_t lnk ; int tmp ; int tmp___0 ; int tmp___1 ; { qla4_82xx_wr_32(ha, 136323788UL, 1431655765U); qla4_82xx_wr_32(ha, 136323664UL, 0U); qla4_82xx_wr_32(ha, 136323900UL, 0U); qla4_82xx_wr_32(ha, 136323240UL, 0U); qla4_82xx_wr_32(ha, 136323244UL, 0U); tmp = qla4_82xx_load_fw(ha, image_start); if (tmp != 0) { printk("%s: Error trying to start fw!\n", "qla4_82xx_start_firmware"); return (1); } else { } tmp___0 = qla4_82xx_cmdpeg_ready(ha, 0); if (tmp___0 != 0) { printk("%s: Error during card handshake!\n", "qla4_82xx_start_firmware"); return (1); } else { } pcie_capability_read_word(ha->pdev, 18, & lnk); ha->link_width = ((int )lnk >> 4) & 63; tmp___1 = qla4_82xx_rcvpeg_ready(ha); return (tmp___1); } } int qla4_82xx_try_start_fw(struct scsi_qla_host *ha ) { int rval ; { rval = 1; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "FW: Retrieving flash offsets from FLT/FDT ...\n"); rval = qla4_8xxx_get_flash_info(ha); if (rval != 0) { return (rval); } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "FW: Attempting to load firmware from flash...\n"); rval = qla4_82xx_start_firmware(ha, ha->hw.flt_region_fw); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "FW: Load firmware from flash FAILED...\n"); return (rval); } else { } return (rval); } } void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha ) { int tmp ; { tmp = qla4_82xx_rom_lock(ha); if (tmp != 0) { _dev_info((struct device const *)(& (ha->pdev)->dev), "Resetting rom_lock\n"); } else { } qla4_82xx_rom_unlock(ha); return; } } static uint32_t 
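/* Helper below: spin on the register at addr1 until any bit in 'mask' reads
 * back as set; gives up after roughly 100 ms of jiffies, logging an
 * "rdmdio entry" error and returning 1, otherwise returns 0. */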
ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha , uint32_t addr1 , uint32_t mask ) { unsigned long timeout ; uint32_t rval ; uint32_t temp ; unsigned long tmp ; { rval = 0U; tmp = msecs_to_jiffies(100U); timeout = tmp + (unsigned long )jiffies; ldv_63778: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_63771; } else { } if ((long )((unsigned long )jiffies - timeout) >= 0L) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Error in processing rdmdio entry\n"); return (1U); } else { } goto ldv_63778; ldv_63771: ; return (rval); } } uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha , uint32_t addr1 , uint32_t addr3 , uint32_t mask , uint32_t addr , uint32_t *data_ptr ) { int rval ; uint32_t temp ; uint32_t data ; uint32_t tmp ; uint32_t tmp___0 ; { rval = 0; tmp = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); rval = (int )tmp; if (rval != 0) { goto exit_ipmdio_rd_reg; } else { } temp = addr | 1073741824U; (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, temp); tmp___0 = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); rval = (int )tmp___0; if (rval != 0) { goto exit_ipmdio_rd_reg; } else { } (*((ha->isp_ops)->rd_reg_indirect))(ha, addr3, & data); *data_ptr = data; exit_ipmdio_rd_reg: ; return ((uint32_t )rval); } } static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha , uint32_t addr1 , uint32_t addr2 , uint32_t addr3 , uint32_t mask ) { unsigned long timeout ; uint32_t temp ; uint32_t rval ; unsigned long tmp ; { rval = 0U; tmp = msecs_to_jiffies(100U); timeout = tmp + (unsigned long )jiffies; ldv_63808: ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, & temp); if ((temp & 1U) == 0U) { goto ldv_63801; } else { } if ((long )((unsigned long )jiffies - timeout) >= 0L) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Error in processing mdiobus idle\n"); return (1U); } else { } goto ldv_63808; ldv_63801: ; return (rval); } } static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha , uint32_t addr1 , uint32_t addr3 , uint32_t mask , uint32_t addr , uint32_t value ) { int rval ; uint32_t tmp ; uint32_t tmp___0 ; { rval = 0; tmp = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); rval = (int )tmp; if (rval != 0) { goto exit_ipmdio_wr_reg; } else { } (*((ha->isp_ops)->wr_reg_indirect))(ha, addr3, value); (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, addr); tmp___0 = ql4_84xx_poll_wait_for_ready(ha, addr1, mask); rval = (int )tmp___0; if (rval != 0) { } else { } exit_ipmdio_wr_reg: ; return (rval); } } static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8xxx_minidump_entry_crb *crb_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_rdcrb"); } else { } crb_hdr = (struct qla8xxx_minidump_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = (uint32_t )crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; i = 0U; goto ldv_63833; ldv_63832: (*((ha->isp_ops)->rd_reg_indirect))(ha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_addr; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_63833: ; if (i < loop_cnt) { goto ldv_63832; } else { } 
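/* The loop above read 'op_count' CRB registers starting at crb_hdr->addr,
 * stepping by addr_stride, and stored an (address, value) pair per register;
 * the caller's data pointer is advanced past the captured words below. */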
*d_ptr = data_ptr; return; } } static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha ) { int rval ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr ; { rval = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)0; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)ha->fw_dump_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = (*((ha->isp_ops)->rd_reg_indirect))(ha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { return (1); } else { } if ((int )cmd_sts_and_cntrl < 0) { return (0); } else { return (1); } } } static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha , struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr ) { int rval ; int wait ; uint32_t dma_eng_num ; uint32_t cmd_sts_and_cntrl ; uint64_t dma_base_addr ; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr ; { rval = 0; wait = 0; dma_eng_num = 0U; cmd_sts_and_cntrl = 0U; dma_base_addr = 0ULL; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)0; tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)ha->fw_dump_tmplt_hdr; dma_eng_num = tmplt_hdr->saved_state_array[8]; dma_base_addr = (uint64_t )((dma_eng_num + 30514U) * 65536U); rval = (*((ha->isp_ops)->wr_reg_indirect))(ha, (uint32_t )dma_base_addr, m_hdr->desc_card_addr); if (rval != 0) { goto error_exit; } else { } rval = (*((ha->isp_ops)->wr_reg_indirect))(ha, (uint32_t )dma_base_addr + 4U, 0U); if (rval != 0) { goto error_exit; } else { } rval = (*((ha->isp_ops)->wr_reg_indirect))(ha, (uint32_t )dma_base_addr + 8U, m_hdr->start_dma_cmd); if (rval != 0) { goto error_exit; } else { } wait = 0; goto ldv_63856; ldv_63855: rval = (*((ha->isp_ops)->rd_reg_indirect))(ha, (uint32_t )dma_base_addr + 8U, & cmd_sts_and_cntrl); if (rval != 0) { goto error_exit; } else { } if ((cmd_sts_and_cntrl & 2U) == 0U) { goto ldv_63854; } else { __const_udelay(42950UL); } wait = wait + 1; ldv_63856: ; if (wait <= 9999) { goto ldv_63855; } else { } ldv_63854: ; if (wait > 9999) { rval = 1; goto error_exit; } else { } error_exit: ; return (rval); } } static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { int rval ; struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr ; uint32_t size ; uint32_t read_size ; uint8_t *data_ptr ; void *rdmem_buffer ; dma_addr_t rdmem_dma ; struct qla4_83xx_pex_dma_descriptor dma_desc ; { rval = 0; m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)0; data_ptr = (uint8_t *)*d_ptr; rdmem_buffer = (void *)0; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_pex_dma_read"); } else { } rval = qla4_83xx_check_dma_engine_state(ha); if (rval != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: DMA engine not available. 
Fallback to rdmem-read.\n", "qla4_8xxx_minidump_pex_dma_read"); } else { } return (1); } else { } m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr; rdmem_buffer = dma_alloc_attrs(& (ha->pdev)->dev, 16384UL, & rdmem_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )rdmem_buffer == (unsigned long )((void *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate rdmem dma buffer\n", "qla4_8xxx_minidump_pex_dma_read"); } else { } return (1); } else { } dma_desc.cmd.dma_desc_cmd = (unsigned int )m_hdr->dma_desc_cmd & 65295U; dma_desc.cmd.dma_desc_cmd = (unsigned int )dma_desc.cmd.dma_desc_cmd | (((unsigned int )((uint16_t )(ha->pdev)->devfn) & 7U) << 4U); dma_desc.dma_bus_addr = rdmem_dma; size = 0U; read_size = 0U; goto ldv_63873; ldv_63872: ; if (m_hdr->read_data_size - read_size > 16383U) { size = 16384U; } else { size = m_hdr->read_data_size - read_size; if ((unsigned long )rdmem_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, 16384UL, rdmem_buffer, rdmem_dma, (struct dma_attrs *)0); } else { } rdmem_buffer = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )size, & rdmem_dma, 208U, (struct dma_attrs *)0); if ((unsigned long )rdmem_buffer == (unsigned long )((void *)0)) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to allocate rdmem dma buffer\n", "qla4_8xxx_minidump_pex_dma_read"); } else { } return (1); } else { } dma_desc.dma_bus_addr = rdmem_dma; } dma_desc.src_addr = (uint64_t )(m_hdr->read_addr + read_size); dma_desc.cmd.read_data_size = size; rval = qla4_8xxx_ms_mem_write_128b(ha, (unsigned long long )m_hdr->desc_card_addr, (uint32_t *)(& dma_desc), 3U); if (rval != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Error writing rdmem-dma-init to MS !!!\n", "qla4_8xxx_minidump_pex_dma_read"); goto error_exit; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n", "qla4_8xxx_minidump_pex_dma_read", size); } else { } rval = qla4_83xx_start_pex_dma(ha, m_hdr); if (rval != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld): start-pex-dma failed rval=0x%x\n", ha->host_no, rval); } else { } goto error_exit; } else { } memcpy((void *)data_ptr, (void const *)rdmem_buffer, (size_t )size); data_ptr = data_ptr + (unsigned long )size; read_size = read_size + size; ldv_63873: ; if (m_hdr->read_data_size > read_size) { goto ldv_63872; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Leaving fn: %s\n", "qla4_8xxx_minidump_pex_dma_read"); } else { } *d_ptr = (uint32_t *)data_ptr; error_exit: ; if ((unsigned long )rdmem_buffer != (unsigned long )((void *)0)) { dma_free_attrs(& (ha->pdev)->dev, (size_t )size, rdmem_buffer, rdmem_dma, (struct dma_attrs *)0); } else { } return (rval); } } static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; unsigned long p_wait ; unsigned long w_time ; unsigned long p_mask ; uint32_t c_value_w ; uint32_t c_value_r ; struct qla8xxx_minidump_entry_cache *cache_hdr ; 
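/* L2 cache/tag capture: for each of 'op_count' tag values the body below
 * programs the tag register, optionally kicks the control register and, when
 * a poll mask is given, waits up to poll_wait jiffies for those bits to clear
 * (bailing out with an error if they do not), then reads read_addr_cnt words
 * at read_addr_stride into the dump buffer. */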
int rval ; uint32_t *data_ptr ; uint32_t *tmp ; { rval = 1; data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_l2tag"); } else { } cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; p_wait = (unsigned long )cache_hdr->cache_ctrl.poll_wait; p_mask = (unsigned long )cache_hdr->cache_ctrl.poll_mask; i = 0U; goto ldv_63911; ldv_63910: (*((ha->isp_ops)->wr_reg_indirect))(ha, t_r_addr, t_value); if (c_value_w != 0U) { (*((ha->isp_ops)->wr_reg_indirect))(ha, c_addr, c_value_w); } else { } if (p_mask != 0UL) { w_time = (unsigned long )jiffies + p_wait; ldv_63906: (*((ha->isp_ops)->rd_reg_indirect))(ha, c_addr, & c_value_r); if (((unsigned long )c_value_r & p_mask) == 0UL) { goto ldv_63899; } else if ((long )((unsigned long )jiffies - w_time) >= 0L) { return (rval); } else { } goto ldv_63906; ldv_63899: ; } else { } addr = r_addr; k = 0U; goto ldv_63908; ldv_63907: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_63908: ; if (k < r_cnt) { goto ldv_63907; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_63911: ; if (i < loop_count) { goto ldv_63910; } else { } *d_ptr = data_ptr; return (0); } } static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr ) { struct qla8xxx_minidump_entry_crb *crb_entry ; uint32_t read_value ; uint32_t opcode ; uint32_t poll_time ; uint32_t addr ; uint32_t index ; uint32_t rval ; uint32_t crb_addr ; unsigned long wtime ; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr ; int i ; { rval = 0U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_control"); } else { } tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)ha->fw_dump_tmplt_hdr; crb_entry = (struct qla8xxx_minidump_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; i = 0; goto ldv_63938; ldv_63937: opcode = (uint32_t )crb_entry->crb_ctrl.opcode; if ((int )opcode & 1) { (*((ha->isp_ops)->wr_reg_indirect))(ha, crb_addr, crb_entry->value_1); opcode = opcode & 4294967294U; } else { } if ((opcode & 2U) != 0U) { (*((ha->isp_ops)->rd_reg_indirect))(ha, crb_addr, & read_value); (*((ha->isp_ops)->wr_reg_indirect))(ha, crb_addr, read_value); opcode = opcode & 4294967293U; } else { } if ((opcode & 4U) != 0U) { (*((ha->isp_ops)->rd_reg_indirect))(ha, crb_addr, & read_value); read_value = crb_entry->value_2 & read_value; opcode = opcode & 4294967291U; if ((opcode & 8U) != 0U) { read_value = crb_entry->value_3 | read_value; opcode = opcode & 4294967287U; } else { } (*((ha->isp_ops)->wr_reg_indirect))(ha, crb_addr, read_value); } else { } if ((opcode & 8U) != 0U) { (*((ha->isp_ops)->rd_reg_indirect))(ha, crb_addr, & read_value); read_value = crb_entry->value_3 | read_value; (*((ha->isp_ops)->wr_reg_indirect))(ha, crb_addr, read_value); opcode = opcode & 4294967287U; } else { } if ((opcode & 16U) != 0U) { poll_time = (uint32_t 
)crb_entry->crb_strd.poll_timeout; wtime = (unsigned long )poll_time + (unsigned long )jiffies; (*((ha->isp_ops)->rd_reg_indirect))(ha, crb_addr, & read_value); ldv_63936: ; if ((crb_entry->value_2 & read_value) == crb_entry->value_1) { goto ldv_63929; } else if ((long )((unsigned long )jiffies - wtime) >= 0L) { rval = 1U; goto ldv_63929; } else { (*((ha->isp_ops)->rd_reg_indirect))(ha, crb_addr, & read_value); } goto ldv_63936; ldv_63929: opcode = opcode & 4294967279U; } else { } if ((opcode & 32U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } (*((ha->isp_ops)->rd_reg_indirect))(ha, addr, & read_value); index = (uint32_t )crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967263U; } else { } if ((opcode & 64U) != 0U) { if ((unsigned int )crb_entry->crb_strd.state_index_a != 0U) { index = (uint32_t )crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else { addr = crb_addr; } if ((unsigned int )crb_entry->crb_ctrl.state_index_v != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else { read_value = crb_entry->value_1; } (*((ha->isp_ops)->wr_reg_indirect))(ha, addr, read_value); opcode = opcode & 4294967231U; } else { } if ((opcode & 128U) != 0U) { index = (uint32_t )crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value = read_value << (int )crb_entry->crb_ctrl.shl; read_value = read_value >> (int )crb_entry->crb_ctrl.shr; if (crb_entry->value_2 != 0U) { read_value = crb_entry->value_2 & read_value; } else { } read_value = crb_entry->value_3 | read_value; read_value = crb_entry->value_1 + read_value; tmplt_hdr->saved_state_array[index] = read_value; opcode = opcode & 4294967167U; } else { } crb_addr = (uint32_t )crb_entry->crb_strd.addr_stride + crb_addr; i = i + 1; ldv_63938: ; if ((uint32_t )i < crb_entry->op_count) { goto ldv_63937; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Leaving fn: %s\n", "qla4_8xxx_minidump_process_control"); } else { } return ((int )rval); } } static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_stride ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8xxx_minidump_entry_rdocm *ocm_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_rdocm"); } else { } ocm_hdr = (struct qla8xxx_minidump_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n", "qla4_8xxx_minidump_process_rdocm", r_addr, r_stride, loop_cnt); } else { } i = 0U; goto ldv_63954; ldv_63953: r_value = readl((void const volatile *)((unsigned long )r_addr + ha->nx_pcibase)); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; i = i + 1U; ldv_63954: ; if (i < loop_cnt) { goto ldv_63953; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct 
device const *)(& (ha->pdev)->dev), "Leaving fn: %s datacount: 0x%lx\n", "qla4_8xxx_minidump_process_rdocm", (unsigned long )loop_cnt * 4UL); } else { } *d_ptr = data_ptr; return; } } static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_stride ; uint32_t s_addr ; uint32_t s_value ; uint32_t loop_cnt ; uint32_t i ; uint32_t r_value ; struct qla8xxx_minidump_entry_mux *mux_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_rdmux"); } else { } mux_hdr = (struct qla8xxx_minidump_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; i = 0U; goto ldv_63972; ldv_63971: (*((ha->isp_ops)->wr_reg_indirect))(ha, s_addr, s_value); (*((ha->isp_ops)->rd_reg_indirect))(ha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = s_value + s_stride; i = i + 1U; ldv_63972: ; if (i < loop_cnt) { goto ldv_63971; } else { } *d_ptr = data_ptr; return; } } static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr ; uint32_t r_addr ; uint32_t c_addr ; uint32_t t_r_addr ; uint32_t i ; uint32_t k ; uint32_t loop_count ; uint32_t t_value ; uint32_t r_cnt ; uint32_t r_value ; uint32_t c_value_w ; struct qla8xxx_minidump_entry_cache *cache_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { data_ptr = *d_ptr; cache_hdr = (struct qla8xxx_minidump_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = (uint32_t )cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = (uint32_t )cache_hdr->addr_ctrl.init_tag_value; r_cnt = (uint32_t )cache_hdr->read_ctrl.read_addr_cnt; i = 0U; goto ldv_63996; ldv_63995: (*((ha->isp_ops)->wr_reg_indirect))(ha, t_r_addr, t_value); (*((ha->isp_ops)->wr_reg_indirect))(ha, c_addr, c_value_w); addr = r_addr; k = 0U; goto ldv_63993; ldv_63992: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; addr = (uint32_t )cache_hdr->read_ctrl.read_addr_stride + addr; k = k + 1U; ldv_63993: ; if (k < r_cnt) { goto ldv_63992; } else { } t_value = (uint32_t )cache_hdr->addr_ctrl.tag_value_stride + t_value; i = i + 1U; ldv_63996: ; if (i < loop_count) { goto ldv_63995; } else { } *d_ptr = data_ptr; return; } } static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t s_addr ; uint32_t r_addr ; uint32_t r_stride ; uint32_t r_value ; uint32_t r_cnt ; uint32_t qid ; uint32_t i ; uint32_t k ; uint32_t loop_cnt ; struct qla8xxx_minidump_entry_queue *q_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { qid = 0U; data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_8xxx_minidump_process_queue"); } else { } q_hdr = (struct qla8xxx_minidump_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = (uint32_t )q_hdr->rd_strd.read_addr_cnt; r_stride = (uint32_t 
)q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; i = 0U; goto ldv_64019; ldv_64018: (*((ha->isp_ops)->wr_reg_indirect))(ha, s_addr, qid); r_addr = q_hdr->read_addr; k = 0U; goto ldv_64016; ldv_64015: (*((ha->isp_ops)->rd_reg_indirect))(ha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + r_stride; k = k + 1U; ldv_64016: ; if (k < r_cnt) { goto ldv_64015; } else { } qid = (uint32_t )q_hdr->q_strd.queue_id_stride + qid; i = i + 1U; ldv_64019: ; if (i < loop_cnt) { goto ldv_64018; } else { } *d_ptr = data_ptr; return; } } static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_value ; uint32_t i ; uint32_t loop_cnt ; struct qla8xxx_minidump_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; { data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "qla4_82xx_minidump_process_rdrom"); } else { } rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size / 4U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n", "qla4_82xx_minidump_process_rdrom", r_addr, loop_cnt); } else { } i = 0U; goto ldv_64034; ldv_64033: (*((ha->isp_ops)->wr_reg_indirect))(ha, 1108410416U, r_addr & 4294901760U); (*((ha->isp_ops)->rd_reg_indirect))(ha, (r_addr & 65535U) + 1108672512U, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = r_value; r_addr = r_addr + 4U; i = i + 1U; ldv_64034: ; if (i < loop_cnt) { goto ldv_64033; } else { } *d_ptr = data_ptr; return; } } static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t r_value ; uint32_t r_data ; uint32_t i ; uint32_t j ; uint32_t loop_cnt ; struct qla8xxx_minidump_entry_rdmem *m_hdr ; unsigned long flags ; uint32_t *data_ptr ; struct ratelimit_state _rs ; int tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Entering fn: %s\n", "__qla4_8xxx_minidump_process_rdmem"); } else { } m_hdr = (struct qla8xxx_minidump_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size / 16U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n", "__qla4_8xxx_minidump_process_rdmem", r_addr, m_hdr->read_data_size); } else { } if ((r_addr & 15U) != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: Read addr 0x%x not 16 bytes aligned\n", "__qla4_8xxx_minidump_process_rdmem", r_addr); } else { } return (1); } else { } if ((m_hdr->read_data_size & 15U) != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: Read data[0x%x] not multiple of 16 bytes\n", "__qla4_8xxx_minidump_process_rdmem", m_hdr->read_data_size); } else { } return (1); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", "__qla4_8xxx_minidump_process_rdmem", r_addr, m_hdr->read_data_size, loop_cnt); } else { } flags = 
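/* With hw_lock held, each 16-byte chunk is fetched through what appears to be
 * the MIU test agent: program the address register, write 2 and then 3 to the
 * control register, poll it (up to 1000 tries) until the busy bit clears, then
 * read the four MD_MIU_TEST_AGT_RDDATA words into the dump buffer. */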
_raw_write_lock_irqsave(& ha->hw_lock); i = 0U; goto ldv_64066; ldv_64065: (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519188U, r_addr); r_value = 0U; (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519192U, r_value); r_value = 2U; (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519184U, r_value); r_value = 3U; (*((ha->isp_ops)->wr_reg_indirect))(ha, 1090519184U, r_value); j = 0U; goto ldv_64056; ldv_64055: (*((ha->isp_ops)->rd_reg_indirect))(ha, 1090519184U, & r_value); if ((r_value & 8U) == 0U) { goto ldv_64054; } else { } j = j + 1U; ldv_64056: ; if (j <= 999U) { goto ldv_64055; } else { } ldv_64054: ; if (j > 999U) { _rs.lock.raw_lock.val.counter = 0; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = (void *)-1; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp = ___ratelimit(& _rs, "__qla4_8xxx_minidump_process_rdmem"); if (tmp != 0) { printk("\v%s: failed to read through agent\n", "__qla4_8xxx_minidump_process_rdmem"); } else { } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); return (0); } else { } j = 0U; goto ldv_64063; ldv_64062: (*((ha->isp_ops)->rd_reg_indirect))(ha, (uint32_t )MD_MIU_TEST_AGT_RDDATA[j], & r_data); tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_data; j = j + 1U; ldv_64063: ; if (j <= 3U) { goto ldv_64062; } else { } r_addr = r_addr + 16U; i = i + 1U; ldv_64066: ; if (i < loop_cnt) { goto ldv_64065; } else { } _raw_write_unlock_irqrestore(& ha->hw_lock, flags); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Leaving fn: %s datacount: 0x%x\n", "__qla4_8xxx_minidump_process_rdmem", loop_cnt * 16U); } else { } *d_ptr = data_ptr; return (0); } } static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t *data_ptr ; int rval ; { data_ptr = *d_ptr; rval = 0; rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, & data_ptr); if (rval != 0) { rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, & data_ptr); } else { } *d_ptr = data_ptr; return (rval); } } static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , int index ) { { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n", ha->host_no, index, entry_hdr->entry_type, (int )entry_hdr->d_ctrl.entry_capture_mask); } else { } ha->fw_dump_skip_size = ha->fw_dump_skip_size + entry_hdr->entry_capture_size; return; } } static uint32_t qla83xx_minidump_process_pollrd(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t r_addr ; uint32_t s_addr ; uint32_t s_value ; uint32_t r_value ; uint32_t poll_wait___0 ; uint32_t poll_mask ; uint16_t s_stride ; uint16_t i ; uint32_t *data_ptr ; uint32_t rval ; struct qla83xx_minidump_entry_pollrd *pollrd_hdr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; rval = 0U; pollrd_hdr = (struct qla83xx_minidump_entry_pollrd *)entry_hdr; s_addr = pollrd_hdr->select_addr; r_addr = pollrd_hdr->read_addr; s_value = pollrd_hdr->select_value; s_stride = 
pollrd_hdr->select_value_stride; poll_wait___0 = pollrd_hdr->poll_wait; poll_mask = pollrd_hdr->poll_mask; i = 0U; goto ldv_64104; ldv_64103: (*((ha->isp_ops)->wr_reg_indirect))(ha, s_addr, s_value); poll_wait___0 = pollrd_hdr->poll_wait; ldv_64102: (*((ha->isp_ops)->rd_reg_indirect))(ha, s_addr, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_64099; } else { msleep(1U); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT\n", "qla83xx_minidump_process_pollrd"); rval = 1U; goto exit_process_pollrd; } else { } } goto ldv_64102; ldv_64099: (*((ha->isp_ops)->rd_reg_indirect))(ha, r_addr, & r_value); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = s_value; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = r_value; s_value = (uint32_t )s_stride + s_value; i = (uint16_t )((int )i + 1); ldv_64104: ; if ((int )pollrd_hdr->op_count > (int )i) { goto ldv_64103; } else { } *d_ptr = data_ptr; exit_process_pollrd: ; return (rval); } } static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { int loop_cnt ; uint32_t addr1 ; uint32_t addr2 ; uint32_t value ; uint32_t data ; uint32_t temp ; uint32_t wrval ; uint8_t stride ; uint8_t stride2 ; uint16_t count ; uint32_t poll ; uint32_t mask ; uint32_t data_size ; uint32_t modify_mask ; uint32_t wait_count ; uint32_t *data_ptr ; struct qla8044_minidump_entry_rddfe *rddfe ; uint32_t rval ; uint32_t *tmp ; uint32_t *tmp___0 ; { wait_count = 0U; data_ptr = *d_ptr; rval = 0U; rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr; addr1 = rddfe->addr_1; value = rddfe->value; stride = rddfe->stride; stride2 = rddfe->stride2; count = rddfe->count; poll = rddfe->poll; mask = rddfe->mask; modify_mask = rddfe->modify_mask; data_size = rddfe->data_size; addr2 = (uint32_t )stride + addr1; loop_cnt = 0; goto ldv_64141; ldv_64140: (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, value | 1073741824U); wait_count = 0U; goto ldv_64131; ldv_64130: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_64129; } else { } wait_count = wait_count + 1U; ldv_64131: ; if (wait_count < poll) { goto ldv_64130; } else { } ldv_64129: ; if (wait_count == poll) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT\n", "qla4_84xx_minidump_process_rddfe"); rval = 1U; goto exit_process_rddfe; } else { (*((ha->isp_ops)->rd_reg_indirect))(ha, addr2, & temp); temp = temp & modify_mask; temp = (uint32_t )((loop_cnt << 16) | loop_cnt) | temp; wrval = (temp << 16) | temp; (*((ha->isp_ops)->wr_reg_indirect))(ha, addr2, wrval); (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, value); wait_count = 0U; goto ldv_64136; ldv_64135: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_64134; } else { } wait_count = wait_count + 1U; ldv_64136: ; if (wait_count < poll) { goto ldv_64135; } else { } ldv_64134: ; if (wait_count == poll) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT\n", "qla4_84xx_minidump_process_rddfe"); rval = 1U; goto exit_process_rddfe; } else { } (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, (value | 1073741824U) + (uint32_t )stride2); wait_count = 0U; goto ldv_64139; ldv_64138: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & temp); if ((temp & mask) != 0U) { goto ldv_64137; } else { } wait_count = wait_count + 1U; ldv_64139: ; if (wait_count < poll) { goto ldv_64138; } else { } 
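/* End of the third mask-poll in the RDDFE sequence: once the mask bits at
 * addr1 assert (or the 'poll' budget is exhausted, reported as TIMEOUT below),
 * the data word is read back from addr2 and stored after the wrval tag. */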
ldv_64137: ; if (wait_count == poll) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT\n", "qla4_84xx_minidump_process_rddfe"); rval = 1U; goto exit_process_rddfe; } else { } (*((ha->isp_ops)->rd_reg_indirect))(ha, addr2, & data); tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = wrval; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; } loop_cnt = loop_cnt + 1; ldv_64141: ; if ((int )count > loop_cnt) { goto ldv_64140; } else { } *d_ptr = data_ptr; exit_process_rddfe: ; return (rval); } } static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { int rval ; uint32_t addr1 ; uint32_t addr2 ; uint32_t value1 ; uint32_t value2 ; uint32_t data ; uint32_t selval ; uint8_t stride1 ; uint8_t stride2 ; uint32_t addr3 ; uint32_t addr4 ; uint32_t addr5 ; uint32_t addr6 ; uint32_t addr7 ; uint16_t count ; uint16_t loop_cnt ; uint32_t poll ; uint32_t mask ; uint32_t *data_ptr ; struct qla8044_minidump_entry_rdmdio *rdmdio ; uint32_t tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; uint32_t *tmp___2 ; uint32_t *tmp___3 ; { rval = 0; data_ptr = *d_ptr; rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr; addr1 = rdmdio->addr_1; addr2 = rdmdio->addr_2; value1 = rdmdio->value_1; stride1 = rdmdio->stride_1; stride2 = rdmdio->stride_2; count = rdmdio->count; poll = rdmdio->poll; mask = rdmdio->mask; value2 = rdmdio->value_2; addr3 = (uint32_t )stride1 + addr1; loop_cnt = 0U; goto ldv_64170; ldv_64169: tmp = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, addr3, mask); rval = (int )tmp; if (rval != 0) { goto exit_process_rdmdio; } else { } addr4 = addr2 - (uint32_t )stride1; rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4, value2); if (rval != 0) { goto exit_process_rdmdio; } else { } addr5 = (uint32_t )((int )stride1 * -2) + addr2; rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5, value1); if (rval != 0) { goto exit_process_rdmdio; } else { } addr6 = (uint32_t )((int )stride1 * -3) + addr2; rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr6, 2U); if (rval != 0) { goto exit_process_rdmdio; } else { } tmp___0 = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2, addr3, mask); rval = (int )tmp___0; if (rval != 0) { goto exit_process_rdmdio; } else { } addr7 = (uint32_t )((int )stride1 * -4) + addr2; tmp___1 = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr7, & data); rval = (int )tmp___1; if (rval != 0) { goto exit_process_rdmdio; } else { } selval = ((value2 << 18) | (value1 << 2)) | 2U; stride2 = rdmdio->stride_2; tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = selval; tmp___3 = data_ptr; data_ptr = data_ptr + 1; *tmp___3 = data; value1 = (uint32_t )stride2 + value1; *d_ptr = data_ptr; loop_cnt = (uint16_t )((int )loop_cnt + 1); ldv_64170: ; if ((int )loop_cnt < (int )count) { goto ldv_64169; } else { } exit_process_rdmdio: ; return ((uint32_t )rval); } } static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t addr1 ; uint32_t addr2 ; uint32_t value1 ; uint32_t value2 ; uint32_t poll ; uint32_t mask ; uint32_t r_value ; struct qla8044_minidump_entry_pollwr *pollwr_hdr ; uint32_t wait_count ; uint32_t rval ; { wait_count = 0U; rval = 0U; pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr; addr1 = pollwr_hdr->addr_1; addr2 = pollwr_hdr->addr_2; value1 = pollwr_hdr->value_1; value2 = pollwr_hdr->value_2; poll = pollwr_hdr->poll; 
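/* POLLWR entry: wait (at most 'poll' iterations) for the bits in 'poll' to
 * assert at addr_1, then write value_2 to addr_2 and value_1 to addr_1 and
 * repeat the wait; expiry of the first wait is reported as a TIMEOUT. */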
mask = pollwr_hdr->mask; goto ldv_64189; ldv_64188: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & r_value); if ((r_value & poll) != 0U) { goto ldv_64187; } else { } wait_count = wait_count + 1U; ldv_64189: ; if (wait_count < poll) { goto ldv_64188; } else { } ldv_64187: ; if (wait_count == poll) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT\n", "qla4_84xx_minidump_process_pollwr"); rval = 1U; goto exit_process_pollwr; } else { } (*((ha->isp_ops)->wr_reg_indirect))(ha, addr2, value2); (*((ha->isp_ops)->wr_reg_indirect))(ha, addr1, value1); wait_count = 0U; goto ldv_64194; ldv_64193: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr1, & r_value); if ((r_value & poll) != 0U) { goto ldv_64192; } else { } wait_count = wait_count + 1U; ldv_64194: ; if (wait_count < poll) { goto ldv_64193; } else { } ldv_64192: ; exit_process_pollwr: ; return (rval); } } static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t sel_val1 ; uint32_t sel_val2 ; uint32_t t_sel_val ; uint32_t data ; uint32_t i ; uint32_t sel_addr1 ; uint32_t sel_addr2 ; uint32_t sel_val_mask ; uint32_t read_addr ; struct qla83xx_minidump_entry_rdmux2 *rdmux2_hdr ; uint32_t *data_ptr ; uint32_t *tmp ; uint32_t *tmp___0 ; uint32_t *tmp___1 ; uint32_t *tmp___2 ; { data_ptr = *d_ptr; rdmux2_hdr = (struct qla83xx_minidump_entry_rdmux2 *)entry_hdr; sel_val1 = rdmux2_hdr->select_value_1; sel_val2 = rdmux2_hdr->select_value_2; sel_addr1 = rdmux2_hdr->select_addr_1; sel_addr2 = rdmux2_hdr->select_addr_2; sel_val_mask = rdmux2_hdr->select_value_mask; read_addr = rdmux2_hdr->read_addr; i = 0U; goto ldv_64212; ldv_64211: (*((ha->isp_ops)->wr_reg_indirect))(ha, sel_addr1, sel_val1); t_sel_val = sel_val1 & sel_val_mask; tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = t_sel_val; (*((ha->isp_ops)->wr_reg_indirect))(ha, sel_addr2, t_sel_val); (*((ha->isp_ops)->rd_reg_indirect))(ha, read_addr, & data); tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; (*((ha->isp_ops)->wr_reg_indirect))(ha, sel_addr1, sel_val2); t_sel_val = sel_val2 & sel_val_mask; tmp___1 = data_ptr; data_ptr = data_ptr + 1; *tmp___1 = t_sel_val; (*((ha->isp_ops)->wr_reg_indirect))(ha, sel_addr2, t_sel_val); (*((ha->isp_ops)->rd_reg_indirect))(ha, read_addr, & data); tmp___2 = data_ptr; data_ptr = data_ptr + 1; *tmp___2 = data; sel_val1 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val1; sel_val2 = (uint32_t )rdmux2_hdr->select_value_stride + sel_val2; i = i + 1U; ldv_64212: ; if (rdmux2_hdr->op_count > i) { goto ldv_64211; } else { } *d_ptr = data_ptr; return; } } static uint32_t qla83xx_minidump_process_pollrdmwr(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t poll_wait___0 ; uint32_t poll_mask ; uint32_t r_value ; uint32_t data ; uint32_t addr_1 ; uint32_t addr_2 ; uint32_t value_1 ; uint32_t value_2 ; uint32_t *data_ptr ; uint32_t rval ; struct qla83xx_minidump_entry_pollrdmwr *poll_hdr ; uint32_t *tmp ; uint32_t *tmp___0 ; { data_ptr = *d_ptr; rval = 0U; poll_hdr = (struct qla83xx_minidump_entry_pollrdmwr *)entry_hdr; addr_1 = poll_hdr->addr_1; addr_2 = poll_hdr->addr_2; value_1 = poll_hdr->value_1; value_2 = poll_hdr->value_2; poll_mask = poll_hdr->poll_mask; (*((ha->isp_ops)->wr_reg_indirect))(ha, addr_1, value_1); poll_wait___0 = poll_hdr->poll_wait; ldv_64233: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr_1, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_64230; } else { 
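/* Mask bits not set yet: sleep 1 ms and burn one unit of the poll budget;
 * when poll_wait reaches zero the entry is abandoned with TIMEOUT_1. */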
msleep(1U); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT_1\n", "qla83xx_minidump_process_pollrdmwr"); rval = 1U; goto exit_process_pollrdmwr; } else { } } goto ldv_64233; ldv_64230: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr_2, & data); data = poll_hdr->modify_mask & data; (*((ha->isp_ops)->wr_reg_indirect))(ha, addr_2, data); (*((ha->isp_ops)->wr_reg_indirect))(ha, addr_1, value_2); poll_wait___0 = poll_hdr->poll_wait; ldv_64235: (*((ha->isp_ops)->rd_reg_indirect))(ha, addr_1, & r_value); if ((r_value & poll_mask) != 0U) { goto ldv_64234; } else { msleep(1U); poll_wait___0 = poll_wait___0 - 1U; if (poll_wait___0 == 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: TIMEOUT_2\n", "qla83xx_minidump_process_pollrdmwr"); rval = 1U; goto exit_process_pollrdmwr; } else { } } goto ldv_64235; ldv_64234: tmp = data_ptr; data_ptr = data_ptr + 1; *tmp = addr_2; tmp___0 = data_ptr; data_ptr = data_ptr + 1; *tmp___0 = data; *d_ptr = data_ptr; exit_process_pollrdmwr: ; return (rval); } } static uint32_t qla4_83xx_minidump_process_rdrom(struct scsi_qla_host *ha , struct qla8xxx_minidump_entry_hdr *entry_hdr , uint32_t **d_ptr ) { uint32_t fl_addr ; uint32_t u32_count ; uint32_t rval ; struct qla8xxx_minidump_entry_rdrom *rom_hdr ; uint32_t *data_ptr ; int tmp ; { data_ptr = *d_ptr; rom_hdr = (struct qla8xxx_minidump_entry_rdrom *)entry_hdr; fl_addr = rom_hdr->read_addr; u32_count = rom_hdr->read_data_size / 4U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: fl_addr: 0x%x, count: 0x%x\n", "qla4_83xx_minidump_process_rdrom", fl_addr, u32_count); } else { } tmp = qla4_83xx_lockless_flash_read_u32(ha, fl_addr, (uint8_t *)data_ptr, (int )u32_count); rval = (uint32_t )tmp; if (rval == 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Flash Read Error,Count=%d\n", "qla4_83xx_minidump_process_rdrom", u32_count); goto exit_process_rdrom; } else { } data_ptr = data_ptr + (unsigned long )u32_count; *d_ptr = data_ptr; exit_process_rdrom: ; return (rval); } } static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha ) { int num_entry_hdr ; struct qla8xxx_minidump_entry_hdr *entry_hdr ; struct qla4_8xxx_minidump_template_hdr *tmplt_hdr ; uint32_t *data_ptr ; uint32_t data_collected ; int i ; int rval ; uint64_t now ; uint32_t timestamp ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; uint32_t tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; uint32_t tmp___7 ; int tmp___8 ; int tmp___9 ; uint32_t tmp___10 ; uint32_t tmp___11 ; uint32_t tmp___12 ; uint32_t tmp___13 ; { num_entry_hdr = 0; data_collected = 0U; rval = 1; ha->fw_dump_skip_size = 0U; if ((unsigned long )ha->fw_dump == (unsigned long )((void *)0)) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld) No buffer to dump\n", "qla4_8xxx_collect_md_data", ha->host_no); return (rval); } else { } tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)ha->fw_dump_tmplt_hdr; data_ptr = (uint32_t *)ha->fw_dump + (unsigned long )ha->fw_dump_tmplt_size; data_collected = ha->fw_dump_tmplt_size + data_collected; num_entry_hdr = (int )tmplt_hdr->num_of_entries; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: starting data ptr: %p\n", "qla4_8xxx_collect_md_data", data_ptr); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: no of entry headers in Template: 0x%x\n", "qla4_8xxx_collect_md_data", 
num_entry_hdr); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: Capture Mask obtained: 0x%x\n", "qla4_8xxx_collect_md_data", ha->fw_dump_capture_mask); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "[%s]: Total_data_size 0x%x, %d obtained\n", "qla4_8xxx_collect_md_data", ha->fw_dump_size, ha->fw_dump_size); now = get_jiffies_64(); tmp = jiffies_to_msecs((unsigned long const )now); timestamp = tmp / 1000U; tmplt_hdr->driver_timestamp = timestamp; entry_hdr = (struct qla8xxx_minidump_entry_hdr *)ha->fw_dump_tmplt_hdr + (unsigned long )tmplt_hdr->first_entry_offset; tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { tmplt_hdr->saved_state_array[3] = tmplt_hdr->ocm_window_reg[(int )ha->func_num]; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { tmplt_hdr->saved_state_array[3] = tmplt_hdr->ocm_window_reg[(int )ha->func_num]; } else { } } i = 0; goto ldv_64290; ldv_64289: ; if (ha->fw_dump_size < data_collected) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Data collected: [0x%x], Total Dump size: [0x%x]\n", data_collected, ha->fw_dump_size); return (rval); } else { } if (((uint32_t )entry_hdr->d_ctrl.entry_capture_mask & ha->fw_dump_capture_mask) == 0U) { entry_hdr->d_ctrl.driver_flags = (uint8_t )((unsigned int )entry_hdr->d_ctrl.driver_flags | 128U); goto skip_nxt_entry; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, ha->fw_dump_size - data_collected); } else { } switch (entry_hdr->entry_type) { case 255U: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto ldv_64263; case 98U: rval = qla4_8xxx_minidump_process_control(ha, entry_hdr); if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } else { } goto ldv_64263; case 1U: qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr, & data_ptr); goto ldv_64263; case 72U: rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr, & data_ptr); if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } else { } goto ldv_64263; case 4U: ; case 71U: tmp___5 = is_qla8022(ha); if (tmp___5 != 0) { qla4_82xx_minidump_process_rdrom(ha, entry_hdr, & data_ptr); } else { tmp___3 = is_qla8032(ha); if (tmp___3 != 0) { goto _L; } else { tmp___4 = is_qla8042(ha); if (tmp___4 != 0) { _L: /* CIL Label */ tmp___2 = qla4_83xx_minidump_process_rdrom(ha, entry_hdr, & data_ptr); rval = (int )tmp___2; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } } else { } } } goto ldv_64263; case 21U: ; case 22U: ; case 23U: ; case 24U: rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr, & data_ptr); if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto md_failed; } else { } goto ldv_64263; case 8U: ; case 9U: ; case 11U: ; case 12U: qla4_8xxx_minidump_process_l1cache(ha, entry_hdr, & data_ptr); goto ldv_64263; case 6U: qla4_8xxx_minidump_process_rdocm(ha, entry_hdr, & data_ptr); goto ldv_64263; case 2U: qla4_8xxx_minidump_process_rdmux(ha, entry_hdr, & data_ptr); goto ldv_64263; case 3U: qla4_8xxx_minidump_process_queue(ha, entry_hdr, & data_ptr); goto ldv_64263; case 35U: tmp___6 = is_qla8022(ha); if (tmp___6 != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto ldv_64263; } else { } tmp___7 = qla83xx_minidump_process_pollrd(ha, entry_hdr, & data_ptr); rval = (int )tmp___7; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } goto ldv_64263; case 36U: tmp___8 = is_qla8022(ha); 
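/* Entry types 35-37 (pollrd, rdmux2, pollrdmwr) are marked skipped on 8022
 * hardware and only processed on 83xx/84xx; types 38-40 (rddfe, rdmdio,
 * pollwr) go straight to their 84xx handlers, and a failing handler marks
 * the entry skipped as well. */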
if (tmp___8 != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto ldv_64263; } else { } qla83xx_minidump_process_rdmux2(ha, entry_hdr, & data_ptr); goto ldv_64263; case 37U: tmp___9 = is_qla8022(ha); if (tmp___9 != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto ldv_64263; } else { } tmp___10 = qla83xx_minidump_process_pollrdmwr(ha, entry_hdr, & data_ptr); rval = (int )tmp___10; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } goto ldv_64263; case 38U: tmp___11 = qla4_84xx_minidump_process_rddfe(ha, entry_hdr, & data_ptr); rval = (int )tmp___11; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } goto ldv_64263; case 39U: tmp___12 = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr, & data_ptr); rval = (int )tmp___12; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } goto ldv_64263; case 40U: tmp___13 = qla4_84xx_minidump_process_pollwr(ha, entry_hdr, & data_ptr); rval = (int )tmp___13; if (rval != 0) { qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); } else { } goto ldv_64263; case 0U: ; default: qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i); goto ldv_64263; } ldv_64263: data_collected = (uint32_t )((long )data_ptr) - (uint32_t )((long )ha->fw_dump); skip_nxt_entry: entry_hdr = entry_hdr + (unsigned long )entry_hdr->entry_size; i = i + 1; ldv_64290: ; if (i < num_entry_hdr) { goto ldv_64289; } else { } if (ha->fw_dump_skip_size + data_collected != ha->fw_dump_size) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n", data_collected, ha->fw_dump_size); rval = 1; goto md_failed; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Leaving fn: %s Last entry: 0x%x\n", "qla4_8xxx_collect_md_data", i); } else { } md_failed: ; return (rval); } } static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha , u32 code ) { char event_string[40U] ; char *envp[2U] ; { envp[0] = (char *)(& event_string); envp[1] = (char *)0; switch (code) { case 0U: snprintf((char *)(& event_string), 40UL, "FW_DUMP=%ld", ha->host_no); goto ldv_64299; default: ; goto ldv_64299; } ldv_64299: kobject_uevent_env(& (ha->pdev)->dev.kobj, 2, (char **)(& envp)); return; } } void qla4_8xxx_get_minidump(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; { if (ql4xenablemd != 0) { tmp___0 = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0) { tmp___1 = constant_test_bit(24L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 == 0) { tmp = qla4_8xxx_collect_md_data(ha); if (tmp == 0) { qla4_8xxx_uevent_emit(ha, 0U); set_bit(24L, (unsigned long volatile *)(& ha->flags)); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Unable to collect minidump\n", "qla4_8xxx_get_minidump"); } } else { } } else { } } else { } return; } } int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha ) { int rval ; int i ; uint32_t old_count ; uint32_t count ; int need_reset ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rval = 1; need_reset = 0; need_reset = (*((ha->isp_ops)->need_reset))(ha); if (need_reset != 0) { tmp = constant_test_bit(19L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { (*((ha->isp_ops)->rom_lock_recovery))(ha); } else { } } else { tmp___0 = qla4_8xxx_rd_direct(ha, 2U); old_count = (uint32_t )tmp___0; i = 0; goto ldv_64315; ldv_64314: msleep(200U); tmp___1 = qla4_8xxx_rd_direct(ha, 2U); 
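/* Bootstrap probe: direct register index 2 (likely a peg-alive counter) is
 * re-read every 200 ms; if it moves relative to old_count the firmware is
 * already running and control jumps to dev_ready, otherwise after 10 tries
 * the code falls back to ROM-lock recovery and a full firmware restart. */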
count = (uint32_t )tmp___1; if (count != old_count) { rval = 0; goto dev_ready; } else { } i = i + 1; ldv_64315: ; if (i <= 9) { goto ldv_64314; } else { } (*((ha->isp_ops)->rom_lock_recovery))(ha); } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: INITIALIZING\n"); qla4_8xxx_wr_direct(ha, 4U, 2U); (*((ha->isp_ops)->idc_unlock))(ha); tmp___2 = is_qla8022(ha); if (tmp___2 != 0) { qla4_8xxx_get_minidump(ha); } else { } rval = (*((ha->isp_ops)->restart_firmware))(ha); (*((ha->isp_ops)->idc_lock))(ha); if (rval != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: FAILED\n"); qla4_8xxx_clear_drv_active(ha); qla4_8xxx_wr_direct(ha, 4U, 6U); return (rval); } else { } dev_ready: dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: READY\n"); qla4_8xxx_wr_direct(ha, 4U, 3U); return (rval); } } static void qla4_82xx_need_reset_handler(struct scsi_qla_host *ha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; uint32_t active_mask ; unsigned long reset_timeout ; int tmp ; int tmp___0 ; int tmp___1 ; { active_mask = 4294967295U; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Performing ISP error recovery\n"); tmp = test_and_clear_bit(0L, (unsigned long volatile *)(& ha->flags)); if (tmp != 0) { qla4_82xx_idc_unlock(ha); (*((ha->isp_ops)->disable_intrs))(ha); qla4_82xx_idc_lock(ha); } else { } tmp___0 = constant_test_bit(25L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): reset acknowledged\n", "qla4_82xx_need_reset_handler", ha->host_no); } else { } qla4_8xxx_set_rst_ready(ha); } else { active_mask = (uint32_t )(~ (1 << (int )ha->func_num * 4)); } reset_timeout = (unsigned long )(ha->nx_reset_timeout * 250U) + (unsigned long )jiffies; drv_state = qla4_82xx_rd_32(ha, 136323396UL); drv_active = qla4_82xx_rd_32(ha, 136323384UL); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", "qla4_82xx_need_reset_handler", ha->host_no, drv_state, drv_active); goto ldv_64334; ldv_64333: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n", (char *)"qla4xxx", drv_state, drv_active); goto ldv_64332; } else { } tmp___1 = constant_test_bit(25L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", "qla4_82xx_need_reset_handler", ha->host_no, drv_state, drv_active); } else { } qla4_82xx_idc_unlock(ha); msleep(1000U); qla4_82xx_idc_lock(ha); drv_state = qla4_82xx_rd_32(ha, 136323396UL); drv_active = qla4_82xx_rd_32(ha, 136323384UL); ldv_64334: ; if ((drv_active & active_mask) != drv_state) { goto ldv_64333; } else { } ldv_64332: clear_bit(25L, (unsigned long volatile *)(& ha->flags)); dev_state = qla4_82xx_rd_32(ha, 136323392UL); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Device state is 0x%x = %s\n", dev_state, dev_state <= 7U ? 
qdev_state[dev_state] : (char *)"Unknown"); if (dev_state != 2U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: COLD/RE-INIT\n"); qla4_82xx_wr_32(ha, 136323392UL, 1U); qla4_8xxx_set_rst_ready(ha); } else { } return; } } void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha ) { { (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_set_qsnt_ready(ha); (*((ha->isp_ops)->idc_unlock))(ha); return; } } static void qla4_82xx_set_idc_ver(struct scsi_qla_host *ha ) { int idc_ver ; uint32_t drv_active ; int tmp ; { tmp = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->func_num * 4) == drv_active) { qla4_8xxx_wr_direct(ha, 8U, 1U); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC version updated to %d\n", "qla4_82xx_set_idc_ver", 1); } else { idc_ver = qla4_8xxx_rd_direct(ha, 8U); if (idc_ver != 1) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", "qla4_82xx_set_idc_ver", 1, idc_ver); } else { } } return; } } static int qla4_83xx_set_idc_ver(struct scsi_qla_host *ha ) { int idc_ver ; uint32_t drv_active ; int rval ; int tmp ; uint32_t tmp___0 ; { rval = 0; tmp = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp; if ((uint32_t )(1 << (int )ha->func_num) == drv_active) { idc_ver = qla4_8xxx_rd_direct(ha, 8U); idc_ver = idc_ver & -256; idc_ver = idc_ver | 1; qla4_8xxx_wr_direct(ha, 8U, (uint32_t const )idc_ver); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC version updated to %d\n", "qla4_83xx_set_idc_ver", idc_ver); } else { idc_ver = qla4_8xxx_rd_direct(ha, 8U); idc_ver = idc_ver & 255; if (idc_ver != 1) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: qla4xxx driver IDC version %d is not compatible with IDC version %d of other drivers!\n", "qla4_83xx_set_idc_ver", 1, idc_ver); rval = 1; goto exit_set_idc_ver; } else { } } tmp___0 = qla4_83xx_rd_reg(ha, 14232UL); idc_ver = (int )tmp___0; idc_ver = ~ (3 << (int )ha->func_num * 2) & idc_ver; idc_ver = idc_ver; qla4_83xx_wr_reg(ha, 14232UL, (uint32_t )idc_ver); exit_set_idc_ver: ; return (rval); } } int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha ) { uint32_t drv_active ; int rval ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { rval = 0; tmp = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp != 0) { goto exit_update_idc_reg; } else { } (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_set_drv_active(ha); tmp___1 = is_qla8032(ha); if (tmp___1 != 0) { goto _L; } else { tmp___2 = is_qla8042(ha); if (tmp___2 != 0) { _L: /* CIL Label */ tmp___0 = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp___0; if ((uint32_t )(1 << (int )ha->func_num) == drv_active && ql4xdontresethba == 0) { qla4_83xx_clear_idc_dontreset(ha); } else { } } else { } } tmp___5 = is_qla8022(ha); if (tmp___5 != 0) { qla4_82xx_set_idc_ver(ha); } else { tmp___3 = is_qla8032(ha); if (tmp___3 != 0) { goto _L___0; } else { tmp___4 = is_qla8042(ha); if (tmp___4 != 0) { _L___0: /* CIL Label */ rval = qla4_83xx_set_idc_ver(ha); if (rval == 1) { qla4_8xxx_clear_drv_active(ha); } else { } } else { } } } (*((ha->isp_ops)->idc_unlock))(ha); exit_update_idc_reg: ; return (rval); } } int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha ) { uint32_t dev_state ; int rval ; unsigned long dev_init_timeout ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int 
tmp___3 ; { rval = 0; rval = qla4_8xxx_update_idc_reg(ha); if (rval == 1) { goto exit_state_handler; } else { } tmp = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Device state is 0x%x = %s\n", dev_state, dev_state <= 7U ? qdev_state[dev_state] : (char *)"Unknown"); } else { } dev_init_timeout = (unsigned long )(ha->nx_dev_init_timeout * 250U) + (unsigned long )jiffies; (*((ha->isp_ops)->idc_lock))(ha); ldv_64381: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Device Init Failed 0x%x = %s\n", (char *)"qla4xxx", dev_state, dev_state <= 7U ? qdev_state[dev_state] : (char *)"Unknown"); qla4_8xxx_wr_direct(ha, 4U, 6U); } else { } tmp___0 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___0; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Device state is 0x%x = %s\n", dev_state, dev_state <= 7U ? qdev_state[dev_state] : (char *)"Unknown"); switch (dev_state) { case 3U: ; goto exit; case 1U: rval = qla4_8xxx_device_bootstrap(ha); goto exit; case 2U: (*((ha->isp_ops)->idc_unlock))(ha); msleep(1000U); (*((ha->isp_ops)->idc_lock))(ha); goto ldv_64375; case 4U: tmp___2 = is_qla8032(ha); if (tmp___2 != 0) { qla4_83xx_need_reset_handler(ha); } else { tmp___3 = is_qla8042(ha); if (tmp___3 != 0) { qla4_83xx_need_reset_handler(ha); } else { tmp___1 = is_qla8022(ha); if (tmp___1 != 0) { if (ql4xdontresethba == 0) { qla4_82xx_need_reset_handler(ha); dev_init_timeout = (unsigned long )(ha->nx_dev_init_timeout * 250U) + (unsigned long )jiffies; } else { (*((ha->isp_ops)->idc_unlock))(ha); msleep(1000U); (*((ha->isp_ops)->idc_lock))(ha); } } else { } } } goto ldv_64375; case 5U: qla4_8xxx_need_qsnt_handler(ha); goto ldv_64375; case 7U: (*((ha->isp_ops)->idc_unlock))(ha); msleep(1000U); (*((ha->isp_ops)->idc_lock))(ha); goto ldv_64375; case 6U: (*((ha->isp_ops)->idc_unlock))(ha); qla4xxx_dead_adapter_cleanup(ha); rval = 1; (*((ha->isp_ops)->idc_lock))(ha); goto exit; default: (*((ha->isp_ops)->idc_unlock))(ha); qla4xxx_dead_adapter_cleanup(ha); rval = 1; (*((ha->isp_ops)->idc_lock))(ha); goto exit; } ldv_64375: ; goto ldv_64381; exit: (*((ha->isp_ops)->idc_unlock))(ha); exit_state_handler: ; return (rval); } } int qla4_8xxx_load_risc(struct scsi_qla_host *ha ) { int retval ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); } else { tmp = is_qla8022(ha); if (tmp != 0) { writel(0U, (void volatile *)(& (ha->qla4_82xx_reg)->host_int)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); } else { } } } retval = qla4_8xxx_device_state_handler(ha); if (retval == 0) { qla4xxx_init_rings(ha); } else { } if (retval == 0) { tmp___2 = constant_test_bit(10L, (unsigned long const volatile *)(& ha->flags)); if (tmp___2 == 0) { retval = qla4xxx_request_irqs(ha); } else { } } else { } return (retval); } } __inline static uint32_t flash_conf_addr(struct ql82xx_hw_data *hw , uint32_t faddr ) { { return (hw->flash_conf_off | faddr); } } static uint32_t *qla4_82xx_read_flash_data(struct scsi_qla_host *ha , uint32_t *dwptr , uint32_t 
faddr , uint32_t length ) { uint32_t i ; uint32_t val ; int loops ; int tmp ; int tmp___0 ; { loops = 0; goto ldv_64405; ldv_64404: __const_udelay(429500UL); ___might_sleep("/home/ldvuser/mutilin/launch/work/current--X--drivers/--X--defaultlinux-4.2-rc1.tar.xz--X--43_2a--X--cpachecker/linux-4.2-rc1.tar.xz/csd_deg_dscv/3192/dscv_tempdir/dscv/ri/43_2a/drivers/scsi/qla4xxx/ql4_nx.c", 3666, 0); _cond_resched(); loops = loops + 1; ldv_64405: tmp = qla4_82xx_rom_lock(ha); if (tmp != 0 && loops <= 49999) { goto ldv_64404; } else { } if (loops > 49999) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "ROM lock failed\n"); return (dwptr); } else { } i = 0U; goto ldv_64409; ldv_64408: tmp___0 = qla4_82xx_do_rom_fast_read(ha, (int )faddr, (int *)(& val)); if (tmp___0 != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "Do ROM fast read failed\n"); goto done_read; } else { } *(dwptr + (unsigned long )i) = val; i = i + 1U; faddr = faddr + 4U; ldv_64409: ; if (length / 4U > i) { goto ldv_64408; } else { } done_read: qla4_82xx_rom_unlock(ha); return (dwptr); } } static uint8_t *qla4_82xx_read_optrom_data(struct scsi_qla_host *ha , uint8_t *buf , uint32_t offset , uint32_t length ) { { qla4_82xx_read_flash_data(ha, (uint32_t *)buf, offset, length); return (buf); } } static int qla4_8xxx_find_flt_start(struct scsi_qla_host *ha , uint32_t *start ) { char const *loc ; char const *locations[2U] ; { locations[0] = "DEF"; locations[1] = "PCI"; loc = locations[0]; *start = 1033216U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "FLTL[%s] = 0x%x.\n", loc, *start); } else { } return (0); } } static void qla4_8xxx_get_flt_info(struct scsi_qla_host *ha , uint32_t flt_addr ) { char const *loc ; char const *locations[2U] ; uint16_t *wptr ; uint16_t cnt ; uint16_t chksum ; uint32_t start ; uint32_t status ; struct qla_flt_header *flt ; struct qla_flt_region *region ; struct ql82xx_hw_data *hw ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; uint16_t *tmp___3 ; { locations[0] = "DEF"; locations[1] = "FLT"; hw = & ha->hw; hw->flt_region_flt = flt_addr; wptr = (uint16_t *)ha->request_ring; flt = (struct qla_flt_header *)ha->request_ring; region = (struct qla_flt_region *)flt + 1U; tmp___2 = is_qla8022(ha); if (tmp___2 != 0) { qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, flt_addr << 2, 4096U); } else { tmp___0 = is_qla8032(ha); if (tmp___0 != 0) { goto _L; } else { tmp___1 = is_qla8042(ha); if (tmp___1 != 0) { _L: /* CIL Label */ tmp = qla4_83xx_flash_read_u32(ha, flt_addr << 2, (uint8_t *)ha->request_ring, 1024); status = (uint32_t )tmp; if (status != 0U) { goto no_flash_data; } else { } } else { } } } if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((unsigned int )flt->version != 1U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )flt->checksum); } else { } goto no_flash_data; } else { } cnt = (uint16_t )(((unsigned long )flt->length + 8UL) >> 1); chksum = 0U; goto ldv_64439; ldv_64438: tmp___3 = wptr; wptr = wptr + 1; chksum = (int )*tmp___3 + (int )chksum; cnt = (uint16_t )((int )cnt - 1); ldv_64439: ; if ((unsigned int )cnt != 0U) { goto ldv_64438; } else { } if ((unsigned int )chksum != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Inconsistent FLT detected: 
version=0x%x length=0x%x checksum=0x%x.\n", (int )flt->version, (int )flt->length, (int )chksum); } else { } goto no_flash_data; } else { } loc = locations[1]; cnt = (uint16_t )((unsigned int )flt->length / 16U); goto ldv_64451; ldv_64450: start = region->start >> 2; switch (region->code & 255U) { case 26U: hw->flt_region_fdt = start; goto ldv_64442; case 120U: hw->flt_region_boot = start; goto ldv_64442; case 116U: ; case 151U: hw->flt_region_fw = start; goto ldv_64442; case 114U: hw->flt_region_bootload = start; goto ldv_64442; case 101U: hw->flt_iscsi_param = start; goto ldv_64442; case 99U: hw->flt_region_chap = start; hw->flt_chap_size = region->size; goto ldv_64442; case 106U: hw->flt_region_ddb = start; hw->flt_ddb_size = region->size; goto ldv_64442; } ldv_64442: cnt = (uint16_t )((int )cnt - 1); region = region + 1; ldv_64451: ; if ((unsigned int )cnt != 0U) { goto ldv_64450; } else { } goto done; no_flash_data: loc = locations[0]; hw->flt_region_fdt = 1032192U; hw->flt_region_boot = 131072U; hw->flt_region_bootload = 16384U; hw->flt_region_fw = 262144U; hw->flt_region_chap = 1376256U; hw->flt_chap_size = 786432U; hw->flt_region_ddb = 1081344U; hw->flt_ddb_size = 524288U; done: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "FLT[%s]: flt=0x%x fdt=0x%x boot=0x%x bootload=0x%x fw=0x%x chap=0x%x chap_size=0x%x ddb=0x%x ddb_size=0x%x\n", loc, hw->flt_region_flt, hw->flt_region_fdt, hw->flt_region_boot, hw->flt_region_bootload, hw->flt_region_fw, hw->flt_region_chap, hw->flt_chap_size, hw->flt_region_ddb, hw->flt_ddb_size); } else { } return; } } static void qla4_82xx_get_fdt_info(struct scsi_qla_host *ha ) { char const *loc ; char const *locations[2U] ; uint16_t cnt ; uint16_t chksum ; uint16_t *wptr ; struct qla_fdt_layout *fdt ; uint16_t mid ; uint16_t fid ; struct ql82xx_hw_data *hw ; uint16_t *tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; { locations[0] = "MID"; locations[1] = "FDT"; mid = 0U; fid = 0U; hw = & ha->hw; hw->flash_conf_off = 2147287040U; hw->flash_data_off = 2146435072U; wptr = (uint16_t *)ha->request_ring; fdt = (struct qla_fdt_layout *)ha->request_ring; qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, hw->flt_region_fdt << 2, 4096U); if ((unsigned int )*wptr == 65535U) { goto no_flash_data; } else { } if ((((unsigned int )fdt->sig[0] != 81U || (unsigned int )fdt->sig[1] != 76U) || (unsigned int )fdt->sig[2] != 73U) || (unsigned int )fdt->sig[3] != 68U) { goto no_flash_data; } else { } cnt = 0U; chksum = 0U; goto ldv_64468; ldv_64467: tmp = wptr; wptr = wptr + 1; chksum = (int )*tmp + (int )chksum; cnt = (uint16_t )((int )cnt + 1); ldv_64468: ; if ((unsigned int )cnt <= 63U) { goto ldv_64467; } else { } if ((unsigned int )chksum != 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Inconsistent FDT detected: checksum=0x%x id=%c version=0x%x.\n", (int )chksum, (int )fdt->sig[0], (int )fdt->version); } else { } goto no_flash_data; } else { } loc = locations[1]; mid = fdt->man_id; fid = fdt->id; hw->fdt_wrt_disable = (uint32_t )fdt->wrt_disable_bits; hw->fdt_erase_cmd = flash_conf_addr(hw, (uint32_t )((int )fdt->erase_cmd | 768)); hw->fdt_block_size = fdt->block_size; if ((unsigned int )fdt->unprotect_sec_cmd != 0U) { hw->fdt_unprotect_sec_cmd = flash_conf_addr(hw, (uint32_t )((int )fdt->unprotect_sec_cmd | 768)); if ((unsigned int )fdt->protect_sec_cmd != 0U) { tmp___0 = flash_conf_addr(hw, (uint32_t )((int )fdt->protect_sec_cmd | 768)); 
hw->fdt_protect_sec_cmd = tmp___0; } else { tmp___1 = flash_conf_addr(hw, 822U); hw->fdt_protect_sec_cmd = tmp___1; } } else { } goto done; no_flash_data: loc = locations[0]; hw->fdt_block_size = 65536U; done: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "FDT[%s]: (0x%x/0x%x) erase=0x%x pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, (int )mid, (int )fid, hw->fdt_erase_cmd, hw->fdt_protect_sec_cmd, hw->fdt_unprotect_sec_cmd, hw->fdt_wrt_disable, hw->fdt_block_size); } else { } return; } } static void qla4_82xx_get_idc_param(struct scsi_qla_host *ha ) { uint32_t *wptr ; int tmp ; uint32_t *tmp___0 ; { tmp = is_qla8022(ha); if (tmp == 0) { return; } else { } wptr = (uint32_t *)ha->request_ring; qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring, 4098140U, 8U); if (*wptr == 4294967295U) { ha->nx_dev_init_timeout = 30U; ha->nx_reset_timeout = 10U; } else { tmp___0 = wptr; wptr = wptr + 1; ha->nx_dev_init_timeout = *tmp___0; ha->nx_reset_timeout = *wptr; } if (ql4xextended_error_logging == 2) { dev_printk("\017", (struct device const *)(& (ha->pdev)->dev), "ha->nx_dev_init_timeout = %d\n", ha->nx_dev_init_timeout); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\017", (struct device const *)(& (ha->pdev)->dev), "ha->nx_reset_timeout = %d\n", ha->nx_reset_timeout); } else { } return; } } void qla4_82xx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int in_count ) { int i ; { i = 1; goto ldv_64482; ldv_64481: writel(*(mbx_cmd + (unsigned long )i), (void volatile *)(& (ha->qla4_82xx_reg)->mailbox_in) + (unsigned long )i); i = i + 1; ldv_64482: ; if (i < in_count) { goto ldv_64481; } else { } writel(*mbx_cmd, (void volatile *)(& (ha->qla4_82xx_reg)->mailbox_in)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->mailbox_in)); writel(1U, (void volatile *)(& (ha->qla4_82xx_reg)->hint)); readl((void const volatile *)(& (ha->qla4_82xx_reg)->hint)); return; } } void qla4_82xx_process_mbox_intr(struct scsi_qla_host *ha , int out_count ) { int intr_status ; unsigned int tmp ; unsigned int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_int)); intr_status = (int )tmp; if (intr_status & 1) { ha->mbox_status_count = (uint8_t volatile )out_count; tmp___0 = readl((void const volatile *)(& (ha->qla4_82xx_reg)->host_status)); intr_status = (int )tmp___0; (*((ha->isp_ops)->interrupt_service_routine))(ha, (uint32_t )intr_status); tmp___1 = constant_test_bit(6L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { tmp___2 = constant_test_bit(15L, (unsigned long const volatile *)(& ha->flags)); if (tmp___2 != 0) { qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); } else { } } else { } } else { } return; } } int qla4_8xxx_get_flash_info(struct scsi_qla_host *ha ) { int ret ; uint32_t flt_addr ; int tmp ; int tmp___0 ; int tmp___1 ; { ret = qla4_8xxx_find_flt_start(ha, & flt_addr); if (ret != 0) { return (ret); } else { } qla4_8xxx_get_flt_info(ha, flt_addr); tmp___1 = is_qla8022(ha); if (tmp___1 != 0) { qla4_82xx_get_fdt_info(ha); qla4_82xx_get_idc_param(ha); } else { tmp = is_qla8032(ha); if (tmp != 0) { qla4_83xx_get_idc_param(ha); } else { tmp___0 = is_qla8042(ha); if (tmp___0 != 0) { qla4_83xx_get_idc_param(ha); } else { } } } return (0); } } int qla4_8xxx_stop_firmware(struct scsi_qla_host *ha ) { int status ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; { memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 
32UL); mbox_cmd[0] = 20U; status = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: status = %d\n", ha->host_no, "qla4_8xxx_stop_firmware", status); } else { } return (status); } } int qla4_82xx_isp_reset(struct scsi_qla_host *ha ) { int rval ; uint32_t dev_state ; { qla4_82xx_idc_lock(ha); dev_state = qla4_82xx_rd_32(ha, 136323392UL); if (dev_state == 3U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: NEED RESET\n"); qla4_82xx_wr_32(ha, 136323392UL, 4U); set_bit(25L, (unsigned long volatile *)(& ha->flags)); } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "HW State: DEVICE INITIALIZING\n"); } qla4_82xx_idc_unlock(ha); rval = qla4_8xxx_device_state_handler(ha); qla4_82xx_idc_lock(ha); qla4_8xxx_clear_rst_ready(ha); qla4_82xx_idc_unlock(ha); if (rval == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Clearing AF_RECOVERY in qla4_82xx_isp_reset\n"); clear_bit(19L, (unsigned long volatile *)(& ha->flags)); } else { } return (rval); } } int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; struct mbx_sys_info *sys_info ; dma_addr_t sys_info_dma ; int status ; void *tmp ; int tmp___0 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; unsigned long _min1 ; unsigned long _min2 ; unsigned long _min1___0 ; unsigned long _min2___0 ; unsigned long _min1___1 ; unsigned long _min2___1 ; { status = 1; tmp = dma_alloc_attrs(& (ha->pdev)->dev, 64UL, & sys_info_dma, 208U, (struct dma_attrs *)0); sys_info = (struct mbx_sys_info *)tmp; if ((unsigned long )sys_info == (unsigned long )((struct mbx_sys_info *)0)) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, "qla4_8xxx_get_sys_info"); } else { } return (status); } else { } memset((void *)sys_info, 0, 64UL); memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 120U; mbox_cmd[1] = (unsigned int )sys_info_dma; mbox_cmd[2] = (unsigned int )(sys_info_dma >> 32ULL); mbox_cmd[4] = 64U; tmp___0 = qla4xxx_mailbox_command(ha, 8, 6, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: GET_SYS_INFO failed\n", ha->host_no, "qla4_8xxx_get_sys_info"); } else { } goto exit_validate_mac82; } else { } tmp___4 = is_qla8032(ha); if (tmp___4 != 0) { tmp___3 = mbox_sts[3] <= 51U; } else { tmp___5 = is_qla8042(ha); if (tmp___5 != 0) { tmp___3 = mbox_sts[3] <= 51U; } else { tmp___3 = mbox_sts[4] <= 51U; } } if (tmp___3) { if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: GET_SYS_INFO data receive error (%x)\n", ha->host_no, "qla4_8xxx_get_sys_info", mbox_sts[4]); } else { } goto exit_validate_mac82; } else { } ha->port_num = sys_info->port_num; _min1 = 6UL; _min2 = 6UL; memcpy((void *)(& ha->my_mac), (void const *)(& sys_info->mac_addr), _min1 < _min2 ? _min1 : _min2); _min1___0 = 16UL; _min2___0 = 16UL; memcpy((void *)(& ha->serial_number), (void const *)(& sys_info->serial_number), _min1___0 < _min2___0 ? _min1___0 : _min2___0); _min1___1 = 16UL; _min2___1 = 16UL; memcpy((void *)(& ha->model_name), (void const *)(& sys_info->board_id_str), _min1___1 < _min2___1 ? 
_min1___1 : _min2___1); ha->phy_port_cnt = sys_info->phys_port_cnt; ha->phy_port_num = sys_info->port_num; ha->iscsi_pci_func_cnt = (uint16_t )sys_info->iscsi_pci_func_cnt; if (ql4xextended_error_logging == 2) { printk("scsi%ld: %s: mac %02x:%02x:%02x:%02x:%02x:%02x serial %s\n", ha->host_no, "qla4_8xxx_get_sys_info", (int )ha->my_mac[0], (int )ha->my_mac[1], (int )ha->my_mac[2], (int )ha->my_mac[3], (int )ha->my_mac[4], (int )ha->my_mac[5], (uint8_t *)(& ha->serial_number)); } else { } status = 0; exit_validate_mac82: dma_free_attrs(& (ha->pdev)->dev, 64UL, (void *)sys_info, sys_info_dma, (struct dma_attrs *)0); return (status); } } int qla4_8xxx_intr_enable(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s\n", "qla4_8xxx_intr_enable"); } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 16U; mbox_cmd[1] = 1U; tmp = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", "qla4_8xxx_intr_enable", mbox_sts[0]); } else { } return (1); } else { } return (0); } } int qla4_8xxx_intr_disable(struct scsi_qla_host *ha ) { uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int tmp ; { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s\n", "qla4_8xxx_intr_disable"); } else { } memset((void *)(& mbox_cmd), 0, 32UL); memset((void *)(& mbox_sts), 0, 32UL); mbox_cmd[0] = 16U; mbox_cmd[1] = 0U; tmp = qla4xxx_mailbox_command(ha, 8, 1, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (tmp != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: MBOX_CMD_ENABLE_INTRS failed (0x%04x)\n", "qla4_8xxx_intr_disable", mbox_sts[0]); } else { } return (1); } else { } return (0); } } void qla4_82xx_enable_intrs(struct scsi_qla_host *ha ) { { qla4_8xxx_intr_enable(ha); spin_lock_irq(& ha->hardware_lock); qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 64511U); spin_unlock_irq(& ha->hardware_lock); set_bit(6L, (unsigned long volatile *)(& ha->flags)); return; } } void qla4_82xx_disable_intrs(struct scsi_qla_host *ha ) { int tmp ; { tmp = test_and_clear_bit(6L, (unsigned long volatile *)(& ha->flags)); if (tmp != 0) { qla4_8xxx_intr_disable(ha); } else { } spin_lock_irq(& ha->hardware_lock); qla4_82xx_wr_32(ha, (ulong )ha->nx_legacy_intr.tgt_mask_reg, 1024U); spin_unlock_irq(& ha->hardware_lock); return; } } static struct ql4_init_msix_entry qla4_8xxx_msix_entries[2U] = { {0U, 0U, "qla4xxx (default)", & qla4_8xxx_default_intr_handler}, {1U, 1U, "qla4xxx (rsp_q)", & qla4_8xxx_msix_rsp_q}}; void qla4_8xxx_disable_msix(struct scsi_qla_host *ha ) { int i ; struct ql4_msix_entry *qentry ; { i = 0; goto ldv_64556; ldv_64555: qentry = (struct ql4_msix_entry *)(& ha->msix_entries) + (unsigned long )qla4_8xxx_msix_entries[i].index; if (qentry->have_irq != 0) { ldv_free_irq_337((unsigned int )qentry->msix_vector, (void *)ha); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %s\n", "qla4_8xxx_disable_msix", qla4_8xxx_msix_entries[i].name); } else { } } else { } i = i + 1; ldv_64556: ; if (i <= 1) { goto ldv_64555; } else { } pci_disable_msix(ha->pdev); 
clear_bit(17L, (unsigned long volatile *)(& ha->flags)); return; } } int qla4_8xxx_enable_msix(struct scsi_qla_host *ha ) { int i ; int ret ; struct msix_entry entries[2U] ; struct ql4_msix_entry *qentry ; { i = 0; goto ldv_64566; ldv_64565: entries[i].entry = qla4_8xxx_msix_entries[i].entry; i = i + 1; ldv_64566: ; if (i <= 1) { goto ldv_64565; } else { } ret = pci_enable_msix_exact(ha->pdev, (struct msix_entry *)(& entries), 2); if (ret != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "MSI-X: Failed to enable support -- %d/%d\n", 2, ret); goto msix_out; } else { } set_bit(17L, (unsigned long volatile *)(& ha->flags)); i = 0; goto ldv_64573; ldv_64572: qentry = (struct ql4_msix_entry *)(& ha->msix_entries) + (unsigned long )qla4_8xxx_msix_entries[i].index; qentry->msix_vector = (uint16_t )entries[i].vector; qentry->msix_entry = entries[i].entry; qentry->have_irq = 0; ret = ldv_request_irq_280((unsigned int )qentry->msix_vector, qla4_8xxx_msix_entries[i].handler, 0UL, qla4_8xxx_msix_entries[i].name, (void *)ha); if (ret != 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "MSI-X: Unable to register handler -- %x/%d.\n", (int )qla4_8xxx_msix_entries[i].index, ret); qla4_8xxx_disable_msix(ha); goto msix_out; } else { } qentry->have_irq = 1; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %s\n", "qla4_8xxx_enable_msix", qla4_8xxx_msix_entries[i].name); } else { } i = i + 1; ldv_64573: ; if (i <= 1) { goto ldv_64572; } else { } msix_out: ; return (ret); } } int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha ) { int status ; int tmp ; { status = 0; tmp = constant_test_bit(10L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Skipping retry of adapter initialization as IRQs are not attached\n", "qla4_8xxx_check_init_adapter_retry"); status = 1; goto exit_init_adapter_failure; } else { } qla4xxx_free_irqs(ha); exit_init_adapter_failure: ; return (status); } } __inline static void spin_lock_irq(spinlock_t *lock ) { { ldv_spin_lock(); ldv_spin_lock_irq_301(lock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { ldv_spin_unlock(); ldv_spin_unlock_irq_304(lock); return; } } bool ldv_queue_work_on_308(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_309(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_310(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_311(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_312(int ldv_func_arg1 , 
struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_318(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_324(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_326(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_328(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_329(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_330(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_331(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_332(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_333(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_334(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_335(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_336(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } void ldv_free_irq_337(unsigned int ldv_func_arg1 , void *ldv_func_arg2 ) { { free_irq(ldv_func_arg1, ldv_func_arg2); disable_suitable_irq_1((int )ldv_func_arg1, ldv_func_arg2); return; } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) ; bool ldv_queue_work_on_361(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_363(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_362(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct 
delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_365(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_364(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_371(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_388(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_379(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_387(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_381(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_377(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_385(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_386(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_382(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_383(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_384(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_389(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static void *isp_semaphore(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return ((void *)(tmp != 0 ? & (ha->reg)->u1.isp4010.nvram : & (ha->reg)->u1.isp4022.semaphore)); } } __inline static void *isp_nvram(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return ((void *)(tmp != 0 ? & (ha->reg)->u1.isp4010.nvram : & (ha->reg)->u1.isp4022.nvram)); } } __inline static void eeprom_cmd(uint32_t cmd , struct scsi_qla_host *ha ) { void *tmp ; void *tmp___0 ; { tmp = isp_nvram(ha); writel(cmd, (void volatile *)tmp); tmp___0 = isp_nvram(ha); readl((void const volatile *)tmp___0); __const_udelay(4295UL); return; } } __inline static int eeprom_size(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return (tmp != 0 ? 256 : 1024); } } __inline static int eeprom_no_addr_bits(struct scsi_qla_host *ha ) { int tmp ; { tmp = is_qla4010(ha); return (tmp != 0 ? 8 : 10); } } __inline static int eeprom_no_data_bits(struct scsi_qla_host *ha ) { { return (16); } } static int fm93c56a_select(struct scsi_qla_host *ha ) { { ha->eeprom_cmd_data = 983042U; eeprom_cmd(ha->eeprom_cmd_data, ha); return (1); } } static int fm93c56a_cmd(struct scsi_qla_host *ha , int cmd , int addr ) { int i ; int mask ; int dataBit ; int previousBit ; int tmp ; int tmp___0 ; { eeprom_cmd(ha->eeprom_cmd_data | 4U, ha); eeprom_cmd(ha->eeprom_cmd_data | 5U, ha); eeprom_cmd(ha->eeprom_cmd_data | 4U, ha); mask = 2; previousBit = 65535; i = 0; goto ldv_63300; ldv_63299: dataBit = (cmd & mask) != 0 ? 4 : 0; if (previousBit != dataBit) { eeprom_cmd(ha->eeprom_cmd_data | (uint32_t )dataBit, ha); previousBit = dataBit; } else { } eeprom_cmd((ha->eeprom_cmd_data | (uint32_t )dataBit) | 1U, ha); eeprom_cmd(ha->eeprom_cmd_data | (uint32_t )dataBit, ha); cmd = cmd << 1; i = i + 1; ldv_63300: ; if (i <= 1) { goto ldv_63299; } else { } tmp = eeprom_no_addr_bits(ha); mask = 1 << (tmp + -1); previousBit = 65535; i = 0; goto ldv_63303; ldv_63302: dataBit = (addr & mask) != 0 ? 
4 : 0; if (previousBit != dataBit) { eeprom_cmd(ha->eeprom_cmd_data | (uint32_t )dataBit, ha); previousBit = dataBit; } else { } eeprom_cmd((ha->eeprom_cmd_data | (uint32_t )dataBit) | 1U, ha); eeprom_cmd(ha->eeprom_cmd_data | (uint32_t )dataBit, ha); addr = addr << 1; i = i + 1; ldv_63303: tmp___0 = eeprom_no_addr_bits(ha); if (tmp___0 > i) { goto ldv_63302; } else { } return (1); } } static int fm93c56a_deselect(struct scsi_qla_host *ha ) { { ha->eeprom_cmd_data = 983040U; eeprom_cmd(ha->eeprom_cmd_data, ha); return (1); } } static int fm93c56a_datain(struct scsi_qla_host *ha , unsigned short *value ) { int i ; int data ; int dataBit ; void *tmp ; unsigned short tmp___0 ; int tmp___1 ; { data = 0; i = 0; goto ldv_63316; ldv_63315: eeprom_cmd(ha->eeprom_cmd_data | 1U, ha); eeprom_cmd(ha->eeprom_cmd_data, ha); tmp = isp_nvram(ha); tmp___0 = readw((void const volatile *)tmp); dataBit = ((int )tmp___0 & 8) != 0; data = (data << 1) | dataBit; i = i + 1; ldv_63316: tmp___1 = eeprom_no_data_bits(ha); if (tmp___1 > i) { goto ldv_63315; } else { } *value = (unsigned short )data; return (1); } } static int eeprom_readword(int eepromAddr , u16 *value , struct scsi_qla_host *ha ) { { fm93c56a_select(ha); fm93c56a_cmd(ha, 2, eepromAddr); fm93c56a_datain(ha, value); fm93c56a_deselect(ha); return (1); } } u16 rd_nvram_word(struct scsi_qla_host *ha , int offset ) { u16 val ; { val = 0U; eeprom_readword(offset, & val, ha); return (val); } } u8 rd_nvram_byte(struct scsi_qla_host *ha , int offset ) { u16 val ; u8 rval ; int index ; { val = 0U; rval = 0U; index = 0; if (offset & 1) { index = (offset + -1) / 2; } else { index = offset / 2; } val = rd_nvram_word(ha, index); if (offset & 1) { rval = (unsigned char )((int )val >> 8); } else { rval = (unsigned char )val; } return (rval); } } int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha ) { int status ; uint16_t checksum ; uint32_t index ; unsigned long flags ; u16 tmp ; int tmp___0 ; { status = 1; checksum = 0U; ldv_spin_lock(); index = 0U; goto ldv_63343; ldv_63342: tmp = rd_nvram_word(ha, (int )index); checksum = (int )tmp + (int )checksum; index = index + 1U; ldv_63343: tmp___0 = eeprom_size(ha); if ((uint32_t )tmp___0 > index) { goto ldv_63342; } else { } spin_unlock_irqrestore(& ha->hardware_lock, flags); if ((unsigned int )checksum == 0U) { status = 0; } else { } return (status); } } int ql4xxx_sem_spinlock(struct scsi_qla_host *ha , u32 sem_mask , u32 sem_bits ) { uint32_t value ; unsigned long flags ; unsigned int seconds ; void *tmp ; void *tmp___0 ; unsigned short tmp___1 ; { seconds = 30U; if (ql4xextended_error_logging == 2) { printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = 0x%x\n", ha->host_no, sem_mask, sem_bits); } else { } ldv_63353: ldv_spin_lock(); tmp = isp_semaphore(ha); writel(sem_mask | sem_bits, (void volatile *)tmp); tmp___0 = isp_semaphore(ha); tmp___1 = readw((void const volatile *)tmp___0); value = (uint32_t )tmp___1; spin_unlock_irqrestore(& ha->hardware_lock, flags); if (((sem_mask >> 16) & value) == sem_bits) { if (ql4xextended_error_logging == 2) { printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = 0x%x\n", ha->host_no, sem_mask, sem_bits); } else { } return (0); } else { } ssleep(1U); seconds = seconds - 1U; if (seconds != 0U) { goto ldv_63353; } else { } return (1); } } void ql4xxx_sem_unlock(struct scsi_qla_host *ha , u32 sem_mask ) { unsigned long flags ; void *tmp ; void *tmp___0 ; { ldv_spin_lock(); tmp = isp_semaphore(ha); writel(sem_mask, (void volatile *)tmp); tmp___0 = 
isp_semaphore(ha); readl((void const volatile *)tmp___0); spin_unlock_irqrestore(& ha->hardware_lock, flags); if (ql4xextended_error_logging == 2) { printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no, sem_mask); } else { } return; } } int ql4xxx_sem_lock(struct scsi_qla_host *ha , u32 sem_mask , u32 sem_bits ) { uint32_t value ; unsigned long flags ; void *tmp ; void *tmp___0 ; unsigned short tmp___1 ; { ldv_spin_lock(); tmp = isp_semaphore(ha); writel(sem_mask | sem_bits, (void volatile *)tmp); tmp___0 = isp_semaphore(ha); tmp___1 = readw((void const volatile *)tmp___0); value = (uint32_t )tmp___1; spin_unlock_irqrestore(& ha->hardware_lock, flags); if (((sem_mask >> 16) & value) == sem_bits) { if (ql4xextended_error_logging == 2) { printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = 0x%x, sema code=0x%x\n", ha->host_no, sem_mask, sem_bits, value); } else { } return (1); } else { } return (0); } } bool ldv_queue_work_on_361(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_362(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_363(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_364(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_365(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_371(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_377(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_379(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_381(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_382(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_383(struct net_device 
*ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_384(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_385(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_386(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_387(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_388(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_389(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } bool ldv_queue_work_on_410(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_412(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_411(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_414(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_413(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_420(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_437(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_428(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_436(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_430(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_426(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_434(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_435(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_431(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_432(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_433(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_438(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; void qla4xxx_dump_buffer(void *b , uint32_t size ) { uint32_t cnt ; uint8_t *c ; { c = (uint8_t *)b; printk(" 0 1 2 3 4 
5 6 7 8 9 Ah Bh Ch Dh Eh Fh\n"); printk("--------------------------------------------------------------\n"); cnt = 0U; goto ldv_63281; ldv_63280: printk("%02x", (int )*c); cnt = cnt + 1U; if ((cnt & 15U) == 0U) { printk("\n"); } else { printk(" "); } c = c + 1; ldv_63281: ; if (cnt < size) { goto ldv_63280; } else { } printk("\016\n"); return; } } void qla4xxx_dump_registers(struct scsi_qla_host *ha ) { uint8_t i ; unsigned int tmp ; int tmp___0 ; unsigned short tmp___1 ; unsigned short tmp___2 ; unsigned short tmp___3 ; unsigned short tmp___4 ; unsigned short tmp___5 ; unsigned short tmp___6 ; unsigned short tmp___7 ; unsigned short tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; unsigned short tmp___12 ; unsigned short tmp___13 ; unsigned short tmp___14 ; unsigned short tmp___15 ; unsigned short tmp___16 ; unsigned short tmp___17 ; unsigned short tmp___18 ; unsigned short tmp___19 ; unsigned short tmp___20 ; unsigned short tmp___21 ; unsigned short tmp___22 ; unsigned short tmp___23 ; unsigned short tmp___24 ; unsigned short tmp___25 ; unsigned short tmp___26 ; uint32_t tmp___27 ; unsigned short tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; { tmp___0 = is_qla8022(ha); if (tmp___0 != 0) { i = 1U; goto ldv_63288; ldv_63287: tmp = readl((void const volatile *)(& (ha->qla4_82xx_reg)->mailbox_in) + (unsigned long )i); printk("\016mailbox[%d] = 0x%08X\n", (int )i, tmp); i = (uint8_t )((int )i + 1); ldv_63288: ; if ((unsigned int )i <= 7U) { goto ldv_63287; } else { } return; } else { } i = 0U; goto ldv_63291; ldv_63290: tmp___1 = readw((void const volatile *)(& (ha->reg)->mailbox) + (unsigned long )i); printk("px%02X mailbox[%d] = 0x%08X\n", (int )((unsigned int )i * 4U), (int )i, (int )tmp___1); i = (uint8_t )((int )i + 1); ldv_63291: ; if ((unsigned int )i <= 7U) { goto ldv_63290; } else { } tmp___2 = readw((void const volatile *)(& (ha->reg)->flash_address)); printk("px%02X flash_address = 0x%08X\n", 32, (int )tmp___2); tmp___3 = readw((void const volatile *)(& (ha->reg)->flash_data)); printk("px%02X flash_data = 0x%08X\n", 36, (int )tmp___3); tmp___4 = readw((void const volatile *)(& (ha->reg)->ctrl_status)); printk("px%02X ctrl_status = 0x%08X\n", 40, (int )tmp___4); tmp___11 = is_qla4010(ha); if (tmp___11 != 0) { tmp___5 = readw((void const volatile *)(& (ha->reg)->u1.isp4010.nvram)); printk("px%02X nvram = 0x%08X\n", 44, (int )tmp___5); } else { tmp___9 = is_qla4022(ha); tmp___10 = is_qla4032(ha); if ((tmp___9 | tmp___10) != 0) { tmp___6 = readw((void const volatile *)(& (ha->reg)->u1.isp4022.intr_mask)); printk("px%02X intr_mask = 0x%08X\n", 44, (int )tmp___6); tmp___7 = readw((void const volatile *)(& (ha->reg)->u1.isp4022.nvram)); printk("px%02X nvram = 0x%08X\n", 48, (int )tmp___7); tmp___8 = readw((void const volatile *)(& (ha->reg)->u1.isp4022.semaphore)); printk("px%02X semaphore\t = 0x%08X\n", 52, (int )tmp___8); } else { } } tmp___12 = readw((void const volatile *)(& (ha->reg)->req_q_in)); printk("px%02X req_q_in = 0x%08X\n", 56, (int )tmp___12); tmp___13 = readw((void const volatile *)(& (ha->reg)->rsp_q_out)); printk("px%02X rsp_q_out = 0x%08X\n", 60, (int )tmp___13); tmp___31 = is_qla4010(ha); if (tmp___31 != 0) { tmp___14 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.ext_hw_conf)); printk("px%02X ext_hw_conf = 0x%08X\n", 80, (int )tmp___14); tmp___15 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.port_ctrl)); printk("px%02X port_ctrl = 0x%08X\n", 88, (int )tmp___15); tmp___16 = readw((void const volatile *)(& 
(ha->reg)->u2.isp4010.port_status)); printk("px%02X port_status = 0x%08X\n", 92, (int )tmp___16); tmp___17 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.req_q_out)); printk("px%02X req_q_out = 0x%08X\n", 128, (int )tmp___17); tmp___18 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.gp_out)); printk("px%02X gp_out = 0x%08X\n", 224, (int )tmp___18); tmp___19 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.gp_in)); printk("px%02X gp_in\t = 0x%08X\n", 228, (int )tmp___19); tmp___20 = readw((void const volatile *)(& (ha->reg)->u2.isp4010.port_err_status)); printk("px%02X port_err_status = 0x%08X\n", 252, (int )tmp___20); } else { tmp___29 = is_qla4022(ha); tmp___30 = is_qla4032(ha); if ((tmp___29 | tmp___30) != 0) { printk("\016Page 0 Registers:\n"); tmp___21 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.ext_hw_conf)); printk("px%02X ext_hw_conf = 0x%08X\n", 80, (int )tmp___21); tmp___22 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.port_ctrl)); printk("px%02X port_ctrl = 0x%08X\n", 88, (int )tmp___22); tmp___23 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.port_status)); printk("px%02X port_status = 0x%08X\n", 92, (int )tmp___23); tmp___24 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.gp_out)); printk("px%02X gp_out = 0x%08X\n", 224, (int )tmp___24); tmp___25 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.gp_in)); printk("px%02X gp_in = 0x%08X\n", 228, (int )tmp___25); tmp___26 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p0.port_err_status)); printk("px%02X port_err_status = 0x%08X\n", 252, (int )tmp___26); printk("\016Page 1 Registers:\n"); tmp___27 = set_rmask(3U); writel(tmp___27 & 1U, (void volatile *)(& (ha->reg)->ctrl_status)); tmp___28 = readw((void const volatile *)(& (ha->reg)->u2.isp4022.__annonCompField129.p1.req_q_out)); printk("px%02X req_q_out = 0x%08X\n", 128, (int )tmp___28); set_rmask(3U); writel(0U, (void volatile *)(& (ha->reg)->ctrl_status)); } else { } } return; } } void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha ) { uint32_t halt_status1 ; uint32_t halt_status2 ; int tmp ; int tmp___0 ; uint32_t tmp___1 ; uint32_t tmp___2 ; uint32_t tmp___3 ; uint32_t tmp___4 ; uint32_t tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { tmp = qla4_8xxx_rd_direct(ha, 0U); halt_status1 = (uint32_t )tmp; tmp___0 = qla4_8xxx_rd_direct(ha, 1U); halt_status2 = (uint32_t )tmp___0; tmp___8 = is_qla8022(ha); if (tmp___8 != 0) { tmp___1 = qla4_82xx_rd_32(ha, 116391996UL); tmp___2 = qla4_82xx_rd_32(ha, 121634876UL); tmp___3 = qla4_82xx_rd_32(ha, 120586300UL); tmp___4 = qla4_82xx_rd_32(ha, 119537724UL); tmp___5 = qla4_82xx_rd_32(ha, 118489148UL); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n PEG_NET_4_PC: 0x%x\n", ha->host_no, "qla4_8xxx_dump_peg_reg", (int )(ha->pdev)->device, halt_status1, halt_status2, tmp___5, tmp___4, tmp___3, tmp___2, tmp___1); } else { tmp___6 = is_qla8032(ha); if (tmp___6 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", ha->host_no, "qla4_8xxx_dump_peg_reg", (int )(ha->pdev)->device, halt_status1, halt_status2); } else { tmp___7 
= is_qla8042(ha); if (tmp___7 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n", ha->host_no, "qla4_8xxx_dump_peg_reg", (int )(ha->pdev)->device, halt_status1, halt_status2); } else { } } } return; } } bool ldv_queue_work_on_410(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_411(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_412(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_413(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_414(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_420(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_426(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_428(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_430(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_431(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_432(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_433(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_434(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long 
)tmp)); } } int ldv_pskb_expand_head_435(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_436(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_437(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_438(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } extern int kstrtoll(char const * , unsigned int , long long * ) ; __inline static int kstrtol(char const *s , unsigned int base , long *res ) { int tmp ; { tmp = kstrtoll(s, base, (long long *)res); return (tmp); } } extern ssize_t memory_read_from_buffer(void * , size_t , loff_t * , void const * , size_t ) ; bool ldv_queue_work_on_459(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_461(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_460(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_463(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_462(struct workqueue_struct *ldv_func_arg1 ) ; extern int sysfs_create_bin_file(struct kobject * , struct bin_attribute const * ) ; extern void sysfs_remove_bin_file(struct kobject * , struct bin_attribute const * ) ; void *ldv_kmem_cache_alloc_469(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_486(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_477(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_485(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_479(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_475(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_483(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_484(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_480(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_481(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_482(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; __inline static struct Scsi_Host *dev_to_shost___0(struct device *dev ) { int tmp ; struct device const *__mptr ; { goto ldv_58266; ldv_58265: ; if ((unsigned long )dev->parent == (unsigned long )((struct device *)0)) { return ((struct Scsi_Host *)0); } else { } dev = dev->parent; ldv_58266: tmp = scsi_is_host_device((struct device const 
*)dev); if (tmp == 0) { goto ldv_58265; } else { } __mptr = (struct device const *)dev; return ((struct Scsi_Host *)__mptr + 0xfffffffffffffc48UL); } } int ldv_scsi_add_host_with_dma_487(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; static ssize_t qla4_8xxx_sysfs_read_fw_dump(struct file *filep , struct kobject *kobj , struct bin_attribute *ba , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *ha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; struct scsi_qla_host *tmp___0 ; int tmp___1 ; int tmp___2 ; ssize_t tmp___3 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost___0((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = to_qla_host(tmp); ha = tmp___0; tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { return (-22L); } else { } tmp___2 = constant_test_bit(26L, (unsigned long const volatile *)(& ha->flags)); if (tmp___2 == 0) { return (0L); } else { } tmp___3 = memory_read_from_buffer((void *)buf, count, & off, (void const *)ha->fw_dump, (size_t )ha->fw_dump_size); return (tmp___3); } } static ssize_t qla4_8xxx_sysfs_write_fw_dump(struct file *filep , struct kobject *kobj , struct bin_attribute *ba , char *buf , loff_t off , size_t count ) { struct scsi_qla_host *ha ; struct kobject const *__mptr ; struct Scsi_Host *tmp ; struct scsi_qla_host *tmp___0 ; uint32_t dev_state ; long reading ; int ret ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { __mptr = (struct kobject const *)kobj; tmp = dev_to_shost___0((struct device *)__mptr + 0xfffffffffffffff0UL); tmp___0 = to_qla_host(tmp); ha = tmp___0; ret = 0; tmp___1 = is_qla40XX(ha); if (tmp___1 != 0) { return (-22L); } else { } if (off != 0LL) { return ((ssize_t )ret); } else { } *(buf + 1UL) = 0; ret = kstrtol((char const *)buf, 10U, & reading); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid input. 
Return err %d\n", "qla4_8xxx_sysfs_write_fw_dump", ret); return ((ssize_t )ret); } else { } switch (reading) { case 0L: tmp___2 = test_and_clear_bit(26L, (unsigned long volatile *)(& ha->flags)); if (tmp___2 != 0) { clear_bit(24L, (unsigned long volatile *)(& ha->flags)); qla4xxx_alloc_fw_dump(ha); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Firmware template reloaded\n"); } else { } } else { } goto ldv_63278; case 1L: tmp___3 = constant_test_bit(24L, (unsigned long const volatile *)(& ha->flags)); if (tmp___3 != 0) { tmp___4 = constant_test_bit(26L, (unsigned long const volatile *)(& ha->flags)); if (tmp___4 == 0) { set_bit(26L, (unsigned long volatile *)(& ha->flags)); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Raw firmware dump ready for read on (%ld).\n", ha->host_no); } else { } } else { } } else { } goto ldv_63278; case 2L: (*((ha->isp_ops)->idc_lock))(ha); tmp___5 = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp___5; if (dev_state == 3U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Setting Need reset\n", "qla4_8xxx_sysfs_write_fw_dump"); qla4_8xxx_wr_direct(ha, 4U, 4U); tmp___6 = is_qla8022(ha); if (tmp___6 != 0) { set_bit(25L, (unsigned long volatile *)(& ha->flags)); set_bit(19L, (unsigned long volatile *)(& ha->flags)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset owner is 0x%x\n", "qla4_8xxx_sysfs_write_fw_dump", (int )ha->func_num); } else { tmp___7 = is_qla8032(ha); if (tmp___7 != 0) { goto _L; } else { tmp___8 = is_qla8042(ha); if (tmp___8 != 0) { _L: /* CIL Label */ tmp___9 = qla4_83xx_can_perform_reset(ha); if (tmp___9 != 0) { set_bit(25L, (unsigned long volatile *)(& ha->flags)); set_bit(19L, (unsigned long volatile *)(& ha->flags)); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset owner is 0x%x\n", "qla4_8xxx_sysfs_write_fw_dump", (int )ha->func_num); } else { } } else { } } } } else { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset not performed as device state is 0x%x\n", "qla4_8xxx_sysfs_write_fw_dump", dev_state); } (*((ha->isp_ops)->idc_unlock))(ha); goto ldv_63278; default: ; goto ldv_63278; } ldv_63278: ; return ((ssize_t )count); } } static struct bin_attribute sysfs_fw_dump_attr = {{"fw_dump", 384U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, 0UL, 0, & qla4_8xxx_sysfs_read_fw_dump, & qla4_8xxx_sysfs_write_fw_dump, 0}; static struct sysfs_entry bin_file_entries[2U] = { {(char *)"fw_dump", & sysfs_fw_dump_attr}, {(char *)0, 0}}; void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; int ret ; { host = ha->host; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_63294; ldv_63293: ret = sysfs_create_bin_file(& host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); if (ret != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "Unable to create sysfs %s binary attribute (%d).\n", iter->name, ret); } else { } iter = iter + 1; ldv_63294: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_63293; } else { } return; } } void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha ) { struct Scsi_Host *host ; struct sysfs_entry *iter ; { host = ha->host; iter = (struct sysfs_entry *)(& bin_file_entries); goto ldv_63302; ldv_63301: sysfs_remove_bin_file(& 
host->shost_gendev.kobj, (struct bin_attribute const *)iter->attr); iter = iter + 1; ldv_63302: ; if ((unsigned long )iter->name != (unsigned long )((char *)0)) { goto ldv_63301; } else { } return; } } static ssize_t qla4xxx_fw_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___2 = is_qla80XX(ha); if (tmp___2 != 0) { tmp___0 = snprintf(buf, 4096UL, "%d.%02d.%02d (%x)\n", (int )ha->fw_info.fw_major, (int )ha->fw_info.fw_minor, (int )ha->fw_info.fw_patch, (int )ha->fw_info.fw_build); return ((ssize_t )tmp___0); } else { tmp___1 = snprintf(buf, 4096UL, "%d.%02d.%02d.%02d\n", (int )ha->fw_info.fw_major, (int )ha->fw_info.fw_minor, (int )ha->fw_info.fw_patch, (int )ha->fw_info.fw_build); return ((ssize_t )tmp___1); } } } static ssize_t qla4xxx_serial_num_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& ha->serial_number)); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_iscsi_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%d.%02d\n", (int )ha->fw_info.iscsi_major, (int )ha->fw_info.iscsi_minor); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_optrom_version_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%d.%02d.%02d.%02d\n", (int )ha->fw_info.bootload_major, (int )ha->fw_info.bootload_minor, (int )ha->fw_info.bootload_patch, (int )ha->fw_info.bootload_build); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_board_id_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "0x%08X\n", ha->board_id); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_state_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; qla4xxx_get_firmware_state(ha); tmp___0 = snprintf(buf, 4096UL, "0x%08X%8X\n", ha->firmware_state, ha->addl_fw_state); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_phy_port_cnt_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; int 
tmp___1 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = is_qla40XX(ha); if (tmp___0 != 0) { return (-38L); } else { } tmp___1 = snprintf(buf, 4096UL, "0x%04X\n", (int )ha->phy_port_cnt); return ((ssize_t )tmp___1); } } static ssize_t qla4xxx_phy_port_num_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = is_qla40XX(ha); if (tmp___0 != 0) { return (-38L); } else { } tmp___1 = snprintf(buf, 4096UL, "0x%04X\n", (int )ha->phy_port_num); return ((ssize_t )tmp___1); } } static ssize_t qla4xxx_iscsi_func_cnt_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; int tmp___1 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = is_qla40XX(ha); if (tmp___0 != 0) { return (-38L); } else { } tmp___1 = snprintf(buf, 4096UL, "0x%04X\n", (int )ha->iscsi_pci_func_cnt); return ((ssize_t )tmp___1); } } static ssize_t qla4xxx_hba_model_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& ha->model_name)); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_timestamp_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%s %s\n", (uint8_t *)(& ha->fw_info.fw_build_date), (uint8_t *)(& ha->fw_info.fw_build_time)); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_build_user_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& ha->fw_info.fw_build_user)); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_ext_timestamp_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; tmp___0 = snprintf(buf, 4096UL, "%s\n", (uint8_t *)(& ha->fw_info.extended_timestamp)); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_load_src_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; char *load_src ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; load_src = (char *)0; switch ((int )ha->fw_info.fw_load_source) { case 
1: load_src = (char *)"Flash Primary"; goto ldv_63418; case 2: load_src = (char *)"Flash Secondary"; goto ldv_63418; case 3: load_src = (char *)"Host Download"; goto ldv_63418; } ldv_63418: tmp___0 = snprintf(buf, 4096UL, "%s\n", load_src); return ((ssize_t )tmp___0); } } static ssize_t qla4xxx_fw_uptime_show(struct device *dev , struct device_attribute *attr , char *buf ) { struct scsi_qla_host *ha ; struct device const *__mptr ; struct scsi_qla_host *tmp ; int tmp___0 ; { __mptr = (struct device const *)dev; tmp = to_qla_host((struct Scsi_Host *)__mptr + 0xfffffffffffff6c0UL); ha = tmp; qla4xxx_about_firmware(ha); tmp___0 = snprintf(buf, 4096UL, "%u.%u secs\n", ha->fw_uptime_secs, ha->fw_uptime_msecs); return ((ssize_t )tmp___0); } } static struct device_attribute dev_attr_fw_version = {{"fw_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_serial_num = {{"serial_num", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_serial_num_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_iscsi_version = {{"iscsi_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_iscsi_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_optrom_version = {{"optrom_version", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_optrom_version_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_board_id = {{"board_id", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_board_id_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_state = {{"fw_state", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_state_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_phy_port_cnt = {{"phy_port_cnt", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_phy_port_cnt_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_phy_port_num = {{"phy_port_num", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_phy_port_num_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_iscsi_func_cnt = {{"iscsi_func_cnt", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_iscsi_func_cnt_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_hba_model = {{"hba_model", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, 
{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_hba_model_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_timestamp = {{"fw_timestamp", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_timestamp_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_build_user = {{"fw_build_user", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_build_user_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_ext_timestamp = {{"fw_ext_timestamp", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_ext_timestamp_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_load_src = {{"fw_load_src", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_load_src_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; static struct device_attribute dev_attr_fw_uptime = {{"fw_uptime", 292U, (_Bool)0, 0, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, & qla4xxx_fw_uptime_show, (ssize_t (*)(struct device * , struct device_attribute * , char const * , size_t ))0}; struct device_attribute *qla4xxx_host_attrs[16U] = { & dev_attr_fw_version, & dev_attr_serial_num, & dev_attr_iscsi_version, & dev_attr_optrom_version, & dev_attr_board_id, & dev_attr_fw_state, & dev_attr_phy_port_cnt, & dev_attr_phy_port_num, & dev_attr_iscsi_func_cnt, & dev_attr_hba_model, & dev_attr_fw_timestamp, & dev_attr_fw_build_user, & dev_attr_fw_ext_timestamp, & dev_attr_fw_load_src, & dev_attr_fw_uptime, (struct device_attribute *)0}; extern int ldv_release_20(void) ; extern int ldv_probe_20(void) ; void ldv_initialize_bin_attribute_20(void) { void *tmp ; void *tmp___0 ; void *tmp___1 ; { tmp = ldv_init_zalloc(296UL); sysfs_fw_dump_attr_group0 = (struct kobject *)tmp; tmp___0 = __VERIFIER_nondet_pointer(); sysfs_fw_dump_attr_group1 = (struct file *)tmp___0; tmp___1 = ldv_init_zalloc(72UL); sysfs_fw_dump_attr_group2 = (struct bin_attribute *)tmp___1; return; } } void ldv_main_exported_11(void) { struct device_attribute *ldvarg2 ; void *tmp ; struct device *ldvarg0 ; void *tmp___0 ; char *ldvarg1 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg2 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg0 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg1 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_11 == 1) { qla4xxx_iscsi_func_cnt_show(ldvarg0, ldvarg2, ldvarg1); ldv_state_variable_11 = 1; } else { } goto ldv_63639; default: ldv_stop(); } ldv_63639: ; return; } } void ldv_main_exported_7(void) { struct device_attribute *ldvarg6 ; void *tmp ; char *ldvarg5 ; void *tmp___0 ; struct device *ldvarg4 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg6 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg5 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg4 = (struct device 
*)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_7 == 1) { qla4xxx_fw_ext_timestamp_show(ldvarg4, ldvarg6, ldvarg5); ldv_state_variable_7 = 1; } else { } goto ldv_63648; default: ldv_stop(); } ldv_63648: ; return; } } void ldv_main_exported_17(void) { struct device *ldvarg59 ; void *tmp ; char *ldvarg60 ; void *tmp___0 ; struct device_attribute *ldvarg61 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg59 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg60 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg61 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_17 == 1) { qla4xxx_iscsi_version_show(ldvarg59, ldvarg61, ldvarg60); ldv_state_variable_17 = 1; } else { } goto ldv_63657; default: ldv_stop(); } ldv_63657: ; return; } } void ldv_main_exported_18(void) { char *ldvarg64 ; void *tmp ; struct device *ldvarg63 ; void *tmp___0 ; struct device_attribute *ldvarg65 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg64 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg63 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg65 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_18 == 1) { qla4xxx_serial_num_show(ldvarg63, ldvarg65, ldvarg64); ldv_state_variable_18 = 1; } else { } goto ldv_63666; default: ldv_stop(); } ldv_63666: ; return; } } void ldv_main_exported_16(void) { char *ldvarg80 ; void *tmp ; struct device *ldvarg79 ; void *tmp___0 ; struct device_attribute *ldvarg81 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg80 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg79 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg81 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_16 == 1) { qla4xxx_optrom_version_show(ldvarg79, ldvarg81, ldvarg80); ldv_state_variable_16 = 1; } else { } goto ldv_63675; default: ldv_stop(); } ldv_63675: ; return; } } void ldv_main_exported_13(void) { char *ldvarg83 ; void *tmp ; struct device *ldvarg82 ; void *tmp___0 ; struct device_attribute *ldvarg84 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg83 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg82 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg84 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_13 == 1) { qla4xxx_phy_port_cnt_show(ldvarg82, ldvarg84, ldvarg83); ldv_state_variable_13 = 1; } else { } goto ldv_63684; default: ldv_stop(); } ldv_63684: ; return; } } void ldv_main_exported_6(void) { char *ldvarg94 ; void *tmp ; struct device_attribute *ldvarg95 ; void *tmp___0 ; struct device *ldvarg93 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg94 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg95 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg93 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_6 == 1) { qla4xxx_fw_load_src_show(ldvarg93, ldvarg95, ldvarg94); ldv_state_variable_6 = 1; } else { } goto ldv_63693; default: ldv_stop(); } ldv_63693: ; return; } } void ldv_main_exported_9(void) { struct device *ldvarg96 ; void *tmp ; char *ldvarg97 ; void *tmp___0 ; struct 
device_attribute *ldvarg98 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg96 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg97 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg98 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_9 == 1) { qla4xxx_fw_timestamp_show(ldvarg96, ldvarg98, ldvarg97); ldv_state_variable_9 = 1; } else { } goto ldv_63702; default: ldv_stop(); } ldv_63702: ; return; } } void ldv_main_exported_12(void) { struct device *ldvarg99 ; void *tmp ; char *ldvarg100 ; void *tmp___0 ; struct device_attribute *ldvarg101 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg99 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg100 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg101 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_12 == 1) { qla4xxx_phy_port_num_show(ldvarg99, ldvarg101, ldvarg100); ldv_state_variable_12 = 1; } else { } goto ldv_63711; default: ldv_stop(); } ldv_63711: ; return; } } void ldv_main_exported_14(void) { char *ldvarg109 ; void *tmp ; struct device *ldvarg108 ; void *tmp___0 ; struct device_attribute *ldvarg110 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg109 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg108 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg110 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_14 == 1) { qla4xxx_fw_state_show(ldvarg108, ldvarg110, ldvarg109); ldv_state_variable_14 = 1; } else { } goto ldv_63720; default: ldv_stop(); } ldv_63720: ; return; } } void ldv_main_exported_15(void) { struct device_attribute *ldvarg113 ; void *tmp ; char *ldvarg112 ; void *tmp___0 ; struct device *ldvarg111 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg113 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg112 = (char *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg111 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_15 == 1) { qla4xxx_board_id_show(ldvarg111, ldvarg113, ldvarg112); ldv_state_variable_15 = 1; } else { } goto ldv_63729; default: ldv_stop(); } ldv_63729: ; return; } } void ldv_main_exported_20(void) { char *ldvarg103 ; void *tmp ; loff_t ldvarg107 ; size_t ldvarg105 ; char *ldvarg106 ; void *tmp___0 ; size_t ldvarg102 ; loff_t ldvarg104 ; int tmp___1 ; { tmp = ldv_init_zalloc(1UL); ldvarg103 = (char *)tmp; tmp___0 = ldv_init_zalloc(1UL); ldvarg106 = (char *)tmp___0; ldv_memset((void *)(& ldvarg107), 0, 8UL); ldv_memset((void *)(& ldvarg105), 0, 8UL); ldv_memset((void *)(& ldvarg102), 0, 8UL); ldv_memset((void *)(& ldvarg104), 0, 8UL); tmp___1 = __VERIFIER_nondet_int(); switch (tmp___1) { case 0: ; if (ldv_state_variable_20 == 2) { qla4_8xxx_sysfs_write_fw_dump(sysfs_fw_dump_attr_group1, sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, ldvarg106, ldvarg107, ldvarg105); ldv_state_variable_20 = 2; } else { } goto ldv_63741; case 1: ; if (ldv_state_variable_20 == 2) { qla4_8xxx_sysfs_read_fw_dump(sysfs_fw_dump_attr_group1, sysfs_fw_dump_attr_group0, sysfs_fw_dump_attr_group2, ldvarg103, ldvarg104, ldvarg102); ldv_state_variable_20 = 2; } else { } goto ldv_63741; case 2: ; if (ldv_state_variable_20 == 2) { ldv_release_20(); ldv_state_variable_20 = 1; ref_cnt = 
ref_cnt - 1; } else { } goto ldv_63741; case 3: ; if (ldv_state_variable_20 == 1) { ldv_probe_20(); ldv_state_variable_20 = 2; ref_cnt = ref_cnt + 1; } else { } goto ldv_63741; default: ldv_stop(); } ldv_63741: ; return; } } void ldv_main_exported_8(void) { char *ldvarg115 ; void *tmp ; struct device *ldvarg114 ; void *tmp___0 ; struct device_attribute *ldvarg116 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg115 = (char *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg114 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(48UL); ldvarg116 = (struct device_attribute *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_8 == 1) { qla4xxx_fw_build_user_show(ldvarg114, ldvarg116, ldvarg115); ldv_state_variable_8 = 1; } else { } goto ldv_63753; default: ldv_stop(); } ldv_63753: ; return; } } void ldv_main_exported_10(void) { struct device *ldvarg133 ; void *tmp ; struct device_attribute *ldvarg135 ; void *tmp___0 ; char *ldvarg134 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1416UL); ldvarg133 = (struct device *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg135 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg134 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_10 == 1) { qla4xxx_hba_model_show(ldvarg133, ldvarg135, ldvarg134); ldv_state_variable_10 = 1; } else { } goto ldv_63762; default: ldv_stop(); } ldv_63762: ; return; } } void ldv_main_exported_19(void) { struct device_attribute *ldvarg132 ; void *tmp ; struct device *ldvarg130 ; void *tmp___0 ; char *ldvarg131 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(48UL); ldvarg132 = (struct device_attribute *)tmp; tmp___0 = ldv_init_zalloc(1416UL); ldvarg130 = (struct device *)tmp___0; tmp___1 = ldv_init_zalloc(1UL); ldvarg131 = (char *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_19 == 1) { qla4xxx_fw_version_show(ldvarg130, ldvarg132, ldvarg131); ldv_state_variable_19 = 1; } else { } goto ldv_63771; default: ldv_stop(); } ldv_63771: ; return; } } void ldv_main_exported_5(void) { char *ldvarg137 ; void *tmp ; struct device_attribute *ldvarg138 ; void *tmp___0 ; struct device *ldvarg136 ; void *tmp___1 ; int tmp___2 ; { tmp = ldv_init_zalloc(1UL); ldvarg137 = (char *)tmp; tmp___0 = ldv_init_zalloc(48UL); ldvarg138 = (struct device_attribute *)tmp___0; tmp___1 = ldv_init_zalloc(1416UL); ldvarg136 = (struct device *)tmp___1; tmp___2 = __VERIFIER_nondet_int(); switch (tmp___2) { case 0: ; if (ldv_state_variable_5 == 1) { qla4xxx_fw_uptime_show(ldvarg136, ldvarg138, ldvarg137); ldv_state_variable_5 = 1; } else { } goto ldv_63780; default: ldv_stop(); } ldv_63780: ; return; } } bool ldv_queue_work_on_459(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_460(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_461(int ldv_func_arg1 , 
struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_462(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_463(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_469(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_475(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_477(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_479(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_480(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_481(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_482(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_483(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_484(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_485(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_486(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_487(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } __inline static long ldv__builtin_expect(long exp , long c ) ; bool ldv_queue_work_on_508(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , 
struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_510(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_509(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_512(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_511(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_518(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_535(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern size_t sg_copy_from_buffer(struct scatterlist * , unsigned int , void const * , size_t ) ; extern size_t sg_copy_to_buffer(struct scatterlist * , unsigned int , void * , size_t ) ; extern void bsg_job_done(struct bsg_job * , int , unsigned int ) ; struct sk_buff *ldv_skb_clone_526(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_534(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_528(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_524(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_532(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_533(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_529(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_530(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_531(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_536(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; __inline static int ql4xxx_reset_active(struct scsi_qla_host *ha ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { tmp = constant_test_bit(20L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp != 0) { tmp___5 = 1; } else { tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___0 != 0) { tmp___5 = 1; } else { tmp___1 = constant_test_bit(2L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___1 != 0) { tmp___5 = 1; } else { tmp___2 = constant_test_bit(5L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___2 != 0) { tmp___5 = 1; } else { tmp___3 = constant_test_bit(4L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___3 != 0) { tmp___5 = 1; } else { tmp___4 = constant_test_bit(21L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___4 != 0) { tmp___5 = 1; } else { tmp___5 = 0; } } } } } } return (tmp___5); } } int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job ) ; static int qla4xxx_read_flash(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_reply *bsg_reply ; struct iscsi_bsg_request *bsg_req ; uint32_t offset ; uint32_t length ; dma_addr_t flash_dma ; uint8_t *flash ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; void *tmp___4 ; size_t tmp___5 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = 
to_qla_host(host); ha = tmp___0; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; offset = 0U; length = 0U; flash = (uint8_t *)0U; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = ql4xxx_reset_active(ha); if (tmp___3 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_read_flash"); rval = -16; goto leave; } else { } if (ha->flash_state != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: another flash operation active\n", "qla4xxx_read_flash"); rval = -16; goto leave; } else { } ha->flash_state = 1U; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; length = bsg_job->reply_payload.payload_len; tmp___4 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )length, & flash_dma, 208U, (struct dma_attrs *)0); flash = (uint8_t *)tmp___4; if ((unsigned long )flash == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: dma alloc failed for flash data\n", "qla4xxx_read_flash"); rval = -12; goto leave; } else { } rval = qla4xxx_get_flash(ha, flash_dma, offset, length); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: get flash failed\n", "qla4xxx_read_flash"); bsg_reply->result = 458752U; rval = -5; } else { tmp___5 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)flash, (size_t )length); bsg_reply->reply_payload_rcv_len = (uint32_t )tmp___5; bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_attrs(& (ha->pdev)->dev, (size_t )length, (void *)flash, flash_dma, (struct dma_attrs *)0); leave: ha->flash_state = 0U; return (rval); } } static int qla4xxx_update_flash(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_reply *bsg_reply ; struct iscsi_bsg_request *bsg_req ; uint32_t length ; uint32_t offset ; uint32_t options ; dma_addr_t flash_dma ; uint8_t *flash ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; void *tmp___4 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; length = 0U; offset = 0U; options = 0U; flash = (uint8_t *)0U; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = ql4xxx_reset_active(ha); if (tmp___3 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_update_flash"); rval = -16; goto leave; } else { } if (ha->flash_state != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: another flash operation active\n", "qla4xxx_update_flash"); rval = -16; goto leave; } else { } ha->flash_state = 2U; length = bsg_job->request_payload.payload_len; offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; options = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; tmp___4 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )length, & flash_dma, 208U, (struct dma_attrs *)0); flash = (uint8_t *)tmp___4; if ((unsigned long )flash == (unsigned long )((uint8_t *)0U)) { 
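/* dma_alloc_attrs() returned NULL for the flash bounce buffer: log the
 * failure and bail out through the shared "leave" label, which clears
 * ha->flash_state so the next flash request is not rejected as "another
 * flash operation active" (-12 is -ENOMEM). */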
dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: dma alloc failed for flash data\n", "qla4xxx_update_flash"); rval = -12; goto leave; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)flash, (size_t )length); rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: set flash failed\n", "qla4xxx_update_flash"); bsg_reply->result = 458752U; rval = -5; } else { bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_attrs(& (ha->pdev)->dev, (size_t )length, (void *)flash, flash_dma, (struct dma_attrs *)0); leave: ha->flash_state = 0U; return (rval); } } static int qla4xxx_get_acb_state(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint32_t status[8U] ; uint32_t acb_idx ; uint32_t ip_idx ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; size_t tmp___5 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = is_qla4010(ha); if (tmp___3 != 0) { goto leave; } else { } tmp___4 = ql4xxx_reset_active(ha); if (tmp___4 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_get_acb_state"); rval = -16; goto leave; } else { } if (bsg_job->reply_payload.payload_len <= 31U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: invalid payload len %d\n", "qla4xxx_get_acb_state", bsg_job->reply_payload.payload_len); rval = -22; goto leave; } else { } acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, (uint32_t *)(& status)); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: get ip state failed\n", "qla4xxx_get_acb_state"); bsg_reply->result = 458752U; rval = -5; } else { tmp___5 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)(& status), 32UL); bsg_reply->reply_payload_rcv_len = (uint32_t )tmp___5; bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: ; return (rval); } } static int qla4xxx_read_nvram(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint32_t offset ; uint32_t len ; uint32_t total_len ; dma_addr_t nvram_dma ; uint8_t *nvram ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; void *tmp___10 ; size_t tmp___11 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; offset = 0U; len = 0U; total_len = 0U; nvram = (uint8_t *)0U; rval = -22; 
bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = is_qla4010(ha); if (tmp___3 == 0) { tmp___4 = is_qla4022(ha); if (tmp___4 == 0) { tmp___5 = is_qla4032(ha); if (tmp___5 == 0) { goto leave; } else { } } else { } } else { } tmp___6 = ql4xxx_reset_active(ha); if (tmp___6 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_read_nvram"); rval = -16; goto leave; } else { } offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->reply_payload.payload_len; total_len = offset + len; tmp___7 = is_qla4010(ha); if (tmp___7 != 0 && total_len > 512U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: offset+len greater than max nvram size, offset=%d len=%d\n", "qla4xxx_read_nvram", offset, len); goto leave; } else { tmp___8 = is_qla4022(ha); if (tmp___8 != 0) { goto _L; } else { tmp___9 = is_qla4032(ha); if (tmp___9 != 0) { _L: /* CIL Label */ if (total_len > 2048U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: offset+len greater than max nvram size, offset=%d len=%d\n", "qla4xxx_read_nvram", offset, len); goto leave; } else { } } else { } } } tmp___10 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )len, & nvram_dma, 208U, (struct dma_attrs *)0); nvram = (uint8_t *)tmp___10; if ((unsigned long )nvram == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: dma alloc failed for nvram data\n", "qla4xxx_read_nvram"); rval = -12; goto leave; } else { } rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: get nvram failed\n", "qla4xxx_read_nvram"); bsg_reply->result = 458752U; rval = -5; } else { tmp___11 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)nvram, (size_t )len); bsg_reply->reply_payload_rcv_len = (uint32_t )tmp___11; bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_attrs(& (ha->pdev)->dev, (size_t )len, (void *)nvram, nvram_dma, (struct dma_attrs *)0); leave: ; return (rval); } } static int qla4xxx_update_nvram(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint32_t offset ; uint32_t len ; uint32_t total_len ; dma_addr_t nvram_dma ; uint8_t *nvram ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; void *tmp___10 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; offset = 0U; len = 0U; total_len = 0U; nvram = (uint8_t *)0U; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = is_qla4010(ha); if (tmp___3 == 0) { tmp___4 = is_qla4022(ha); if (tmp___4 == 0) { tmp___5 = is_qla4032(ha); if (tmp___5 == 0) { goto leave; } else { } } else { } } else { } tmp___6 = ql4xxx_reset_active(ha); if (tmp___6 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset 
active\n", "qla4xxx_update_nvram"); rval = -16; goto leave; } else { } offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->request_payload.payload_len; total_len = offset + len; tmp___7 = is_qla4010(ha); if (tmp___7 != 0 && total_len > 512U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: offset+len greater than max nvram size, offset=%d len=%d\n", "qla4xxx_update_nvram", offset, len); goto leave; } else { tmp___8 = is_qla4022(ha); if (tmp___8 != 0) { goto _L; } else { tmp___9 = is_qla4032(ha); if (tmp___9 != 0) { _L: /* CIL Label */ if (total_len > 2048U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: offset+len greater than max nvram size, offset=%d len=%d\n", "qla4xxx_update_nvram", offset, len); goto leave; } else { } } else { } } } tmp___10 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )len, & nvram_dma, 208U, (struct dma_attrs *)0); nvram = (uint8_t *)tmp___10; if ((unsigned long )nvram == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: dma alloc failed for flash data\n", "qla4xxx_update_nvram"); rval = -12; goto leave; } else { } sg_copy_to_buffer(bsg_job->request_payload.sg_list, (unsigned int )bsg_job->request_payload.sg_cnt, (void *)nvram, (size_t )len); rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: set nvram failed\n", "qla4xxx_update_nvram"); bsg_reply->result = 458752U; rval = -5; } else { bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_attrs(& (ha->pdev)->dev, (size_t )len, (void *)nvram, nvram_dma, (struct dma_attrs *)0); leave: ; return (rval); } } static int qla4xxx_restore_defaults(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint32_t region ; uint32_t field0 ; uint32_t field1 ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; region = 0U; field0 = 0U; field1 = 0U; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = is_qla4010(ha); if (tmp___3 != 0) { goto leave; } else { } tmp___4 = ql4xxx_reset_active(ha); if (tmp___4 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_restore_defaults"); rval = -16; goto leave; } else { } region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: set nvram failed\n", "qla4xxx_restore_defaults"); bsg_reply->result = 458752U; rval = -5; } else { bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); leave: ; return (rval); } } static int qla4xxx_bsg_get_acb(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct 
iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint32_t acb_type ; uint32_t len ; dma_addr_t acb_dma ; uint8_t *acb ; int rval ; int tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; void *tmp___5 ; size_t tmp___6 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; acb_type = 0U; len = 0U; acb = (uint8_t *)0U; rval = -22; bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = pci_channel_offline(ha->pdev); tmp___2 = ldv__builtin_expect(tmp___1 != 0, 0L); if (tmp___2 != 0L) { goto leave; } else { } tmp___3 = is_qla4010(ha); if (tmp___3 != 0) { goto leave; } else { } tmp___4 = ql4xxx_reset_active(ha); if (tmp___4 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: reset active\n", "qla4xxx_bsg_get_acb"); rval = -16; goto leave; } else { } acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; len = bsg_job->reply_payload.payload_len; if (len <= 767U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: invalid acb len %d\n", "qla4xxx_bsg_get_acb", len); rval = -22; goto leave; } else { } tmp___5 = dma_alloc_attrs(& (ha->pdev)->dev, (size_t )len, & acb_dma, 208U, (struct dma_attrs *)0); acb = (uint8_t *)tmp___5; if ((unsigned long )acb == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: dma alloc failed for acb data\n", "qla4xxx_bsg_get_acb"); rval = -12; goto leave; } else { } rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len); if (rval != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: get acb failed\n", "qla4xxx_bsg_get_acb"); bsg_reply->result = 458752U; rval = -5; } else { tmp___6 = sg_copy_from_buffer(bsg_job->reply_payload.sg_list, (unsigned int )bsg_job->reply_payload.sg_cnt, (void const *)acb, (size_t )len); bsg_reply->reply_payload_rcv_len = (uint32_t )tmp___6; bsg_reply->result = 0U; } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); dma_free_attrs(& (ha->pdev)->dev, (size_t )len, (void *)acb, acb_dma, (struct dma_attrs *)0); leave: ; return (rval); } } static void ql4xxx_execute_diag_cmd(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint8_t *rsp_ptr ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int status ; int tmp___1 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; rsp_ptr = (uint8_t *)0U; status = 1; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: in\n", "ql4xxx_execute_diag_cmd"); } else { } tmp___1 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Adapter reset in progress. 
Invalid Request\n", "ql4xxx_execute_diag_cmd"); bsg_reply->result = 458752U; goto exit_diag_mem_test; } else { } bsg_reply->reply_payload_rcv_len = 0U; memcpy((void *)(& mbox_cmd), (void const *)(& bsg_req->rqst_data.h_vendor.vendor_cmd) + 1U, 32UL); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", "ql4xxx_execute_diag_cmd", mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], mbox_cmd[7]); } else { } status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", "ql4xxx_execute_diag_cmd", mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7]); } else { } if (status == 0) { bsg_reply->result = 0U; } else { bsg_reply->result = 458752U; } bsg_job->reply_len = 40U; rsp_ptr = (uint8_t *)bsg_reply + 8UL; memcpy((void *)rsp_ptr, (void const *)(& mbox_sts), 32UL); exit_diag_mem_test: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: bsg_reply->result = x%x, status = %s\n", "ql4xxx_execute_diag_cmd", bsg_reply->result, status == 1 ? (char *)"FAILED" : (char *)"SUCCEEDED"); } else { } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); return; } } static int qla4_83xx_wait_for_loopback_config_comp(struct scsi_qla_host *ha , int wait_for_link ) { int status ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; { status = 0; tmp___0 = wait_for_completion_timeout(& ha->idc_comp, 1250UL); if (tmp___0 == 0UL) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC Complete notification not received, Waiting for another %d timeout", "qla4_83xx_wait_for_loopback_config_comp", ha->idc_extend_tmo); if (ha->idc_extend_tmo != 0) { tmp = wait_for_completion_timeout(& ha->idc_comp, (unsigned long )(ha->idc_extend_tmo * 250)); if (tmp == 0UL) { ha->notify_idc_comp = 0; ha->notify_link_up_comp = 0; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Aborting: IDC Complete notification not received", "qla4_83xx_wait_for_loopback_config_comp"); status = 1; goto exit_wait; } else if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC Complete notification received\n", "qla4_83xx_wait_for_loopback_config_comp"); } else { } } else { } } else if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC Complete notification received\n", "qla4_83xx_wait_for_loopback_config_comp"); } else { } ha->notify_idc_comp = 0; if (wait_for_link != 0) { tmp___1 = wait_for_completion_timeout(& ha->link_up_comp, 1250UL); if (tmp___1 == 0UL) { ha->notify_link_up_comp = 0; dev_printk("\f", (struct device const *)(& (ha->pdev)->dev), "%s: Aborting: LINK UP notification not received", "qla4_83xx_wait_for_loopback_config_comp"); status = 1; goto exit_wait; } else if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: LINK UP notification received\n", "qla4_83xx_wait_for_loopback_config_comp"); } else { } ha->notify_link_up_comp = 0; } else { } exit_wait: ; return (status); } } static int qla4_83xx_pre_loopback_config(struct scsi_qla_host *ha , uint32_t *mbox_cmd ) 
{ uint32_t config ; int status ; { config = 0U; status = 0; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: in\n", "qla4_83xx_pre_loopback_config"); } else { } status = qla4_83xx_get_port_config(ha, & config); if (status != 0) { goto exit_pre_loopback_config; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Default port config=%08X\n", "qla4_83xx_pre_loopback_config", config); } else { } if ((config & 4U) != 0U || (config & 8U) != 0U) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Loopback diagnostics already in progress. Invalid request\n", "qla4_83xx_pre_loopback_config"); goto exit_pre_loopback_config; } else { } if (*(mbox_cmd + 1UL) == 7U) { config = config | 4U; } else { } if (*(mbox_cmd + 1UL) == 8U) { config = config | 8U; } else { } config = config & 4294967279U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: New port config=%08X\n", "qla4_83xx_pre_loopback_config", config); } else { } ha->notify_idc_comp = 1; ha->notify_link_up_comp = 1; qla4xxx_get_firmware_state(ha); status = qla4_83xx_set_port_config(ha, & config); if (status != 0) { ha->notify_idc_comp = 0; ha->notify_link_up_comp = 0; goto exit_pre_loopback_config; } else { } exit_pre_loopback_config: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: status = %s\n", "qla4_83xx_pre_loopback_config", status == 1 ? (char *)"FAILED" : (char *)"SUCCEEDED"); } else { } return (status); } } static int qla4_83xx_post_loopback_config(struct scsi_qla_host *ha , uint32_t *mbox_cmd ) { int status ; uint32_t config ; { status = 0; config = 0U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: in\n", "qla4_83xx_post_loopback_config"); } else { } status = qla4_83xx_get_port_config(ha, & config); if (status != 0) { goto exit_post_loopback_config; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: port config=%08X\n", "qla4_83xx_post_loopback_config", config); } else { } if (*(mbox_cmd + 1UL) == 7U) { config = config & 4294967291U; } else if (*(mbox_cmd + 1UL) == 8U) { config = config & 4294967287U; } else { } config = config | 16U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Restore default port config=%08X\n", "qla4_83xx_post_loopback_config", config); } else { } ha->notify_idc_comp = 1; if ((ha->addl_fw_state & 16U) != 0U) { ha->notify_link_up_comp = 1; } else { } status = qla4_83xx_set_port_config(ha, & config); if (status != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Scheduling adapter reset\n", "qla4_83xx_post_loopback_config"); set_bit(1L, (unsigned long volatile *)(& ha->dpc_flags)); clear_bit(9L, (unsigned long volatile *)(& ha->flags)); goto exit_post_loopback_config; } else { } exit_post_loopback_config: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: status = %s\n", "qla4_83xx_post_loopback_config", status == 1 ? 
(char *)"FAILED" : (char *)"SUCCEEDED"); } else { } return (status); } } static void qla4xxx_execute_diag_loopback_cmd(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; struct iscsi_bsg_reply *bsg_reply ; uint8_t *rsp_ptr ; uint32_t mbox_cmd[8U] ; uint32_t mbox_sts[8U] ; int wait_for_link ; int status ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; rsp_ptr = (uint8_t *)0U; wait_for_link = 1; status = 1; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: in\n", "qla4xxx_execute_diag_loopback_cmd"); } else { } bsg_reply->reply_payload_rcv_len = 0U; tmp___1 = constant_test_bit(9L, (unsigned long const volatile *)(& ha->flags)); if (tmp___1 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Loopback Diagnostics already in progress. Invalid Request\n", "qla4xxx_execute_diag_loopback_cmd"); bsg_reply->result = 458752U; goto exit_loopback_cmd; } else { } tmp___2 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->dpc_flags)); if (tmp___2 != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Adapter reset in progress. Invalid Request\n", "qla4xxx_execute_diag_loopback_cmd"); bsg_reply->result = 458752U; goto exit_loopback_cmd; } else { } memcpy((void *)(& mbox_cmd), (void const *)(& bsg_req->rqst_data.h_vendor.vendor_cmd) + 1U, 32UL); tmp___3 = is_qla8032(ha); if (tmp___3 != 0) { goto _L; } else { tmp___4 = is_qla8042(ha); if (tmp___4 != 0) { _L: /* CIL Label */ status = qla4_83xx_pre_loopback_config(ha, (uint32_t *)(& mbox_cmd)); if (status != 0) { bsg_reply->result = 458752U; goto exit_loopback_cmd; } else { } status = qla4_83xx_wait_for_loopback_config_comp(ha, wait_for_link); if (status != 0) { bsg_reply->result = 196608U; goto restore; } else { } } else { } } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox_cmd: %08X %08X %08X %08X %08X %08X %08X %08X\n", "qla4xxx_execute_diag_loopback_cmd", mbox_cmd[0], mbox_cmd[1], mbox_cmd[2], mbox_cmd[3], mbox_cmd[4], mbox_cmd[5], mbox_cmd[6], mbox_cmd[7]); } else { } status = qla4xxx_mailbox_command(ha, 8, 8, (uint32_t *)(& mbox_cmd), (uint32_t *)(& mbox_sts)); if (status == 0) { bsg_reply->result = 0U; } else { bsg_reply->result = 458752U; } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: mbox_sts: %08X %08X %08X %08X %08X %08X %08X %08X\n", "qla4xxx_execute_diag_loopback_cmd", mbox_sts[0], mbox_sts[1], mbox_sts[2], mbox_sts[3], mbox_sts[4], mbox_sts[5], mbox_sts[6], mbox_sts[7]); } else { } bsg_job->reply_len = 40U; rsp_ptr = (uint8_t *)bsg_reply + 8UL; memcpy((void *)rsp_ptr, (void const *)(& mbox_sts), 32UL); restore: tmp___5 = is_qla8032(ha); if (tmp___5 != 0) { goto _L___0; } else { tmp___6 = is_qla8042(ha); if (tmp___6 != 0) { _L___0: /* CIL Label */ status = qla4_83xx_post_loopback_config(ha, (uint32_t *)(& mbox_cmd)); if (status != 0) { bsg_reply->result = 458752U; goto exit_loopback_cmd; } else { } if ((ha->addl_fw_state & 16U) == 0U) { wait_for_link = 0; } else { } status = qla4_83xx_wait_for_loopback_config_comp(ha, wait_for_link); if (status != 0) { 
bsg_reply->result = 196608U; goto exit_loopback_cmd; } else { } } else { } } exit_loopback_cmd: ; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: bsg_reply->result = x%x, status = %s\n", "qla4xxx_execute_diag_loopback_cmd", bsg_reply->result, status == 1 ? (char *)"FAILED" : (char *)"SUCCEEDED"); } else { } bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); return; } } static int qla4xxx_execute_diag_test(struct bsg_job *bsg_job ) { struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; struct iscsi_bsg_request *bsg_req ; uint32_t diag_cmd ; int rval ; { tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; rval = -22; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: in\n", "qla4xxx_execute_diag_test"); } else { } diag_cmd = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; if (diag_cmd == 117U) { switch (bsg_req->rqst_data.h_vendor.vendor_cmd[2]) { case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 9U: ; case 12U: ; case 13U: ql4xxx_execute_diag_cmd(bsg_job); rval = 0; goto ldv_63418; case 7U: ; case 8U: qla4xxx_execute_diag_loopback_cmd(bsg_job); rval = 0; goto ldv_63418; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid diag test: 0x%x\n", "qla4xxx_execute_diag_test", bsg_req->rqst_data.h_vendor.vendor_cmd[2]); } ldv_63418: ; } else if (diag_cmd == 293U || diag_cmd == 294U) { ql4xxx_execute_diag_cmd(bsg_job); rval = 0; } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Invalid diag cmd: 0x%x\n", "qla4xxx_execute_diag_test", diag_cmd); } return (rval); } } int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job ) { struct iscsi_bsg_reply *bsg_reply ; struct iscsi_bsg_request *bsg_req ; struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { bsg_reply = (struct iscsi_bsg_reply *)bsg_job->reply; bsg_req = (struct iscsi_bsg_request *)bsg_job->request; tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) { case 1U: tmp___1 = qla4xxx_read_flash(bsg_job); return (tmp___1); case 2U: tmp___2 = qla4xxx_update_flash(bsg_job); return (tmp___2); case 3U: tmp___3 = qla4xxx_get_acb_state(bsg_job); return (tmp___3); case 4U: tmp___4 = qla4xxx_read_nvram(bsg_job); return (tmp___4); case 5U: tmp___5 = qla4xxx_update_nvram(bsg_job); return (tmp___5); case 6U: tmp___6 = qla4xxx_restore_defaults(bsg_job); return (tmp___6); case 7U: tmp___7 = qla4xxx_bsg_get_acb(bsg_job); return (tmp___7); case 8U: tmp___8 = qla4xxx_execute_diag_test(bsg_job); return (tmp___8); default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: invalid BSG vendor command: 0x%x\n", "qla4xxx_process_vendor_specific", bsg_req->msgcode); bsg_reply->result = 458752U; bsg_reply->reply_payload_rcv_len = 0U; bsg_job_done(bsg_job, (int )bsg_reply->result, bsg_reply->reply_payload_rcv_len); return (-38); } } } int qla4xxx_bsg_request(struct bsg_job *bsg_job ) { struct iscsi_bsg_request *bsg_req ; struct Scsi_Host *host ; struct Scsi_Host *tmp ; struct scsi_qla_host *ha ; struct scsi_qla_host *tmp___0 ; int tmp___1 ; { 
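/* Top-level iSCSI BSG entry point: only the host vendor message class
 * (msgcode 0x800000FF) is dispatched, via qla4xxx_process_vendor_specific();
 * any other msgcode is logged and rejected with -38 (-ENOSYS). */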
bsg_req = (struct iscsi_bsg_request *)bsg_job->request; tmp = dev_to_shost___0(bsg_job->dev); host = tmp; tmp___0 = to_qla_host(host); ha = tmp___0; switch (bsg_req->msgcode) { case 2147483903U: tmp___1 = qla4xxx_process_vendor_specific(bsg_job); return (tmp___1); default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: invalid BSG command: 0x%x\n", "qla4xxx_bsg_request", bsg_req->msgcode); } return (-38); } } bool ldv_queue_work_on_508(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_509(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_510(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_511(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); call_and_disable_all_2(2); return; } } bool ldv_queue_delayed_work_on_512(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___5 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } void *ldv_kmem_cache_alloc_518(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_pskb_expand_head_524(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_526(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv_skb_copy_528(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_529(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_530(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } struct sk_buff *ldv___netdev_alloc_skb_531(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } int ldv_pskb_expand_head_532(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , 
gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } int ldv_pskb_expand_head_533(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((int )((long )tmp)); } } struct sk_buff *ldv_skb_clone_534(struct sk_buff *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return ((struct sk_buff *)tmp); } } void *ldv_kmem_cache_alloc_535(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) { void *tmp ; { ldv_check_alloc_flags(flags); tmp = ldv_undef_ptr(); return (tmp); } } int ldv_scsi_add_host_with_dma_536(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; { tmp = scsi_add_host_with_dma(shost, dev, dma_dev); ldv_func_res = tmp; if (ldv_func_res == 0) { ldv_state_variable_27 = 1; ldv_initialize_scsi_host_template_27(); } else { } return (ldv_func_res); } } bool ldv_queue_work_on_557(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_work_on_559(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) ; bool ldv_queue_delayed_work_on_558(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; bool ldv_queue_delayed_work_on_561(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) ; void ldv_flush_workqueue_560(struct workqueue_struct *ldv_func_arg1 ) ; void *ldv_kmem_cache_alloc_567(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; void *ldv_kmem_cache_alloc_584(struct kmem_cache *ldv_func_arg1 , gfp_t flags ) ; extern void __udelay(unsigned long ) ; void *ldv_vmalloc_586(unsigned long ldv_func_arg1 ) ; void *ldv_vmalloc_587(unsigned long ldv_func_arg1 ) ; struct sk_buff *ldv_skb_clone_575(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_clone_583(struct sk_buff *ldv_func_arg1 , gfp_t flags ) ; struct sk_buff *ldv_skb_copy_577(struct sk_buff const *ldv_func_arg1 , gfp_t flags ) ; int ldv_pskb_expand_head_573(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_581(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; int ldv_pskb_expand_head_582(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_578(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_579(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; struct sk_buff *ldv___netdev_alloc_skb_580(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags ) ; int ldv_scsi_add_host_with_dma_585(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev ) ; uint32_t qla4_83xx_rd_reg(struct scsi_qla_host *ha , ulong addr ) { unsigned int tmp ; { tmp = readl((void const volatile *)(ha->nx_pcibase + addr)); return (tmp); } } void qla4_83xx_wr_reg(struct scsi_qla_host *ha , ulong addr , uint32_t val ) { { writel(val, (void volatile *)(ha->nx_pcibase + addr)); return; } } static int qla4_83xx_set_win_base(struct scsi_qla_host *ha , uint32_t addr ) { uint32_t val ; int ret_val ; { ret_val = 0; qla4_83xx_wr_reg(ha, 
(ulong )(((int )ha->func_num + 3584) * 4), addr); val = qla4_83xx_rd_reg(ha, (ulong )(((int )ha->func_num + 3584) * 4)); if (val != addr) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed to set register window : addr written 0x%x, read 0x%x!\n", "qla4_83xx_set_win_base", addr, val); ret_val = 1; } else { } return (ret_val); } } int qla4_83xx_rd_reg_indirect(struct scsi_qla_host *ha , uint32_t addr , uint32_t *data ) { int ret_val ; { ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == 0) { *data = qla4_83xx_rd_reg(ha, 14576UL); } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed read of addr 0x%x!\n", "qla4_83xx_rd_reg_indirect", addr); } return (ret_val); } } int qla4_83xx_wr_reg_indirect(struct scsi_qla_host *ha , uint32_t addr , uint32_t data ) { int ret_val ; { ret_val = qla4_83xx_set_win_base(ha, addr); if (ret_val == 0) { qla4_83xx_wr_reg(ha, 14576UL, data); } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed wrt to addr 0x%x, data 0x%x\n", "qla4_83xx_wr_reg_indirect", addr, data); } return (ret_val); } } static int qla4_83xx_flash_lock(struct scsi_qla_host *ha ) { int lock_owner ; int timeout ; uint32_t lock_status ; int ret_val ; uint32_t tmp ; { timeout = 0; lock_status = 0U; ret_val = 0; goto ldv_63322; ldv_63321: lock_status = qla4_83xx_rd_reg(ha, 14416UL); if (lock_status != 0U) { goto ldv_63319; } else { } timeout = timeout + 1; if (timeout > 499) { tmp = qla4_83xx_rd_reg(ha, 13568UL); lock_owner = (int )tmp; dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: flash lock by func %d failed, held by func %d\n", "qla4_83xx_flash_lock", (int )ha->func_num, lock_owner); ret_val = 1; goto ldv_63319; } else { } msleep(20U); ldv_63322: ; if (lock_status == 0U) { goto ldv_63321; } else { } ldv_63319: qla4_83xx_wr_reg(ha, 13568UL, (uint32_t )ha->func_num); return (ret_val); } } static void qla4_83xx_flash_unlock(struct scsi_qla_host *ha ) { { qla4_83xx_wr_reg(ha, 13568UL, 255U); qla4_83xx_rd_reg(ha, 14420UL); return; } } int qla4_83xx_flash_read_u32(struct scsi_qla_host *ha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) { int i ; uint32_t u32_word ; uint32_t addr ; int ret_val ; { addr = flash_addr; ret_val = 0; ret_val = qla4_83xx_flash_lock(ha); if (ret_val == 1) { goto exit_lock_error; } else { } if ((addr & 3U) != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Illegal addr = 0x%x\n", "qla4_83xx_flash_read_u32", addr); ret_val = 1; goto exit_flash_read; } else { } i = 0; goto ldv_63340; ldv_63339: ret_val = qla4_83xx_wr_reg_indirect(ha, 1108410416U, addr & 4294901760U); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW\n!", "qla4_83xx_flash_read_u32", addr); goto exit_flash_read; } else { } ret_val = qla4_83xx_rd_reg_indirect(ha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to read addr 0x%x!\n", "qla4_83xx_flash_read_u32", addr); goto exit_flash_read; } else { } *((__le32 *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; i = i + 1; ldv_63340: ; if (i < u32_word_count) { goto ldv_63339; } else { } exit_flash_read: qla4_83xx_flash_unlock(ha); exit_lock_error: ; return (ret_val); } } int qla4_83xx_lockless_flash_read_u32(struct scsi_qla_host *ha , uint32_t flash_addr , uint8_t *p_data , int u32_word_count ) { uint32_t i ; uint32_t u32_word ; uint32_t 
flash_offset ; uint32_t addr ; int ret_val ; { addr = flash_addr; ret_val = 0; flash_offset = addr & 65535U; if ((addr & 3U) != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Illegal addr = 0x%x\n", "qla4_83xx_lockless_flash_read_u32", addr); ret_val = 1; goto exit_lockless_read; } else { } ret_val = qla4_83xx_wr_reg_indirect(ha, 1108410416U, addr); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla4_83xx_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } if ((unsigned long )flash_offset + (unsigned long )u32_word_count * 4UL > 65535UL) { i = 0U; goto ldv_63356; ldv_63355: ret_val = qla4_83xx_rd_reg_indirect(ha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to read addr 0x%x!\n", "qla4_83xx_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((__le32 *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; flash_offset = flash_offset + 4U; if (flash_offset > 65535U) { ret_val = qla4_83xx_wr_reg_indirect(ha, 1108410416U, addr); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n", "qla4_83xx_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } flash_offset = 0U; } else { } i = i + 1U; ldv_63356: ; if ((uint32_t )u32_word_count > i) { goto ldv_63355; } else { } } else { i = 0U; goto ldv_63359; ldv_63358: ret_val = qla4_83xx_rd_reg_indirect(ha, (addr & 65535U) | 1108672512U, & u32_word); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: failed to read addr 0x%x!\n", "qla4_83xx_lockless_flash_read_u32", addr); goto exit_lockless_read; } else { } *((__le32 *)p_data) = u32_word; p_data = p_data + 4UL; addr = addr + 4U; i = i + 1U; ldv_63359: ; if ((uint32_t )u32_word_count > i) { goto ldv_63358; } else { } } exit_lockless_read: ; return (ret_val); } } void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha ) { int tmp ; { tmp = qla4_83xx_flash_lock(ha); if (tmp != 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Resetting rom lock\n", "qla4_83xx_rom_lock_recovery"); } else { } qla4_83xx_flash_unlock(ha); return; } } static int qla4_83xx_lock_recovery(struct scsi_qla_host *ha ) { uint32_t lock ; uint32_t lockid ; int ret_val ; { lock = 0U; ret_val = 1; lockid = (*((ha->isp_ops)->rd_reg_direct))(ha, 14236UL); if ((lockid & 3U) != 0U) { goto exit_lock_recovery; } else { } (*((ha->isp_ops)->wr_reg_direct))(ha, 14236UL, (uint32_t )(((int )ha->func_num << 2) | 1)); msleep(200U); lockid = (*((ha->isp_ops)->rd_reg_direct))(ha, 14236UL); if ((lockid & 60U) != (uint32_t )((int )ha->func_num << 2)) { goto exit_lock_recovery; } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC Lock recovery initiated for func %d\n", "qla4_83xx_lock_recovery", (int )ha->func_num); (*((ha->isp_ops)->wr_reg_direct))(ha, 14236UL, (uint32_t )(((int )ha->func_num << 2) | 2)); (*((ha->isp_ops)->wr_reg_direct))(ha, 13572UL, 255U); (*((ha->isp_ops)->rd_reg_direct))(ha, 14444UL); (*((ha->isp_ops)->wr_reg_direct))(ha, 14236UL, 0U); lock = (*((ha->isp_ops)->rd_reg_direct))(ha, 14440UL); if (lock != 0U) { lockid = (*((ha->isp_ops)->rd_reg_direct))(ha, 13572UL); lockid = ((lockid + 256U) & 4294967040U) | (uint32_t )ha->func_num; (*((ha->isp_ops)->wr_reg_direct))(ha, 13572UL, lockid); ret_val = 0; } 
else { } exit_lock_recovery: ; return (ret_val); } } int qla4_83xx_drv_lock(struct scsi_qla_host *ha ) { int timeout ; uint32_t status ; int ret_val ; uint32_t first_owner ; uint32_t tmo_owner ; uint32_t lock_id ; uint32_t func_num ; uint32_t lock_cnt ; { timeout = 0; status = 0U; ret_val = 0; first_owner = 0U; tmo_owner = 0U; goto ldv_63387; ldv_63386: status = qla4_83xx_rd_reg(ha, 14440UL); if (status != 0U) { lock_id = qla4_83xx_rd_reg(ha, 13572UL); lock_id = ((lock_id + 256U) & 4294967040U) | (uint32_t )ha->func_num; qla4_83xx_wr_reg(ha, 13572UL, lock_id); goto ldv_63384; } else { } if (timeout == 0) { first_owner = (*((ha->isp_ops)->rd_reg_direct))(ha, 13572UL); } else { } timeout = timeout + 1; if (timeout > 9) { tmo_owner = qla4_83xx_rd_reg(ha, 13572UL); func_num = tmo_owner & 255U; lock_cnt = tmo_owner >> 8; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Lock by func %d failed after 2s, lock held by func %d, lock count %d, first_owner %d\n", "qla4_83xx_drv_lock", (int )ha->func_num, func_num, lock_cnt, first_owner & 255U); if (first_owner != tmo_owner) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC lock failed for func %d\n", "qla4_83xx_drv_lock", (int )ha->func_num); timeout = 0; } else { ret_val = qla4_83xx_lock_recovery(ha); if (ret_val == 0) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC lock Recovery by %d successful\n", "qla4_83xx_drv_lock", (int )ha->func_num); goto ldv_63384; } else { } dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: IDC lock Recovery by %d failed, Retrying timeout\n", "qla4_83xx_drv_lock", (int )ha->func_num); timeout = 0; } } else { } msleep(200U); ldv_63387: ; if (status == 0U) { goto ldv_63386; } else { } ldv_63384: ; return (ret_val); } } void qla4_83xx_drv_unlock(struct scsi_qla_host *ha ) { int id ; uint32_t tmp ; { tmp = qla4_83xx_rd_reg(ha, 13572UL); id = (int )tmp; if ((id & 255) != (int )ha->func_num) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: IDC Unlock by %d failed, lock owner is %d\n", "qla4_83xx_drv_unlock", (int )ha->func_num, id & 255); return; } else { } qla4_83xx_wr_reg(ha, 13572UL, (uint32_t )(id | 255)); qla4_83xx_rd_reg(ha, 14444UL); return; } } void qla4_83xx_set_idc_dontreset(struct scsi_qla_host *ha ) { uint32_t idc_ctrl ; { idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl | 1U; qla4_83xx_wr_reg(ha, 14224UL, idc_ctrl); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: idc_ctrl = %d\n", "qla4_83xx_set_idc_dontreset", idc_ctrl); } else { } return; } } void qla4_83xx_clear_idc_dontreset(struct scsi_qla_host *ha ) { uint32_t idc_ctrl ; { idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); idc_ctrl = idc_ctrl & 4294967294U; qla4_83xx_wr_reg(ha, 14224UL, idc_ctrl); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: idc_ctrl = %d\n", "qla4_83xx_clear_idc_dontreset", idc_ctrl); } else { } return; } } int qla4_83xx_idc_dontreset(struct scsi_qla_host *ha ) { uint32_t idc_ctrl ; { idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); return ((int )idc_ctrl & 1); } } int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha ) { uint32_t drv_active ; uint32_t dev_part ; uint32_t dev_part1 ; uint32_t dev_part2 ; int i ; struct device_info device_map[16U] ; int func_nibble ; int nibble ; int nic_present ; int iscsi_present ; int iscsi_func_low ; { nic_present = 0; iscsi_present = 0; iscsi_func_low = 0; dev_part1 = 
qla4_83xx_rd_reg(ha, (ulong )*(ha->reg_tbl + 7UL)); dev_part2 = qla4_83xx_rd_reg(ha, 14308UL); drv_active = qla4_83xx_rd_reg(ha, (ulong )*(ha->reg_tbl + 3UL)); dev_part = dev_part1; nibble = 0; i = nibble; goto ldv_63432; ldv_63431: func_nibble = (int )((uint32_t )(15 << nibble * 4) & dev_part); func_nibble = func_nibble >> nibble * 4; device_map[i].func_num = i; device_map[i].device_type = func_nibble & 3; device_map[i].port_num = func_nibble & 12; if (device_map[i].device_type == 1) { if (((uint32_t )(1 << device_map[i].func_num) & drv_active) != 0U) { nic_present = nic_present + 1; goto ldv_63430; } else { } } else if (device_map[i].device_type == 3) { if (((uint32_t )(1 << device_map[i].func_num) & drv_active) != 0U) { if (iscsi_present == 0 || (iscsi_present != 0 && device_map[i].func_num < iscsi_func_low)) { iscsi_func_low = device_map[i].func_num; } else { } iscsi_present = iscsi_present + 1; } else { } } else { } if (nibble == 7) { nibble = 0; dev_part = dev_part2; } else { } i = i + 1; nibble = nibble + 1; ldv_63432: ; if (i <= 15) { goto ldv_63431; } else { } ldv_63430: ; if (nic_present == 0 && (int )ha->func_num == iscsi_func_low) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: can reset - NIC not present and lower iSCSI function is %d\n", "qla4_83xx_can_perform_reset", (int )ha->func_num); } else { } return (1); } else { } return (0); } } void qla4_83xx_need_reset_handler(struct scsi_qla_host *ha ) { uint32_t dev_state ; uint32_t drv_state ; uint32_t drv_active ; unsigned long reset_timeout ; unsigned long dev_init_timeout ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Performing ISP error recovery\n", "qla4_83xx_need_reset_handler"); tmp___4 = constant_test_bit(25L, (unsigned long const volatile *)(& ha->flags)); if (tmp___4 == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: reset acknowledged\n", "qla4_83xx_need_reset_handler"); } else { } qla4_8xxx_set_rst_ready(ha); dev_init_timeout = (unsigned long )(ha->nx_dev_init_timeout * 250U) + (unsigned long )jiffies; ldv_63450: ; if ((long )((unsigned long )jiffies - dev_init_timeout) >= 0L) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Non Reset owner dev init timeout\n", "qla4_83xx_need_reset_handler"); goto ldv_63449; } else { } (*((ha->isp_ops)->idc_unlock))(ha); msleep(1000U); (*((ha->isp_ops)->idc_lock))(ha); tmp = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp; if (dev_state == 4U) { goto ldv_63450; } else { } ldv_63449: ; } else { qla4_8xxx_set_rst_ready(ha); reset_timeout = (unsigned long )(ha->nx_reset_timeout * 250U) + (unsigned long )jiffies; tmp___0 = qla4_8xxx_rd_direct(ha, 5U); drv_state = (uint32_t )tmp___0; tmp___1 = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp___1; dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: drv_state = 0x%x, drv_active = 0x%x\n", "qla4_83xx_need_reset_handler", drv_state, drv_active); goto ldv_63459; ldv_63458: ; if ((long )((unsigned long )jiffies - reset_timeout) >= 0L) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: %s: RESET TIMEOUT! 
drv_state: 0x%08x, drv_active: 0x%08x\n", "qla4_83xx_need_reset_handler", (char *)"qla4xxx", drv_state, drv_active); goto ldv_63457; } else { } (*((ha->isp_ops)->idc_unlock))(ha); msleep(1000U); (*((ha->isp_ops)->idc_lock))(ha); tmp___2 = qla4_8xxx_rd_direct(ha, 5U); drv_state = (uint32_t )tmp___2; tmp___3 = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp___3; ldv_63459: ; if (drv_state != drv_active) { goto ldv_63458; } else { } ldv_63457: ; if (drv_state != drv_active) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset_owner turning off drv_active of non-acking function 0x%x\n", "qla4_83xx_need_reset_handler", drv_active ^ drv_state); drv_active = drv_active & drv_state; qla4_8xxx_wr_direct(ha, 3U, drv_active); } else { } clear_bit(25L, (unsigned long volatile *)(& ha->flags)); qla4_8xxx_device_bootstrap(ha); } return; } } void qla4_83xx_get_idc_param(struct scsi_qla_host *ha ) { uint32_t idc_params ; uint32_t ret_val ; int tmp ; { tmp = qla4_83xx_flash_read_u32(ha, 4096032U, (uint8_t *)(& idc_params), 1); ret_val = (uint32_t )tmp; if (ret_val == 0U) { ha->nx_dev_init_timeout = idc_params & 65535U; ha->nx_reset_timeout = idc_params >> 16; } else { ha->nx_dev_init_timeout = 30U; ha->nx_reset_timeout = 10U; } if (ql4xextended_error_logging == 2) { dev_printk("\017", (struct device const *)(& (ha->pdev)->dev), "%s: ha->nx_dev_init_timeout = %d, ha->nx_reset_timeout = %d\n", "qla4_83xx_get_idc_param", ha->nx_dev_init_timeout, ha->nx_reset_timeout); } else { } return; } } static void qla4_83xx_dump_reset_seq_hdr(struct scsi_qla_host *ha ) { uint8_t *phdr ; { if ((unsigned long )ha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Error: Invalid reset_seq_template\n", "qla4_83xx_dump_reset_seq_hdr"); return; } else { } phdr = ha->reset_tmplt.buff; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Reset Template: 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n", (int )*phdr, (int )*(phdr + 1UL), (int )*(phdr + 2UL), (int )*(phdr + 3UL), (int )*(phdr + 4UL), (int )*(phdr + 5UL), (int )*(phdr + 6UL), (int )*(phdr + 7UL), (int )*(phdr + 8UL), (int )*(phdr + 9UL), (int )*(phdr + 10UL), (int )*(phdr + 11UL), (int )*(phdr + 12UL), (int )*(phdr + 13UL), (int )*(phdr + 14UL), (int )*(phdr + 15UL)); } else { } return; } } static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha ) { uint8_t *p_cache ; uint32_t src ; uint32_t count ; uint32_t size ; uint64_t dest ; int ret_val ; uint32_t tmp ; void *tmp___0 ; { ret_val = 0; src = 65536U; tmp = qla4_83xx_rd_reg(ha, 13660UL); dest = (uint64_t )tmp; size = qla4_83xx_rd_reg(ha, 13664UL); if ((size & 15U) != 0U) { size = (size + 16U) & 4294967280U; } else { } count = size / 16U; tmp___0 = ldv_vmalloc_586((unsigned long )size); p_cache = (uint8_t *)tmp___0; if ((unsigned long )p_cache == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed to allocate memory for boot loader cache\n", "qla4_83xx_copy_bootloader"); ret_val = 1; goto exit_copy_bootloader; } else { } ret_val = qla4_83xx_lockless_flash_read_u32(ha, src, p_cache, (int )(size / 4U)); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Error reading firmware from flash\n", "qla4_83xx_copy_bootloader"); goto exit_copy_error; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& 
(ha->pdev)->dev), "%s: Read firmware from flash\n", "qla4_83xx_copy_bootloader"); } else { } ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache, count); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Error writing firmware to MS\n", "qla4_83xx_copy_bootloader"); goto exit_copy_error; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Wrote firmware size %d to MS\n", "qla4_83xx_copy_bootloader", size); } else { } exit_copy_error: vfree((void const *)p_cache); exit_copy_bootloader: ; return (ret_val); } } static int qla4_83xx_check_cmd_peg_status(struct scsi_qla_host *ha ) { uint32_t val ; uint32_t ret_val ; int retries ; { ret_val = 1U; retries = 60; ldv_63491: val = qla4_83xx_rd_reg(ha, 13904UL); if (val == 65281U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Command Peg initialization complete. State=0x%x\n", "qla4_83xx_check_cmd_peg_status", val); } else { } ret_val = 0U; goto ldv_63490; } else { } msleep(500U); retries = retries - 1; if (retries != 0) { goto ldv_63491; } else { } ldv_63490: ; return ((int )ret_val); } } static int qla4_83xx_poll_reg(struct scsi_qla_host *ha , uint32_t addr , int duration , uint32_t test_mask , uint32_t test_result ) { uint32_t value ; uint8_t retries ; int ret_val ; uint8_t tmp ; { ret_val = 0; ret_val = qla4_83xx_rd_reg_indirect(ha, addr, & value); if (ret_val == 1) { goto exit_poll_reg; } else { } retries = (uint8_t )(duration / 10); ldv_63504: ; if ((value & test_mask) != test_result) { msleep((unsigned int )(duration / 10)); ret_val = qla4_83xx_rd_reg_indirect(ha, addr, & value); if (ret_val == 1) { goto exit_poll_reg; } else { } ret_val = 1; } else { ret_val = 0; goto ldv_63503; } tmp = retries; retries = (uint8_t )((int )retries - 1); if ((unsigned int )tmp != 0U) { goto ldv_63504; } else { } ldv_63503: ; exit_poll_reg: ; if (ret_val == 1) { ha->reset_tmplt.seq_error = ha->reset_tmplt.seq_error + 1; dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n", "qla4_83xx_poll_reg", value, test_mask, test_result); } else { } return (ret_val); } } static int qla4_83xx_reset_seq_checksum_test(struct scsi_qla_host *ha ) { uint32_t sum ; uint16_t *buff ; int u16_count ; int ret_val ; uint16_t *tmp ; int tmp___0 ; { sum = 0U; buff = (uint16_t *)ha->reset_tmplt.buff; u16_count = (int )((unsigned int )(ha->reset_tmplt.hdr)->size / 2U); goto ldv_63514; ldv_63513: tmp = buff; buff = buff + 1; sum = (uint32_t )*tmp + sum; ldv_63514: tmp___0 = u16_count; u16_count = u16_count - 1; if (tmp___0 > 0) { goto ldv_63513; } else { } goto ldv_63517; ldv_63516: sum = (sum & 65535U) + (sum >> 16); ldv_63517: ; if (sum >> 16 != 0U) { goto ldv_63516; } else { } if (sum != 4294967295U) { ret_val = 0; } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Reset seq checksum failed\n", "qla4_83xx_reset_seq_checksum_test"); ret_val = 1; } return (ret_val); } } void qla4_83xx_read_reset_template(struct scsi_qla_host *ha ) { uint8_t *p_buff ; uint32_t addr ; uint32_t tmplt_hdr_def_size ; uint32_t tmplt_hdr_size ; uint32_t ret_val ; void *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { ha->reset_tmplt.seq_error = 0; tmp = ldv_vmalloc_587(8192UL); ha->reset_tmplt.buff = (uint8_t *)tmp; if ((unsigned long )ha->reset_tmplt.buff == (unsigned long )((uint8_t *)0U)) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: 
Failed to allocate reset template resources\n", "qla4_83xx_read_reset_template"); goto exit_read_reset_template; } else { } p_buff = ha->reset_tmplt.buff; addr = 5177344U; tmplt_hdr_def_size = 4U; if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Read template hdr size %d from Flash\n", "qla4_83xx_read_reset_template", tmplt_hdr_def_size); } else { } tmp___0 = qla4_83xx_flash_read_u32(ha, addr, p_buff, (int )tmplt_hdr_def_size); ret_val = (uint32_t )tmp___0; if (ret_val != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed to read reset template\n", "qla4_83xx_read_reset_template"); goto exit_read_template_error; } else { } ha->reset_tmplt.hdr = (struct qla4_83xx_reset_template_hdr *)ha->reset_tmplt.buff; tmplt_hdr_size = (unsigned int )(ha->reset_tmplt.hdr)->hdr_size / 4U; if (tmplt_hdr_size != tmplt_hdr_def_size || (unsigned int )(ha->reset_tmplt.hdr)->signature != 51966U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Template Header size %d is invalid, tmplt_hdr_def_size %d\n", "qla4_83xx_read_reset_template", tmplt_hdr_size, tmplt_hdr_def_size); goto exit_read_template_error; } else { } addr = (uint32_t )((int )(ha->reset_tmplt.hdr)->hdr_size + 5177344); p_buff = ha->reset_tmplt.buff + (unsigned long )(ha->reset_tmplt.hdr)->hdr_size; tmplt_hdr_def_size = (uint32_t )((unsigned long )((int )(ha->reset_tmplt.hdr)->size - (int )(ha->reset_tmplt.hdr)->hdr_size) / 4UL); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Read rest of the template size %d\n", "qla4_83xx_read_reset_template", (int )(ha->reset_tmplt.hdr)->size); } else { } tmp___1 = qla4_83xx_flash_read_u32(ha, addr, p_buff, (int )tmplt_hdr_def_size); ret_val = (uint32_t )tmp___1; if (ret_val != 0U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Failed to read reset template\n", "qla4_83xx_read_reset_template"); goto exit_read_template_error; } else { } tmp___2 = qla4_83xx_reset_seq_checksum_test(ha); if (tmp___2 != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Reset Seq checksum failed!\n", "qla4_83xx_read_reset_template"); goto exit_read_template_error; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset Seq checksum passed, Get stop, start and init seq offsets\n", "qla4_83xx_read_reset_template"); } else { } ha->reset_tmplt.init_offset = ha->reset_tmplt.buff + (unsigned long )(ha->reset_tmplt.hdr)->init_seq_offset; ha->reset_tmplt.start_offset = ha->reset_tmplt.buff + (unsigned long )(ha->reset_tmplt.hdr)->start_seq_offset; ha->reset_tmplt.stop_offset = ha->reset_tmplt.buff + (unsigned long )(ha->reset_tmplt.hdr)->hdr_size; qla4_83xx_dump_reset_seq_hdr(ha); goto exit_read_reset_template; exit_read_template_error: vfree((void const *)ha->reset_tmplt.buff); exit_read_reset_template: ; return; } } static void qla4_83xx_read_write_crb_reg(struct scsi_qla_host *ha , uint32_t raddr , uint32_t waddr ) { uint32_t value ; { qla4_83xx_rd_reg_indirect(ha, raddr, & value); qla4_83xx_wr_reg_indirect(ha, waddr, value); return; } } static void qla4_83xx_rmw_crb_reg(struct scsi_qla_host *ha , uint32_t raddr , uint32_t waddr , struct qla4_83xx_rmw *p_rmw_hdr ) { uint32_t value ; { if ((unsigned int )p_rmw_hdr->index_a != 0U) { value = ha->reset_tmplt.array[(int )p_rmw_hdr->index_a]; } else { qla4_83xx_rd_reg_indirect(ha, raddr, & value); } value = 
p_rmw_hdr->test_mask & value; value = value << (int )p_rmw_hdr->shl; value = value >> (int )p_rmw_hdr->shr; value = p_rmw_hdr->or_value | value; value = p_rmw_hdr->xor_value ^ value; qla4_83xx_wr_reg_indirect(ha, waddr, value); return; } } static void qla4_83xx_write_list(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { struct qla4_83xx_entry *p_entry ; uint32_t i ; { p_entry = (struct qla4_83xx_entry *)p_hdr + 8U; i = 0U; goto ldv_63551; ldv_63550: qla4_83xx_wr_reg_indirect(ha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63551: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63550; } else { } return; } } static void qla4_83xx_read_write_list(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { struct qla4_83xx_entry *p_entry ; uint32_t i ; { p_entry = (struct qla4_83xx_entry *)p_hdr + 8U; i = 0U; goto ldv_63560; ldv_63559: qla4_83xx_read_write_crb_reg(ha, p_entry->arg1, p_entry->arg2); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63560: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63559; } else { } return; } } static void qla4_83xx_poll_list(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { long delay ; struct qla4_83xx_entry *p_entry ; struct qla4_83xx_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; { p_poll = (struct qla4_83xx_poll *)p_hdr + 8U; p_entry = (struct qla4_83xx_entry *)p_poll + 8U; delay = (long )p_hdr->delay; if (delay == 0L) { i = 0U; goto ldv_63572; ldv_63571: qla4_83xx_poll_reg(ha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); i = i + 1U; p_entry = p_entry + 1; ldv_63572: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63571; } else { } } else { i = 0U; goto ldv_63575; ldv_63574: tmp = qla4_83xx_poll_reg(ha, p_entry->arg1, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { qla4_83xx_rd_reg_indirect(ha, p_entry->arg1, & value); qla4_83xx_rd_reg_indirect(ha, p_entry->arg2, & value); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63575: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63574; } else { } } return; } } static void qla4_83xx_poll_write_list(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { long delay ; struct qla4_83xx_quad_entry *p_entry ; struct qla4_83xx_poll *p_poll ; uint32_t i ; int tmp ; { p_poll = (struct qla4_83xx_poll *)p_hdr + 8U; p_entry = (struct qla4_83xx_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_63587; ldv_63586: qla4_83xx_wr_reg_indirect(ha, p_entry->dr_addr, p_entry->dr_value); qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp = qla4_83xx_poll_reg(ha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Timeout Error: poll list, item_num %d, entry_num %d\n", "qla4_83xx_poll_write_list", i, ha->reset_tmplt.seq_index); } else { } } else { } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63587: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63586; } else { } return; } } static void qla4_83xx_read_modify_write(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { struct qla4_83xx_entry *p_entry ; struct qla4_83xx_rmw *p_rmw_hdr ; uint32_t i ; { p_rmw_hdr = (struct qla4_83xx_rmw *)p_hdr + 8U; 
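/* The RMW control block sits immediately after the reset-entry header;
 * the list of (read addr, write addr) register pairs follows it and each
 * pair is applied through qla4_83xx_rmw_crb_reg() above. */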
p_entry = (struct qla4_83xx_entry *)p_rmw_hdr + 16U; i = 0U; goto ldv_63597; ldv_63596: qla4_83xx_rmw_crb_reg(ha, p_entry->arg1, p_entry->arg2, p_rmw_hdr); if ((unsigned int )p_hdr->delay != 0U) { __udelay((unsigned long )p_hdr->delay); } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63597: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63596; } else { } return; } } static void qla4_83xx_pause(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { unsigned long __ms ; unsigned long tmp ; { if ((unsigned int )p_hdr->delay != 0U) { __ms = (unsigned long )p_hdr->delay; goto ldv_63605; ldv_63604: __const_udelay(4295000UL); ldv_63605: tmp = __ms; __ms = __ms - 1UL; if (tmp != 0UL) { goto ldv_63604; } else { } } else { } return; } } static void qla4_83xx_poll_read_list(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { long delay ; int index ; struct qla4_83xx_quad_entry *p_entry ; struct qla4_83xx_poll *p_poll ; uint32_t i ; uint32_t value ; int tmp ; int tmp___0 ; { p_poll = (struct qla4_83xx_poll *)p_hdr + 8U; p_entry = (struct qla4_83xx_quad_entry *)p_poll + 8U; delay = (long )p_hdr->delay; i = 0U; goto ldv_63619; ldv_63618: qla4_83xx_wr_reg_indirect(ha, p_entry->ar_addr, p_entry->ar_value); if (delay != 0L) { tmp___0 = qla4_83xx_poll_reg(ha, p_entry->ar_addr, (int )delay, p_poll->test_mask, p_poll->test_value); if (tmp___0 != 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Timeout Error: poll list, Item_num %d, entry_num %d\n", "qla4_83xx_poll_read_list", i, ha->reset_tmplt.seq_index); } else { } } else { index = ha->reset_tmplt.array_index; qla4_83xx_rd_reg_indirect(ha, p_entry->dr_addr, & value); tmp = index; index = index + 1; ha->reset_tmplt.array[tmp] = value; if (index == 16) { ha->reset_tmplt.array_index = 1; } else { } } } else { } i = i + 1U; p_entry = p_entry + 1; ldv_63619: ; if ((uint32_t )p_hdr->count > i) { goto ldv_63618; } else { } return; } } static void qla4_83xx_seq_end(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { { ha->reset_tmplt.seq_end = 1U; return; } } static void qla4_83xx_template_end(struct scsi_qla_host *ha , struct qla4_83xx_reset_entry_hdr *p_hdr ) { { ha->reset_tmplt.template_end = 1U; if (ha->reset_tmplt.seq_error == 0) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Reset sequence completed SUCCESSFULLY.\n", "qla4_83xx_template_end"); } else { } } else { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Reset sequence completed with some timeout errors.\n", "qla4_83xx_template_end"); } return; } } static void qla4_83xx_process_reset_template(struct scsi_qla_host *ha , char *p_buff ) { int index ; int entries ; struct qla4_83xx_reset_entry_hdr *p_hdr ; char *p_entry ; { p_entry = p_buff; ha->reset_tmplt.seq_end = 0U; ha->reset_tmplt.template_end = 0U; entries = (int )(ha->reset_tmplt.hdr)->entries; index = ha->reset_tmplt.seq_index; goto ldv_63652; ldv_63651: p_hdr = (struct qla4_83xx_reset_entry_hdr *)p_entry; switch ((int )p_hdr->cmd) { case 0: ; goto ldv_63639; case 1: qla4_83xx_write_list(ha, p_hdr); goto ldv_63639; case 2: qla4_83xx_read_write_list(ha, p_hdr); goto ldv_63639; case 4: qla4_83xx_poll_list(ha, p_hdr); goto ldv_63639; case 8: qla4_83xx_poll_write_list(ha, p_hdr); goto ldv_63639; case 16: qla4_83xx_read_modify_write(ha, p_hdr); goto ldv_63639; case 32: qla4_83xx_pause(ha, p_hdr); goto ldv_63639; case 64: qla4_83xx_seq_end(ha, p_hdr); goto 
ldv_63639; case 128: qla4_83xx_template_end(ha, p_hdr); goto ldv_63639; case 256: qla4_83xx_poll_read_list(ha, p_hdr); goto ldv_63639; default: dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Unknown command ==> 0x%04x on entry = %d\n", "qla4_83xx_process_reset_template", (int )p_hdr->cmd, index); goto ldv_63639; } ldv_63639: p_entry = p_entry + (unsigned long )p_hdr->size; index = index + 1; ldv_63652: ; if ((unsigned int )ha->reset_tmplt.seq_end == 0U && index < entries) { goto ldv_63651; } else { } ha->reset_tmplt.seq_index = index; return; } } static void qla4_83xx_process_stop_seq(struct scsi_qla_host *ha ) { { ha->reset_tmplt.seq_index = 0; qla4_83xx_process_reset_template(ha, (char *)ha->reset_tmplt.stop_offset); if ((unsigned int )ha->reset_tmplt.seq_end != 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Abrupt STOP Sub-Sequence end.\n", "qla4_83xx_process_stop_seq"); } else { } return; } } static void qla4_83xx_process_start_seq(struct scsi_qla_host *ha ) { { qla4_83xx_process_reset_template(ha, (char *)ha->reset_tmplt.start_offset); if ((unsigned int )ha->reset_tmplt.template_end != 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Abrupt START Sub-Sequence end.\n", "qla4_83xx_process_start_seq"); } else { } return; } } static void qla4_83xx_process_init_seq(struct scsi_qla_host *ha ) { { qla4_83xx_process_reset_template(ha, (char *)ha->reset_tmplt.init_offset); if ((unsigned int )ha->reset_tmplt.seq_end != 1U) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Abrupt INIT Sub-Sequence end.\n", "qla4_83xx_process_init_seq"); } else { } return; } } static int qla4_83xx_restart(struct scsi_qla_host *ha ) { int ret_val ; uint32_t idc_ctrl ; int tmp ; { ret_val = 0; qla4_83xx_process_stop_seq(ha); idc_ctrl = qla4_83xx_rd_reg(ha, 14224UL); if ((idc_ctrl & 2U) != 0U) { qla4_83xx_wr_reg(ha, 14224UL, idc_ctrl & 4294967293U); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Graceful RESET: Not collecting minidump\n", "qla4_83xx_restart"); } else { qla4_8xxx_get_minidump(ha); } qla4_83xx_process_init_seq(ha); tmp = qla4_83xx_copy_bootloader(ha); if (tmp != 0) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Copy bootloader, firmware restart failed!\n", "qla4_83xx_restart"); ret_val = 1; goto exit_restart; } else { } qla4_83xx_wr_reg(ha, 13820UL, 0U); qla4_83xx_process_start_seq(ha); exit_restart: ; return (ret_val); } } int qla4_83xx_start_firmware(struct scsi_qla_host *ha ) { int ret_val ; { ret_val = 0; ret_val = qla4_83xx_restart(ha); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Restart error\n", "qla4_83xx_start_firmware"); goto exit_start_fw; } else if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: Restart done\n", "qla4_83xx_start_firmware"); } else { } ret_val = qla4_83xx_check_cmd_peg_status(ha); if (ret_val == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Peg not initialized\n", "qla4_83xx_start_firmware"); } else { } exit_start_fw: ; return (ret_val); } } static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha ) { int tmp ; { tmp = test_and_clear_bit(28L, (unsigned long volatile *)(& ha->flags)); if (tmp != 0) { qla4_8xxx_intr_disable(ha); } else { } return; } } static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha ) { uint32_t mb_int ; uint32_t ret ; int tmp ; { tmp = test_and_clear_bit(29L, (unsigned long volatile *)(& 
ha->flags)); if (tmp != 0) { ret = readl((void const volatile *)(& (ha->qla4_83xx_reg)->mbox_int)); mb_int = ret & 4294967291U; writel(mb_int, (void volatile *)(& (ha->qla4_83xx_reg)->mbox_int)); writel(1U, (void volatile *)(& (ha->qla4_83xx_reg)->leg_int_mask)); } else { } return; } } void qla4_83xx_disable_intrs(struct scsi_qla_host *ha ) { { qla4_83xx_disable_mbox_intrs(ha); qla4_83xx_disable_iocb_intrs(ha); return; } } static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha ) { int tmp ; { tmp = constant_test_bit(28L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { qla4_8xxx_intr_enable(ha); set_bit(28L, (unsigned long volatile *)(& ha->flags)); } else { } return; } } void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha ) { uint32_t mb_int ; int tmp ; { tmp = constant_test_bit(29L, (unsigned long const volatile *)(& ha->flags)); if (tmp == 0) { mb_int = 4U; writel(mb_int, (void volatile *)(& (ha->qla4_83xx_reg)->mbox_int)); writel(0U, (void volatile *)(& (ha->qla4_83xx_reg)->leg_int_mask)); set_bit(29L, (unsigned long volatile *)(& ha->flags)); } else { } return; } } void qla4_83xx_enable_intrs(struct scsi_qla_host *ha ) { { qla4_83xx_enable_mbox_intrs(ha); qla4_83xx_enable_iocb_intrs(ha); return; } } void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha , uint32_t *mbx_cmd , int incount ) { int i ; { i = 1; goto ldv_63707; ldv_63706: writel(*(mbx_cmd + (unsigned long )i), (void volatile *)(& (ha->qla4_83xx_reg)->mailbox_in) + (unsigned long )i); i = i + 1; ldv_63707: ; if (i < incount) { goto ldv_63706; } else { } writel(*mbx_cmd, (void volatile *)(& (ha->qla4_83xx_reg)->mailbox_in)); writel(1U, (void volatile *)(& (ha->qla4_83xx_reg)->host_intr)); return; } } void qla4_83xx_process_mbox_intr(struct scsi_qla_host *ha , int outcount ) { int intr_status ; unsigned int tmp ; { tmp = readl((void const volatile *)(& (ha->qla4_83xx_reg)->risc_intr)); intr_status = (int )tmp; if (intr_status != 0) { ha->mbox_status_count = (uint8_t volatile )outcount; (*((ha->isp_ops)->interrupt_service_routine))(ha, (uint32_t )intr_status); } else { } return; } } int qla4_83xx_isp_reset(struct scsi_qla_host *ha ) { int rval ; uint32_t dev_state ; int tmp ; int tmp___0 ; int tmp___1 ; { (*((ha->isp_ops)->idc_lock))(ha); tmp = qla4_8xxx_rd_direct(ha, 4U); dev_state = (uint32_t )tmp; if (ql4xdontresethba != 0) { qla4_83xx_set_idc_dontreset(ha); } else { } if (dev_state == 3U) { tmp___0 = qla4_83xx_idc_dontreset(ha); if (tmp___0 == 1) { dev_printk("\v", (struct device const *)(& (ha->pdev)->dev), "%s: Reset recovery disabled\n", "qla4_83xx_isp_reset"); rval = 1; goto exit_isp_reset; } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: HW State: NEED RESET\n", "qla4_83xx_isp_reset"); } else { } qla4_8xxx_wr_direct(ha, 4U, 4U); } else if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: HW state already set to NEED_RESET\n", "qla4_83xx_isp_reset"); } else { } tmp___1 = qla4_83xx_can_perform_reset(ha); if (tmp___1 != 0) { set_bit(25L, (unsigned long volatile *)(& ha->flags)); } else { } (*((ha->isp_ops)->idc_unlock))(ha); rval = qla4_8xxx_device_state_handler(ha); (*((ha->isp_ops)->idc_lock))(ha); qla4_8xxx_clear_rst_ready(ha); exit_isp_reset: (*((ha->isp_ops)->idc_unlock))(ha); if (rval == 0) { clear_bit(19L, (unsigned long volatile *)(& ha->flags)); } else { } return (rval); } } static void qla4_83xx_dump_pause_control_regs(struct scsi_qla_host *ha ) { u32 val ; 
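/* Debug dump, printed only when ql4xextended_error_logging == 2: SRE-shim
 * control, per-port Rx buffer pause thresholds, traffic-class max-cell
 * registers, Rx traffic-class statistics and IFB pause thresholds. */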
u32 val1 ; int i ; int status ; { val = 0U; val1 = 0U; status = 0; status = qla4_83xx_rd_reg_indirect(ha, 220201604U, & val); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "SRE-Shim Ctrl:0x%x\n", val); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 0 Rx Buffer Pause Threshold Registers[TC7..TC0]:"); } else { } i = 0; goto ldv_63729; ldv_63728: status = qla4_83xx_rd_reg_indirect(ha, (uint32_t )((i + 46661865) * 4), & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i + 1; ldv_63729: ; if (i <= 7) { goto ldv_63728; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 1 Rx Buffer Pause Threshold Registers[TC7..TC0]:"); } else { } i = 0; goto ldv_63732; ldv_63731: status = qla4_83xx_rd_reg_indirect(ha, (uint32_t )((i + 46662889) * 4), & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i + 1; ldv_63732: ; if (i <= 7) { goto ldv_63731; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 0 RxB Traffic Class Max Cell Registers[3..0]:"); } else { } i = 0; goto ldv_63735; ldv_63734: status = qla4_83xx_rd_reg_indirect(ha, (uint32_t )((i + 46661858) * 4), & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i + 1; ldv_63735: ; if (i <= 3) { goto ldv_63734; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 1 RxB Traffic Class Max Cell Registers[3..0]:"); } else { } i = 0; goto ldv_63738; ldv_63737: status = qla4_83xx_rd_reg_indirect(ha, (uint32_t )((i + 46662882) * 4), & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i + 1; ldv_63738: ; if (i <= 3) { goto ldv_63737; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 0 RxB Rx Traffic Class Stats [TC7..TC0]"); } else { } i = 7; goto ldv_63741; ldv_63740: status = qla4_83xx_rd_reg_indirect(ha, 186647452U, & val); val = val & 536870911U; qla4_83xx_wr_reg_indirect(ha, 186647452U, (u32 )(i << 29) | val); status = qla4_83xx_rd_reg_indirect(ha, 186647452U, & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i - 1; ldv_63741: ; if (i >= 0) { goto ldv_63740; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Port 1 RxB Rx Traffic Class Stats [TC7..TC0]"); } else { } i = 7; goto ldv_63744; ldv_63743: status = qla4_83xx_rd_reg_indirect(ha, 186651548U, & val); val = val & 536870911U; qla4_83xx_wr_reg_indirect(ha, 186651548U, (u32 )(i << 29) | val); status = qla4_83xx_rd_reg_indirect(ha, 186651548U, & val); if (ql4xextended_error_logging == 2) { printk("px%x ", val); } else { } i = i - 1; ldv_63744: ; if (i >= 0) { goto ldv_63743; } else { } if (ql4xextended_error_logging == 2) { printk("\016\n"); } else { } status = qla4_83xx_rd_reg_indirect(ha, 186648324U, & val); status = qla4_83xx_rd_reg_indirect(ha, 186652420U, & 
val1); if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n", val, val1); } else { } return; } } static void __qla4_83xx_disable_pause(struct scsi_qla_host *ha ) { int i ; { qla4_83xx_wr_reg_indirect(ha, 220201604U, 0U); i = 0; goto ldv_63751; ldv_63750: qla4_83xx_wr_reg_indirect(ha, (uint32_t )((i + 46661865) * 4), 0U); qla4_83xx_wr_reg_indirect(ha, (uint32_t )((i + 46662889) * 4), 0U); i = i + 1; ldv_63751: ; if (i <= 7) { goto ldv_63750; } else { } i = 0; goto ldv_63754; ldv_63753: qla4_83xx_wr_reg_indirect(ha, (uint32_t )((i + 46661858) * 4), 67044351U); qla4_83xx_wr_reg_indirect(ha, (uint32_t )((i + 46662882) * 4), 67044351U); i = i + 1; ldv_63754: ; if (i <= 3) { goto ldv_63753; } else { } qla4_83xx_wr_reg_indirect(ha, 186648324U, 0U); qla4_83xx_wr_reg_indirect(ha, 186652420U, 0U); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "Disabled pause frames successfully.\n"); return; } } static void qla4_83xx_eport_init(struct scsi_qla_host *ha ) { { qla4_83xx_wr_reg_indirect(ha, 671632992U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633008U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633024U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633040U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633056U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633072U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633088U, 0U); qla4_83xx_wr_reg_indirect(ha, 671633104U, 0U); qla4_83xx_wr_reg_indirect(ha, 671632976U, 255U); dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "EPORT is out of reset.\n"); return; } } void qla4_83xx_disable_pause(struct scsi_qla_host *ha ) { { (*((ha->isp_ops)->idc_lock))(ha); qla4_83xx_eport_init(ha); qla4_83xx_dump_pause_control_regs(ha); __qla4_83xx_disable_pause(ha); (*((ha->isp_ops)->idc_unlock))(ha); return; } } int qla4_83xx_is_detached(struct scsi_qla_host *ha ) { uint32_t drv_active ; int tmp ; int tmp___0 ; { tmp = qla4_8xxx_rd_direct(ha, 3U); drv_active = (uint32_t )tmp; tmp___0 = constant_test_bit(1L, (unsigned long const volatile *)(& ha->flags)); if (tmp___0 != 0 && ((uint32_t )(1 << (int )ha->func_num) & drv_active) == 0U) { if (ql4xextended_error_logging == 2) { dev_printk("\016", (struct device const *)(& (ha->pdev)->dev), "%s: drv_active = 0x%X\n", "qla4_83xx_is_detached", drv_active); } else { } return (0); } else { } return (1); } } bool ldv_queue_work_on_557(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___2 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } bool ldv_queue_delayed_work_on_558(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 ) { ldv_func_ret_type___3 ldv_func_res ; bool tmp ; { tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4); ldv_func_res = tmp; activate_work_2(& ldv_func_arg3->work, 2); return (ldv_func_res); } } bool ldv_queue_work_on_559(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct work_struct *ldv_func_arg3 ) { ldv_func_ret_type___4 ldv_func_res ; bool tmp ; { tmp = queue_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3); ldv_func_res = tmp; activate_work_2(ldv_func_arg3, 2); return (ldv_func_res); } } void ldv_flush_workqueue_560(struct workqueue_struct *ldv_func_arg1 ) { { flush_workqueue(ldv_func_arg1); 
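/* LDV environment model: after the real flush_workqueue(), the harness
 * calls call_and_disable_all_2(2), its model of draining the work items
 * queued through the ldv_queue_*work_on wrappers in this block. */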
bool ldv_queue_delayed_work_on_561(int ldv_func_arg1 , struct workqueue_struct *ldv_func_arg2 , struct delayed_work *ldv_func_arg3 , unsigned long ldv_func_arg4 )
{
  ldv_func_ret_type___5 ldv_func_res ;
  bool tmp ;
  {
  tmp = queue_delayed_work_on(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3, ldv_func_arg4);
  ldv_func_res = tmp;
  activate_work_2(& ldv_func_arg3->work, 2);
  return (ldv_func_res);
  }
}

/* LDV models for the memory and skb allocators: check that the gfp flags are
 * legal in the current (possibly atomic) context and return an unconstrained
 * pointer from ldv_undef_ptr(). */
void *ldv_kmem_cache_alloc_567(struct kmem_cache *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return (tmp);
  }
}

int ldv_pskb_expand_head_573(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}

struct sk_buff *ldv_skb_clone_575(struct sk_buff *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

struct sk_buff *ldv_skb_copy_577(struct sk_buff const *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

struct sk_buff *ldv___netdev_alloc_skb_578(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

struct sk_buff *ldv___netdev_alloc_skb_579(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

struct sk_buff *ldv___netdev_alloc_skb_580(struct net_device *ldv_func_arg1 , unsigned int ldv_func_arg2 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

int ldv_pskb_expand_head_581(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}

int ldv_pskb_expand_head_582(struct sk_buff *ldv_func_arg1 , int ldv_func_arg2 , int ldv_func_arg3 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((int )((long )tmp));
  }
}

struct sk_buff *ldv_skb_clone_583(struct sk_buff *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return ((struct sk_buff *)tmp);
  }
}

void *ldv_kmem_cache_alloc_584(struct kmem_cache *ldv_func_arg1 , gfp_t flags )
{
  void *tmp ;
  {
  ldv_check_alloc_flags(flags);
  tmp = ldv_undef_ptr();
  return (tmp);
  }
}

/* LDV model for scsi_add_host_with_dma(): on success, advance the state machine
 * that tracks the scsi_host_template callbacks. */
int ldv_scsi_add_host_with_dma_585(struct Scsi_Host *shost , struct device *dev , struct device *dma_dev )
{
  ldv_func_ret_type___6 ldv_func_res ;
  int tmp ;
  {
  tmp = scsi_add_host_with_dma(shost, dev, dma_dev);
  ldv_func_res = tmp;
  if (ldv_func_res == 0) {
    ldv_state_variable_27 = 1;
    ldv_initialize_scsi_host_template_27();
  }
  return (ldv_func_res);
  }
}

/* vmalloc() may sleep, so it is only legal outside of atomic context. */
void *ldv_vmalloc_586(unsigned long ldv_func_arg1 )
{
  void *tmp ;
  {
  ldv_check_alloc_nonatomic();
  tmp = ldv_undef_ptr();
  return (tmp);
  }
}

void *ldv_vmalloc_587(unsigned long ldv_func_arg1 )
{
  void *tmp ;
  {
  ldv_check_alloc_nonatomic();
  tmp = ldv_undef_ptr();
  return (tmp);
  }
}

/* Reaching this label is what the verifier reports as a rule violation. */
__inline static void ldv_error(void)
{
  {
  ERROR: ;
  __VERIFIER_error();
  }
}

/* LDV model of the kernel's IS_ERR()/ERR_PTR()/PTR_ERR() helpers, using the fixed
 * boundary 2012 in place of (unsigned long )-MAX_ERRNO. */
bool ldv_is_err(void const *ptr )
{
  {
  return ((unsigned long )ptr > 2012UL);
  }
}

void *ldv_err_ptr(long error )
{
  {
  return ((void *)(2012L - error));
  }
}
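/* Illustrative sketch only (not part of the CIL-generated input): intended use of
 * the LDV error-pointer model above.  It plays the role of the kernel's
 * ERR_PTR()/IS_ERR()/PTR_ERR() helpers, but with the fixed boundary 2012 instead
 * of (unsigned long )-MAX_ERRNO.  The helper name is hypothetical. */
long ldv_ptr_err(void const *ptr ) ;   /* defined just below */

static long ldv_example_err_ptr_roundtrip(long error )
{
  void *p ;
  p = ldv_err_ptr(error);                    /* a negative errno becomes a pointer value above 2012 */
  if ((int )ldv_is_err((void const *)p)) {
    return (ldv_ptr_err((void const *)p));   /* decode it back to the original errno */
  }
  return (0L);
}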
long ldv_ptr_err(void const *ptr )
{
  {
  return ((long )(2012UL - (unsigned long )ptr));
  }
}

bool ldv_is_err_or_null(void const *ptr )
{
  bool tmp ;
  int tmp___0 ;
  {
  if ((unsigned long )ptr == (unsigned long )((void const *)0)) {
    tmp___0 = 1;
  } else {
    tmp = ldv_is_err(ptr);
    if ((int )tmp) {
      tmp___0 = 1;
    } else {
      tmp___0 = 0;
    }
  }
  return ((bool )tmp___0);
  }
}

/* LDV lock-context model: ldv_spin is 1 while the instrumented driver holds a
 * spinlock and 0 otherwise.  The checks below flag allocations that could sleep
 * while the lock is held. */
int ldv_spin = 0;

void ldv_check_alloc_flags(gfp_t flags )
{
  {
  /* The model treats gfp bit 0x10u (__GFP_WAIT in older kernels) as "may sleep";
   * requesting it while a spinlock is held is an error. */
  if (ldv_spin != 0 && (flags & 16U) != 0U) {
    ldv_error();
  }
  return;
  }
}

extern struct page *ldv_some_page(void) ;

struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags )
{
  struct page *tmp ;
  {
  if (ldv_spin != 0 && (flags & 16U) != 0U) {
    ldv_error();
  }
  tmp = ldv_some_page();
  return (tmp);
  }
}

void ldv_check_alloc_nonatomic(void)
{
  {
  /* Unconditionally-sleeping allocators (e.g. vmalloc) must not run under a spinlock. */
  if (ldv_spin != 0) {
    ldv_error();
  }
  return;
  }
}

void ldv_spin_lock(void)
{
  {
  ldv_spin = 1;
  return;
  }
}

void ldv_spin_unlock(void)
{
  {
  ldv_spin = 0;
  return;
  }
}

int ldv_spin_trylock(void)
{
  int is_lock ;
  {
  /* Nondeterministic outcome: either the lock is already taken (return 0) or it is acquired. */
  is_lock = ldv_undef_int();
  if (is_lock != 0) {
    return (0);
  } else {
    ldv_spin = 1;
    return (1);
  }
  }
}
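/* Illustrative sketch only (not part of the CIL-generated input): the rule the
 * ldv_spin model above encodes.  Between ldv_spin_lock() and ldv_spin_unlock(),
 * an allocation whose gfp flags include the 0x10u "may sleep" bit is reported via
 * ldv_error(); flags without that bit pass.  The helper name and the flag values
 * used here are hypothetical and only demonstrate the check. */
static void ldv_example_alloc_under_lock(void)
{
  ldv_spin_lock();
  ldv_check_alloc_flags(32U);          /* bit 0x10u clear: accepted                   */
  /* ldv_check_alloc_flags(16U); */    /* bit 0x10u set: would reach ldv_error() here */
  ldv_spin_unlock();
}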